repo_name (string, 6-130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
zhaohongyin/mmdetection-2.15 |
[
"9fd29bfd373a6ad00674471c04ecc916f8ad413e"
] |
[
"mmdet/models/losses/eqlv2.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributed as dist\nfrom mmdet.utils import get_root_logger\nfrom functools import partial\n\nfrom ..builder import LOSSES\n\n\[email protected]_module()\nclass EQLv2(nn.Module):\n def __init__(self,\n use_sigmoid=True,\n reduction='mean',\n class_weight=None,\n loss_weight=1.0,\n num_classes=1203, # 1203 for lvis v1.0, 1230 for lvis v0.5\n gamma=12,\n mu=0.8,\n alpha=4.0,\n vis_grad=False):\n super().__init__()\n self.use_sigmoid = True\n self.reduction = reduction\n self.loss_weight = loss_weight\n self.class_weight = class_weight\n self.num_classes = num_classes\n self.group = True\n\n # cfg for eqlv2\n self.vis_grad = vis_grad\n self.gamma = gamma\n self.mu = mu\n self.alpha = alpha\n\n # initial variables\n self._pos_grad = None\n self._neg_grad = None\n self.pos_neg = None\n\n def _func(x, gamma, mu):\n return 1 / (1 + torch.exp(-gamma * (x - mu)))\n self.map_func = partial(_func, gamma=self.gamma, mu=self.mu)\n logger = get_root_logger()\n logger.info(f\"build EQL v2, gamma: {gamma}, mu: {mu}, alpha: {alpha}\")\n\n def forward(self,\n cls_score,\n label,\n weight=None,\n avg_factor=None,\n reduction_override=None,\n **kwargs):\n self.n_i, self.n_c = cls_score.size()\n\n self.gt_classes = label\n self.pred_class_logits = cls_score\n\n #import pdb\n #pdb.set_trace() \n def expand_label(pred, gt_classes):\n target = pred.new_zeros(self.n_i, self.n_c)\n target[torch.arange(self.n_i), gt_classes] = 1\n return target\n\n target = expand_label(cls_score, label)\n\n pos_w, neg_w = self.get_weight(cls_score)\n\n weight = pos_w * target + neg_w * (1 - target)\n\n cls_loss = F.binary_cross_entropy_with_logits(cls_score, target,\n reduction='none')\n cls_loss = torch.sum(cls_loss * weight) / self.n_i\n\n self.collect_grad(cls_score.detach(), target.detach(), weight.detach())\n\n return self.loss_weight * cls_loss\n\n def get_channel_num(self, num_classes):\n num_channel = num_classes + 1\n return num_channel\n\n def get_activation(self, cls_score):\n cls_score = torch.sigmoid(cls_score)\n n_i, n_c = cls_score.size()\n bg_score = cls_score[:, -1].view(n_i, 1)\n cls_score[:, :-1] *= (1 - bg_score)\n return cls_score\n\n def collect_grad(self, cls_score, target, weight):\n prob = torch.sigmoid(cls_score)\n grad = target * (prob - 1) + (1 - target) * prob\n grad = torch.abs(grad)\n\n # do not collect grad for objectiveness branch [:-1]\n pos_grad = torch.sum(grad * target * weight, dim=0)[:-1]\n neg_grad = torch.sum(grad * (1 - target) * weight, dim=0)[:-1]\n\n dist.all_reduce(pos_grad)\n dist.all_reduce(neg_grad)\n\n self._pos_grad += pos_grad\n self._neg_grad += neg_grad\n self.pos_neg = self._pos_grad / (self._neg_grad + 1e-10)\n\n def get_weight(self, cls_score):\n # we do not have information about pos grad and neg grad at beginning\n if self._pos_grad is None:\n self._pos_grad = cls_score.new_zeros(self.num_classes)\n self._neg_grad = cls_score.new_zeros(self.num_classes)\n neg_w = cls_score.new_ones((self.n_i, self.n_c))\n pos_w = cls_score.new_ones((self.n_i, self.n_c))\n else:\n # the negative weight for objectiveness is always 1\n neg_w = torch.cat([self.map_func(self.pos_neg), cls_score.new_ones(1)])\n pos_w = 1 + self.alpha * (1 - neg_w)\n neg_w = neg_w.view(1, -1).expand(self.n_i, self.n_c)\n pos_w = pos_w.view(1, -1).expand(self.n_i, self.n_c)\n return pos_w, neg_w\n"
] |
[
[
"torch.abs",
"torch.sigmoid",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.sum",
"torch.exp",
"torch.arange",
"torch.distributed.all_reduce"
]
] |
kosletr/SessionBehaviorClassifierAPI |
[
"15e72da6c9c84dca20beb16469c855e11f901b82"
] |
[
"malicious-payload-text-classifier/data_utils.py"
] |
[
"import numpy as np\nimport re\nimport csv\n\n\nclass Data(object):\n \"\"\"\n Class to handle loading and processing of raw datasets.\n \"\"\"\n\n def __init__(self, data_source,\n alphabet=\"abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\\\"/\\\\|_@#$%^&*~`+-=<>()[]{}\",\n input_size=1014, num_of_classes=8):\n \"\"\"\n Initialization of a Data object.\n\n Args:\n data_source (str): Raw data file path\n alphabet (str): Alphabet of characters to index\n input_size (int): Size of input features\n num_of_classes (int): Number of classes in data\n \"\"\"\n self.alphabet = alphabet\n self.alphabet_size = len(self.alphabet)\n self.dict = {} # Maps each character to an integer\n self.no_of_classes = num_of_classes\n for idx, char in enumerate(self.alphabet):\n self.dict[char] = idx + 1\n self.length = input_size\n self.data_source = data_source\n\n def load_data(self):\n \"\"\"\n Load raw data from the source file into data variable.\n\n Returns: None\n\n \"\"\"\n data = []\n with open(self.data_source, 'r', encoding='utf-8') as f:\n rdr = csv.reader(f, delimiter=',', quotechar='\"')\n for row in rdr:\n txt = \"\"\n for s in row[1:]:\n txt = txt + \" \" + \\\n re.sub(\"^\\s*(.-)\\s*$\", \"%1\", s).replace(\"\\\\n\", \"\\n\")\n data.append((int(row[0]), txt)) # format: (label, text)\n self.data = np.array(data)\n print(\"Data loaded from \" + self.data_source)\n\n def get_all_data(self):\n \"\"\"\n Return all loaded data from data variable.\n\n Returns:\n (np.ndarray) Data transformed from raw to indexed form with associated one-hot label.\n\n \"\"\"\n data_size = len(self.data)\n start_index = 0\n end_index = data_size\n batch_texts = self.data[start_index:end_index]\n batch_indices = []\n one_hot = np.eye(self.no_of_classes, dtype='int64')\n classes = []\n for c, s in batch_texts:\n batch_indices.append(self.str_to_indexes(s))\n #c = int(c) - 1\n c = int(c)\n classes.append(one_hot[c])\n return np.asarray(batch_indices, dtype='int64'), np.asarray(classes), batch_texts\n\n def str_to_indexes(self, s):\n \"\"\"\n Convert a string to character indexes based on character dictionary.\n\n Args:\n s (str): String to be converted to indexes\n\n Returns:\n str2idx (np.ndarray): Indexes of characters in s\n\n \"\"\"\n s = s.lower()\n max_length = min(len(s), self.length)\n str2idx = np.zeros(self.length, dtype='int64')\n for i in range(1, max_length + 1):\n c = s[-i]\n if c in self.dict:\n str2idx[i - 1] = self.dict[c]\n return str2idx\n"
] |
[
[
"numpy.asarray",
"numpy.eye",
"numpy.array",
"numpy.zeros"
]
] |
ahmadsalim/numpyro |
[
"015c80ddd24cf6bc89006fc3a70b424fecd09331"
] |
[
"numpyro/infer/kernels.py"
] |
[
"from abc import ABC, abstractmethod\nfrom typing import Callable, List, Dict, Tuple\nimport numpy as np\nimport numpy.random as npr\nimport jax.numpy as jnp\nimport jax.scipy.stats\nimport jax.scipy.linalg\nimport numpyro.distributions as dist\nfrom numpyro.util import sqrth, posdef, safe_norm\n\n\nclass PrecondMatrix(ABC):\n @abstractmethod\n def compute(self, particles: jnp.ndarray, loss_fn: Callable[[jnp.ndarray], float]):\n \"\"\"\n Computes a preconditioning matrix for a given set of particles and a loss function\n :param particles: The Stein particles to compute the preconditioning matrix from\n :param loss_fn: Loss function given particles\n \"\"\"\n raise NotImplementedError\n\n\nclass SteinKernel(ABC):\n @property\n @abstractmethod\n def mode(self):\n \"\"\"\n Returns the type of kernel, either 'norm' or 'vector' or 'matrix'.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def compute(self, particles: jnp.ndarray, particle_info: Dict[str, Tuple[int, int]],\n loss_fn: Callable[[jnp.ndarray], float]):\n \"\"\"\n Computes the kernel function given the input Stein particles\n :param particles: The Stein particles to compute the kernel from\n :param particle_info: A mapping from parameter names to the position in the particle matrix\n :param loss_fn: Loss function given particles\n \"\"\"\n raise NotImplementedError\n\n\nclass RBFKernel(SteinKernel):\n \"\"\"\n Calculates the Gaussian RBF kernel function with median bandwidth.\n This is the kernel used in the original \"Stein Variational Gradient Descent\" paper by Liu and Wang\n :param mode: Either 'norm' (default) specifying to take the norm of each particle, 'vector' to return a component-wise kernel\n or 'matrix' to return a matrix-valued kernel\n :param matrix_mode: Either 'norm_diag' (default) for diagonal filled with the norm kernel or 'vector_diag' for diagonal of vector-valued kernel\n :param bandwidth_factor: A multiplier to the bandwidth based on data size n (default 1/log(n))\n \"\"\"\n\n def __init__(self, mode='norm', matrix_mode='norm_diag',\n bandwidth_factor: Callable[[float], float] = lambda n: 1 / jnp.log(n)):\n assert mode == 'norm' or mode == 'vector' or mode == 'matrix'\n assert matrix_mode == 'norm_diag' or matrix_mode == 'vector_diag'\n self._mode = mode\n self.matrix_mode = matrix_mode\n self.bandwidth_factor = bandwidth_factor\n\n def _normed(self):\n return self._mode == 'norm' or (self.mode == 'matrix' and self.matrix_mode == 'norm_diag')\n\n def compute(self, particles, particle_info, loss_fn):\n diffs = jnp.expand_dims(particles, axis=0) - jnp.expand_dims(particles, axis=1) # N x N (x D)\n if self._normed() and particles.ndim >= 2:\n diffs = safe_norm(diffs, ord=2, axis=-1) # N x D -> N\n diffs = jnp.reshape(diffs, (diffs.shape[0] * diffs.shape[1], -1)) # N * N (x D)\n factor = self.bandwidth_factor(particles.shape[0])\n if diffs.ndim >= 2:\n diff_norms = safe_norm(diffs, ord=2, axis=-1)\n else:\n diff_norms = diffs\n median = jnp.argsort(diff_norms)[int(diffs.shape[0] / 2)]\n bandwidth = jnp.abs(diffs)[median] ** 2 * factor + 1e-5\n if self._normed():\n bandwidth = bandwidth[0]\n\n def kernel(x, y):\n diff = safe_norm(x - y, ord=2) if self._normed() and x.ndim >= 1 else x - y\n kernel_res = jnp.exp(- diff ** 2 / bandwidth)\n if self._mode == 'matrix':\n if self.matrix_mode == 'norm_diag':\n return kernel_res * jnp.identity(x.shape[0])\n else:\n return jnp.diag(kernel_res)\n else:\n return kernel_res\n\n return kernel\n\n @property\n def mode(self):\n return self._mode\n\n\nclass 
IMQKernel(SteinKernel):\n \"\"\"\n Calculates the IMQ kernel, from \"Measuring Sample Quality with Kernels\" by Gorham and Mackey\n :param mode: Either 'norm' (default) specifying to take the norm of each particle or 'vector' to return a component-wise kernel\n :param const: Positive multi-quadratic constant (c)\n :param exponent: Inverse exponent (beta) between (-1, 0)\n \"\"\"\n\n # Based on\n def __init__(self, mode='norm', const=1.0, expon=-0.5):\n assert mode == 'norm' or mode == 'vector'\n assert 0.0 < const\n assert -1.0 < expon < 0.0\n self._mode = mode\n self.const = const\n self.expon = expon\n\n @property\n def mode(self):\n return self._mode\n\n def compute(self, particles, particle_info, loss_fn):\n def kernel(x, y):\n diff = safe_norm(x - y, ord=2, axis=-1) if self._mode == 'norm' else x - y\n return (jnp.array(self.const) ** 2 + diff ** 2) ** self.expon\n return kernel\n\n\nclass LinearKernel(SteinKernel):\n \"\"\"\n Calculates the linear kernel, from \"Stein Variational Gradient Descent as Moment Matching\" by Liu and Wang\n \"\"\"\n\n def __init__(self):\n self._mode = 'norm'\n\n @property\n def mode(self):\n return self._mode\n\n def compute(self, particles: jnp.ndarray, particle_info, loss_fn):\n def kernel(x, y):\n if x.ndim >= 1:\n return x @ y + 1\n else:\n return x * y + 1\n return kernel\n\n\nclass RandomFeatureKernel(SteinKernel):\n \"\"\"\n Calculates the random kernel, from \"Stein Variational Gradient Descent as Moment Matching\" by Liu and Wang\n :param bandwidth_subset: How many particles should be used to calculate the bandwidth? (default None, meaning all particles)\n :param random_indices: The set of indices which to do random feature expansion on. (default None, meaning all indices)\n :param bandwidth_factor: A multiplier to the bandwidth based on data size n (default 1/log(n))\n \"\"\"\n\n def __init__(self, bandwidth_subset=None, random_indices=None,\n bandwidth_factor: Callable[[float], float] = lambda n: 1 / jnp.log(n)):\n assert bandwidth_subset is None or bandwidth_subset > 0\n self._mode = 'norm'\n self.bandwidth_subset = bandwidth_subset\n self.random_indices = None\n self.bandwidth_factor = bandwidth_factor\n self._random_weights = None\n self._random_biases = None\n\n @property\n def mode(self):\n return self._mode\n\n def compute(self, particles, particle_info, loss_fn):\n if self._random_weights is None:\n self._random_weights = jnp.array(npr.randn(*particles.shape))\n self._random_biases = jnp.array(npr.rand(*particles.shape) * 2 * np.pi)\n factor = self.bandwidth_factor(particles.shape[0])\n if self.bandwidth_subset is not None:\n particles = particles[npr.choice(particles.shape[0], self.bandwidth_subset)]\n diffs = jnp.expand_dims(particles, axis=0) - jnp.expand_dims(particles, axis=1) # N x N x D\n if particles.ndim >= 2:\n diffs = safe_norm(diffs, ord=2, axis=-1) # N x N x D -> N x N\n diffs = jnp.reshape(diffs, (diffs.shape[0] * diffs.shape[1], -1)) # N * N x 1\n if diffs.ndim >= 2:\n diff_norms = safe_norm(diffs, ord=2, axis=-1)\n else:\n diff_norms = diffs\n median = jnp.argsort(diff_norms)[int(diffs.shape[0] / 2)]\n bandwidth = jnp.abs(diffs)[median] ** 2 * factor + 1e-5\n\n def feature(x, w, b):\n return jnp.sqrt(2) * jnp.cos((x @ w + b) / bandwidth)\n\n def kernel(x, y):\n ws = self._random_weights if self.random_indices is None else self._random_weights[self.random_indices]\n bs = self._random_biases if self.random_indices is None else self._random_biases[self.random_indices]\n return jnp.sum(jax.vmap(lambda w, b: feature(x, w, b) * 
feature(y, w, b))(ws, bs))\n return kernel\n\n\nclass MixtureKernel(SteinKernel):\n \"\"\"\n Implements a mixture of multiple kernels from \"Stein Variational Gradient Descent as Moment Matching\" by Liu and Wang\n :param ws: Weight of each kernel in the mixture\n :param kernel_fns: Different kernel functions to mix together\n \"\"\"\n\n def __init__(self, ws: List[float], kernel_fns: List[SteinKernel]):\n assert len(ws) == len(kernel_fns)\n assert len(kernel_fns) > 1\n assert all(kf.mode == kernel_fns[0].mode for kf in kernel_fns)\n self.ws = ws\n self.kernel_fns = kernel_fns\n\n @property\n def mode(self):\n return self.kernel_fns[0].mode\n\n def compute(self, particles, particle_info, loss_fn):\n kernels = [kf.compute(particles, particle_info, loss_fn) for kf in self.kernel_fns]\n\n def kernel(x, y):\n res = self.ws[0] * kernels[0](x, y)\n for w, k in zip(self.ws[1:], kernels[1:]):\n res = res + w * k(x, y)\n return res\n\n return kernel\n\n\nclass HessianPrecondMatrix(PrecondMatrix):\n \"\"\"\n Calculates the constant precondition matrix based on the negative Hessian of the loss \n from \"Stein Variational Gradient Descent with Matrix-Valued Kernels\" by Wang, Tang, Bajaj and Liu\n \"\"\"\n\n def compute(self, particles, loss_fn):\n hessian = -jax.vmap(jax.hessian(loss_fn))(particles)\n return hessian\n\n\nclass PrecondMatrixKernel(SteinKernel):\n \"\"\"\n Calculates the preconditioned kernel from \"Stein Variational Gradient Descent with Matrix-Valued Kernels\" by Wang, Tang, Bajaj and Liu\n :param precond_matrix_fn: The constant preconditioning matrix\n :param inner_kernel_fn: The inner kernel function\n :param precond_mode: How to use the precondition matrix, either constant ('const') \n or as mixture with anchor points ('anchor_points')\n \"\"\"\n\n def __init__(self, precond_matrix_fn: PrecondMatrix, inner_kernel_fn: SteinKernel,\n precond_mode='anchor_points'):\n assert inner_kernel_fn.mode == 'matrix'\n assert precond_mode == 'const' or precond_mode == 'anchor_points'\n self.precond_matrix_fn = precond_matrix_fn\n self.inner_kernel_fn = inner_kernel_fn\n self.precond_mode = precond_mode\n\n @property\n def mode(self):\n return 'matrix'\n\n def compute(self, particles, particle_info, loss_fn):\n qs = self.precond_matrix_fn.compute(particles, loss_fn)\n if self.precond_mode == 'const':\n qs = jnp.expand_dims(jnp.mean(qs, axis=0), axis=0)\n qs_inv = jnp.linalg.inv(qs)\n qs_sqrt = sqrth(qs)\n qs_inv_sqrt = sqrth(qs_inv)\n inner_kernel = self.inner_kernel_fn.compute(particles, particle_info, loss_fn)\n\n def kernel(x, y):\n if self.precond_mode == 'const':\n wxs = jnp.array([1.])\n wys = jnp.array([1.])\n else:\n wxs = jax.nn.softmax(\n jax.vmap(lambda z, q_inv: dist.MultivariateNormal(z, posdef(q_inv)).log_prob(x))(particles, qs_inv))\n wys = jax.nn.softmax(\n jax.vmap(lambda z, q_inv: dist.MultivariateNormal(z, posdef(q_inv)).log_prob(y))(particles, qs_inv))\n return jnp.sum(\n jax.vmap(lambda qs, qis, wx, wy: wx * wy * (qis @ inner_kernel(qs @ x, qs @ y) @ qis.transpose()))(\n qs_sqrt, qs_inv_sqrt, wxs, wys), axis=0)\n\n return kernel\n\n\nclass GraphicalKernel(SteinKernel):\n \"\"\"\n Calculates graphical kernel used in \"Stein Variational Message Passing for Continuous Graphical Models\" by Wang, Zheng and Liu\n :param local_kernel_fns: A mapping between parameters and a choice of kernel function for that parameter (default to default_kernel_fn for each parameter)\n :param default_kernel_fn: The default choice of kernel function when none is specified for a particular 
parameter\n \"\"\"\n\n def __init__(self, local_kernel_fns: Dict[str, SteinKernel] = None, default_kernel_fn: SteinKernel = RBFKernel()):\n self.local_kernel_fns = local_kernel_fns if local_kernel_fns is not None else {}\n self.default_kernel_fn = default_kernel_fn\n\n @property\n def mode(self):\n return 'matrix'\n\n def compute(self, particles, particle_info, loss_fn):\n local_kernels = []\n for pk, (start_idx, end_idx) in particle_info.items():\n pk_kernel_fn = self.local_kernel_fns.get(pk, self.default_kernel_fn)\n pk_loss_fn = lambda ps: loss_fn(\n jnp.concatenate([particles[:, :start_idx], ps, particles[:, end_idx:]], axis=-1))\n pk_kernel = pk_kernel_fn.compute(particles[:, start_idx:end_idx], {pk: (0, end_idx - start_idx)},\n pk_loss_fn)\n local_kernels.append((pk_kernel, pk_kernel_fn.mode, start_idx, end_idx))\n\n def kernel(x, y):\n kernel_res = []\n for kernel, mode, start_idx, end_idx in local_kernels:\n v = kernel(x[start_idx:end_idx], y[start_idx:end_idx])\n if mode == 'norm':\n v = v * jnp.identity(end_idx - start_idx)\n elif mode == 'vector':\n v = jnp.diag(v)\n kernel_res.append(v)\n return jax.scipy.linalg.block_diag(*kernel_res)\n\n return kernel\n"
] |
[
[
"numpy.random.randn",
"numpy.random.rand",
"numpy.random.choice"
]
] |
drorlab/pensa |
[
"0d4c138793d6e4f05f85cb9ece2bf4f0ddc1882f"
] |
[
"pensa/statesinfo/discrete_states.py"
] |
[
"import numpy as np\nfrom queue import PriorityQueue \nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.signal import argrelextrema\nimport os\nfrom pensa.features import *\n\n\n# -- Functions to cluster feature distributions into discrete states --\n\n\ndef _smooth(x,window_len,window=None):\n \"\"\"\n Smooth data so that true extrema can be found without any noise\n\n\n Parameters\n ----------\n x : list\n Distribution to be smoothed.\n window_len : int\n number of bins to smooth over.\n window : str, optional\n Type of window to use for the smoothing. The default is None=Hanning.\n\n Raises\n ------\n ValueError\n If window argument is not recognised.\n\n Returns\n -------\n list\n Smoothed distribution.\n\n \"\"\"\n if window is None:\n window_type='hanning'\n if x.ndim != 1:\n raise ValueError\n if x.size < window_len:\n raise ValueError\n if window_len<3:\n return x\n if not window_type in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:\n raise ValueError\n s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]\n if window_type == 'flat': #moving average\n w=np.ones(window_len,'d')\n else:\n w=eval('np.'+window_type+'(window_len)')\n y=np.convolve(w/w.sum(),s,mode='valid')\n return y\n\ndef _find_nearest(distr, value):\n \"\"\"\n Find the nearest value in a distribution to an arbitrary reference value.\n\n Parameters\n ----------\n distr : list\n The distribution to locate a certain point within.\n value : float\n Reference value for locating within the distribution.\n\n Returns\n -------\n float\n Closest value to reference value in distribution.\n\n \"\"\"\n array = np.array(distr)\n idx = (np.abs(array - value)).argmin()\n return array[idx]\n\ndef _printKclosest(arr,n,x,k): \n \"\"\"\n Print K closest values to a specified value. \n\n Parameters\n ----------\n arr : list\n The distribution of values.\n n : int\n Search through the first n values of arr for k closest values.\n x : float\n The reference value for which the closest values are sought.\n k : int\n Number of closest values desired.\n\n Returns\n -------\n a : list\n The closest k values to x.\n\n \"\"\"\n a=[]\n # Make a max heap of difference with \n # first k elements. \n pq = PriorityQueue() \n for neighb in range(k): \n pq.put((-abs(arr[neighb]-x),neighb)) \n # Now process remaining elements \n for neighb in range(k,n): \n diff = abs(arr[neighb]-x) \n p,pi = pq.get() \n curr = -p \n # If difference with current \n # element is more than root, \n # then put it back. \n if diff>curr: \n pq.put((-curr,pi)) \n continue\n else: \n # Else remove root and insert \n pq.put((-diff,neighb)) \n # Print contents of heap. 
\n while(not pq.empty()): \n p,q = pq.get() \n a.append(str(\"{} \".format(arr[q])))\n return a\n\ndef _gauss(x, x0, sigma, a):\n \"\"\"\n Create a Gaussian distribution for a given x-axis linsapce and Gaussian parameters.\n\n Parameters\n ----------\n x : list\n x-axis distribution.\n x0 : float\n Mean x-value for Gaussian.\n sigma : float\n Gaussian sigma, related to FWHM.\n a : float\n Gaussian amplitude.\n\n Returns\n -------\n gaussian : list\n y-axis Gaussian distribution over the x-axis space.\n\n \"\"\"\n\n if sigma != 0:\n gaussian = abs(a*np.exp(-(x-x0)**2/(2*sigma**2)))\n return gaussian\n\ndef _bimodal(x,mu1,sigma1,A1,mu2,sigma2,A2):\n \"\"\" Two gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)\n\ndef _trimodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3):\n \"\"\" Three gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)\n\ndef _quadmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4):\n \"\"\" Four gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)\n\ndef _quinmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5):\n \"\"\" Five gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)\n\ndef _sexmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6):\n \"\"\" Six gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6) \n\ndef _septmodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7):\n \"\"\" Seven gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7) \n\ndef _octomodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8):\n \"\"\" Eight gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8) \n\ndef _nonamodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8,mu9,sigma9,A9):\n \"\"\" Nine gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8)+_gauss(x,mu9,sigma9,A9) \n\ndef _decamodal(x,mu1,sigma1,A1,mu2,sigma2,A2,mu3,sigma3,A3,mu4,sigma4,A4,mu5,sigma5,A5,mu6,sigma6,A6,mu7,sigma7,A7,mu8,sigma8,A8,mu9,sigma9,A9,mu10,sigma10,A10):\n \"\"\" Ten gaussians \"\"\"\n return _gauss(x,mu1,sigma1,A1)+_gauss(x,mu2,sigma2,A2)+_gauss(x,mu3,sigma3,A3)+_gauss(x,mu4,sigma4,A4)+_gauss(x,mu5,sigma5,A5)+_gauss(x,mu6,sigma6,A6)+_gauss(x,mu7,sigma7,A7)+_gauss(x,mu8,sigma8,A8)+_gauss(x,mu9,sigma9,A9)+_gauss(x,mu10,sigma10,A10) \n\ndef _integral(x, mu, sigma, A):\n \"\"\"\n Gaussian integral for evaluating state probabilities. 
Integration between\n negative infinity and x.\n\n Parameters\n ----------\n x : float\n Upper limit for integral.\n mu : float\n Gaussian mean.\n sigma : float\n Gaussian sigma.\n A : float\n Gaussian amplitude.\n\n Returns\n -------\n integral : float\n Area under Gaussian from negative infinity to x.\n\n \"\"\"\n integral = (A/2) * (1 + math.erf((x - mu) / (sigma * np.sqrt(2))))\n return integral\n\n\n\ndef _gauss_fit(distribution, traj1_len, gauss_bin, gauss_smooth): \n \"\"\"\n Obtaining the gaussians to fit the distribution into a Gaussian mix.\n Bin number is chosen based on 3 degree resolution (120 bins for 360 degrees)\n\n Parameters\n ----------\n distribution : list\n Distribution of interest for the fitting.\n gauss_bin : int\n Bin the distribution into gauss_bin bins.\n gauss_smooth : int\n Smooth the distribution according to a Hanning window length of gauss_smooth.\n\n Returns\n -------\n gaussians : list\n y-axis values for the Gaussian distribution.\n xline : list\n x-axis values for the Gaussian distribution.\n\n \"\"\"\n \n distr1 = distribution[:traj1_len]\n distr2 = distribution[traj1_len:]\n \n histox = np.histogram(distribution, bins=gauss_bin, density=True)[1]\n histo1 = np.histogram(distr1, bins=gauss_bin, range=(min(histox),max(histox)), density=True)[0]\n histo2 = np.histogram(distr2, bins=gauss_bin, range=(min(histox),max(histox)), density=True)[0]\n \n combined_histo = [(height1 + height2)/2 for height1,height2 in zip(histo1,histo2)] \n\n distributionx = _smooth(histox[0:-1], gauss_smooth)\n ## Setting histrogram minimum to zero with uniform linear shift (for noisey distributions)\n distributiony = _smooth(combined_histo-min(combined_histo), gauss_smooth)\n \n maxima = [distributiony[item] for item in argrelextrema(distributiony, np.greater)][0]\n ## Obtain Gaussian guess params\n mean_pop=[]\n sigma_pop=[]\n num_closest_neighb=28\n ## Locate sigma from FWHM for each maxima\n sig_vals=[]\n for extrema in maxima:\n ## Finding closest values to half maximum\n closest_yvals = _printKclosest(distributiony, len(distributiony), extrema*0.5, num_closest_neighb)\n closest_xvals = [np.where(distributiony==float(closesty))[0][0] for closesty in closest_yvals]\n\n mean_xval = distributionx[np.where(distributiony==extrema)[0][0]]\n half_max_xval = _find_nearest(distributionx[closest_xvals],mean_xval)\n \n FWHM = np.absolute(half_max_xval - mean_xval)\n sigma = FWHM /(2*(np.sqrt(2*np.log(2)))) \n sig_vals.append(sigma) \n \n ##the mean x of the gaussian is the value of x at the peak of y\n mean_vals=[distributionx[np.where(distributiony==extrema)[0][0]] for extrema in maxima]\n for extr_num in range(len(maxima)):\n mean_pop.append(mean_vals[extr_num])\n sigma_pop.append(sig_vals[extr_num])\n \n ##x is the space of angles\n Gauss_xvals=np.linspace(min(distribution),max(distribution),10000) \n ##choosing the fitting mode\n peak_number=[_gauss,_bimodal,_trimodal,_quadmodal,_quinmodal,_sexmodal,_septmodal,_octomodal,_nonamodal,_decamodal]\n mode=peak_number[len(sig_vals)-1] \n expected=[]\n \n for param_num in range(len(mean_pop)):\n expected.append(mean_pop[param_num])\n expected.append(sigma_pop[param_num])\n expected.append(maxima[param_num]) \n\n params, cov = curve_fit(mode,distributionx,distributiony,expected,maxfev=1000000) \n\n gaussians=[]\n gauss_num_space=np.linspace(0,(len(params))-3,int(len(params)/3)) \n\n for gauss_index in gauss_num_space:\n intmax = _integral(max(distribution),\n params[0+int(gauss_index)], \n params[1+int(gauss_index)], \n 
params[2+int(gauss_index)])\n \n intmin = _integral(min(distribution),\n params[0+int(gauss_index)],\n params[1+int(gauss_index)], \n params[2+int(gauss_index)])\n \n if np.abs(intmax-intmin)>0.02:\n gaussians.append(_gauss(Gauss_xvals, \n params[0+int(gauss_index)],\n params[1+int(gauss_index)], \n params[2+int(gauss_index)]))\n \n return gaussians, Gauss_xvals\n\n\ndef smart_gauss_fit(distr, traj1_len, gauss_bins=180, gauss_smooth=None, write_name=None):\n \"\"\"\n Obtaining the gaussians to fit the distribution into a Gaussian mix. \n Bin number automatically adjusted if the Gaussian fit experiences errors.\n\n Parameters\n ----------\n distr : list\n Distribution of interest for the fitting.\n gauss_bins : int, optional\n Bin the distribution into gauss_bin bins. The default is 180.\n gauss_smooth : int, optional\n Smooth the distribution according to a Hanning window length of gauss_smooth.\n The default is ~10% of gauss_bins.\n write_name : str, optional\n Used in warning to notify which feature has had binning altered during clustering.\n The default is None.\n\n Returns\n -------\n gaussians : list\n y-axis values for the Gaussian distribution.\n xline : list\n x-axis values for the Gaussian distribution.\n\n \"\"\"\n \n smooth_origin = gauss_smooth\n bin_origin = gauss_bins\n if gauss_smooth is None:\n gauss_smooth = int(gauss_bins*0.10)\n \n trial = 0\n attempt_no = 0\n \n ##making a list of +/- values for bin trials to ensure minimal change\n bin_adjust_up = np.array(range(1,10000))\n bin_adjust_down = bin_adjust_up.copy()*-1\n bin_adjust = np.insert(bin_adjust_up, np.arange(len(bin_adjust_down)), bin_adjust_down)\n \n ##if clustering does not work for a given bin number then adjust the bin number\n while trial < 1:\n try:\n gaussians, Gauss_xvals = _gauss_fit(distr, traj1_len, gauss_bins, gauss_smooth)\n trial += 1\n except:\n attempt_no += 1\n trial = 0\n gauss_bins = bin_origin + bin_adjust[attempt_no]\n \n ##only warn about clustering changes if specific parameters were input\n if bin_origin != 180 or smooth_origin is not None:\n if attempt_no > 0.1*bin_origin:\n if write_name is None:\n print('Warning: Altered gauss_bins by >10% for clustering.\\nYou might want to check cluster plot.')\n else:\n print('Warning: Altered gauss_bins by >10% for clustering of '+write_name+'.\\nYou might want to check cluster plot.')\n \n return gaussians, Gauss_xvals\n\ndef get_intersects(gaussians, distribution, Gauss_xvals, write_plots=None,write_name=None):\n \"\"\"\n Obtain the intersects of a mixture of Gaussians which have been obtained\n from decomposing a distribution into Gaussians. Additional state limits are\n added at the beginning and end of the distribution.\n\n Parameters\n ----------\n gaussians : list of lists\n A list of X gaussians.\n distribution : list\n The distribution that Gaussians have been obtained from.\n xline : list\n The x-axis linespace that the distribution spans.\n write_plots : bool, optional\n If true, visualise the states over the raw distribution. The default is None.\n write_name : str, optional\n Filename for write_plots. 
The default is None.\n\n\n Returns\n -------\n all_intersects : list\n All the Gaussian intersects.\n\n \"\"\"\n ##adding the minimum angle value as the first boundary\n all_intersects=[min(distribution)]\n mean_gauss_xval=[]\n for gauss_num in range(len(gaussians)):\n mean_gauss_xval.append(Gauss_xvals[list(gaussians[gauss_num]).index(max(gaussians[gauss_num]))])\n \n ##sort gaussians in order of their mean xval \n reorder_gaussians=[gaussians[mean_gauss_xval.index(mean)] for mean in sorted(mean_gauss_xval)] \n # reorder_gaussians=[gaussians[gauss_num] for gauss_num in reorder_indices]\n \n for gauss_index in range(len(reorder_gaussians)-1): \n ##Find indices between neighbouring gaussians\n idx = np.argwhere(np.diff(np.sign(reorder_gaussians[gauss_index] - reorder_gaussians[gauss_index+1]))).flatten()\n if len(idx)==1:\n all_intersects.append(float(Gauss_xvals[idx][0]) )\n elif len(idx)!=0:\n ## Select the intersect with the maximum probability\n intersect_ymax=max([reorder_gaussians[gauss_index][intersect] for intersect in idx])\n intersect_ymax_index=[item for item in idx if reorder_gaussians[gauss_index][item]==intersect_ymax] \n all_intersects.append(float(Gauss_xvals[intersect_ymax_index]))\n ## For gaussian neighbours that don't intersect, set state limit as center between maxima\n elif len(idx)==0: \n gauss_max1=list(reorder_gaussians[gauss_index]).index(max(reorder_gaussians[gauss_index]))\n gauss_max2=list(reorder_gaussians[gauss_index+1]).index(max(reorder_gaussians[gauss_index+1]))\n intersect = 0.5* np.abs(Gauss_xvals[gauss_max2] + Gauss_xvals[gauss_max1])\n all_intersects.append(float(intersect))\n \n all_intersects.append(max(distribution)) \n \n if write_plots is True:\n if not os.path.exists('ssi_plots/'):\n os.makedirs('ssi_plots/')\n plt.figure() \n plt.ion()\n plt.hist(distribution,bins=360, density=True, alpha=0.5)\n for gauss_index in range(len(reorder_gaussians)):\n plt.plot(Gauss_xvals, reorder_gaussians[gauss_index], lw=2) \n for intersect_index in range(len(all_intersects)):\n plt.axvline(all_intersects[intersect_index],color='k',lw=1,ls='--') \n plt.xlabel('Radians')\n plt.ylabel('Count')\n plt.title(write_name) \n plt.ioff()\n plt.savefig('ssi_plots/'+write_name+\".png\")\n plt.close()\n \n return all_intersects\n \ndef determine_state_limits(distr, traj1_len, gauss_bins=180, gauss_smooth=None, write_plots=None, write_name=None): \n \"\"\"\n Cluster a distribution into discrete states with well-defined limits.\n The function handles both residue angle distributions and water \n distributions. For waters, the assignment of an additional non-angular \n state is performed if changes in pocket occupancy occur. The clustering\n requires that the distribution can be decomposed to a mixture of Gaussians. \n\n Parameters\n ----------\n distr : list\n Distribution for specific feature.\n gauss_bins : int, optional\n Number of histogram bins to assign for the clustering algorithm. \n The default is 180.\n gauss_smooth : int, optional\n Number of bins to perform smoothing over. The default is ~10% of gauss_bins.\n write_plots : bool, optional\n If true, visualise the states over the raw distribution. The default is None.\n write_name : str, optional\n Filename for write_plots. 
The default is None.\n\n Returns\n -------\n list\n State intersects for each cluster in numerical order.\n\n \"\"\"\n new_dist=distr.copy()\n distribution=[item for item in new_dist if item != 10000.0]\n ##obtaining the gaussian fit\n gaussians, Gauss_xvals = smart_gauss_fit(distribution, traj1_len, gauss_bins, gauss_smooth, write_name)\n ##discretising each state by gaussian intersects \n intersection_of_states = get_intersects(gaussians, distribution, Gauss_xvals, write_plots, write_name) \n if distr.count(10000.0)>=1:\n intersection_of_states.append(20000.0) \n \n order_intersect=np.sort(intersection_of_states) \n return list(order_intersect)\n\n# -- Functions to operate on discrete states --\n\ndef _check(value,x,y):\n \"\"\"\n Check if a value is between x and y\n\n Parameters\n ----------\n value : float\n Value of interest.\n x : float\n Limit x.\n y : float\n Limit y.\n\n Returns\n -------\n int\n Numerical bool if value is between limits x and y.\n\n \"\"\"\n if x <= value <= y:\n return 1\n else:\n return 0\n\n\ndef calculate_entropy(state_limits,distribution_list):\n \"\"\"\n Calculate the Shannon entropy of a distribution as the summation of all \n -p*log(p) where p refers to the probability of a conformational state. \n \n Parameters\n ----------\n state_limits : list of lists\n A list of values that represent the limits of each state for each\n distribution.\n distribution_list : list of lists\n A list containing multivariate distributions (lists) for a particular\n residue or water\n\n Returns\n -------\n entropy : float\n The Shannon entropy value \n\n \"\"\"\n\n state_lims = state_limits.copy()\n dist_list = distribution_list.copy()\n ## Ignore singular states and corresponding distributions\n state_no = 0 \n while state_no < len(state_lims):\n \n if len(state_lims[state_no])==2:\n del dist_list[state_no]\n del state_lims[state_no]\n \n else:\n state_no +=1\n \n entropy=0.0\n if len(state_lims)!=0:\n ## subtract 1 since number of states = number of partitions - 1\n mut_prob=np.zeros(([len(state_lims[i])-1 for i in range(len(state_lims))])) \n ##iterating over every multidimensional index in the array\n it = np.nditer(mut_prob, flags=['multi_index'])\n \n while not it.finished:\n arrayindices=list(it.multi_index)\n limit_occupancy_checks=np.zeros((len(arrayindices), len(dist_list[0])))\n \n for dist_num in range(len(arrayindices)):\n limits=[state_lims[dist_num][arrayindices[dist_num]], state_lims[dist_num][arrayindices[dist_num]+1]]\n distribution=dist_list[dist_num]\n \n for frame_num in range(len(distribution)):\n limit_occupancy_checks[dist_num][frame_num]= _check(distribution[frame_num],limits[0],limits[1]) \n mut_prob[it.multi_index]= sum(np.prod(limit_occupancy_checks,axis=0)) / len(limit_occupancy_checks[0])\n ##calculating the entropy as the summation of all -p*log(p) \n \n if mut_prob[it.multi_index] != 0:\n entropy+=-1*mut_prob[it.multi_index]*math.log(mut_prob[it.multi_index],2)\n it.iternext()\n return entropy\n\n\n\n"
] |
[
[
"numpy.sqrt",
"matplotlib.pyplot.plot",
"scipy.signal.argrelextrema",
"numpy.exp",
"numpy.histogram",
"scipy.optimize.curve_fit",
"numpy.where",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.log",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"numpy.array",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.absolute",
"matplotlib.pyplot.axvline",
"numpy.nditer",
"numpy.abs",
"numpy.sort",
"numpy.ones",
"numpy.sign",
"matplotlib.pyplot.ioff",
"numpy.prod",
"matplotlib.pyplot.xlabel"
]
] |
janismac/ksp_rtls_launch_to_rendezvous |
[
"195ebfb5aacf1a857aaaf0a69bf071d93d887efd"
] |
[
"predict_orbit_BCBF.py"
] |
[
"import numpy as np\nimport scipy.integrate\n\ndef predict_orbit_BCBF(vessel, frame):\n r0 = vessel.position(frame)\n v0 = vessel.velocity(frame)\n\n omega = vessel.orbit.body.rotational_speed\n mu = vessel.orbit.body.gravitational_parameter\n\n y0 = list(r0)+list(v0)\n\n t_grid = np.arange(0.0,60.0*20,5.0)\n result = scipy.integrate.odeint(lambda y,t: vacuum_coast_BCBF_ODE(y,omega,mu), y0, t_grid, atol=1e-5, rtol=1e-5)\n\n # return vessel position for the next 10 to 20 minutes\n return result[t_grid >= 600.0]\n\ndef vacuum_coast_BCBF_ODE(y,omega,mu):\n r = y[0:3]\n v = y[3:6]\n w = np.array([0.0,-omega,0.0])\n a_gravity = -r * mu * np.dot(r,r)**(-1.5)\n a_coriolis = -2 * np.cross(w,v)\n a_centrifugal = -np.cross(w,np.cross(w,r))\n a = a_gravity + a_coriolis + a_centrifugal\n return np.hstack((v,a))\n"
] |
[
[
"numpy.dot",
"numpy.hstack",
"numpy.arange",
"numpy.cross",
"numpy.array"
]
] |
acolono/ACONA-scheduler-intelligence |
[
"1ccaaca21a2b4e10b30242294d5fc8dc087dbd8d"
] |
[
"dags/acona_notifications.py"
] |
[
"from airflow.decorators import dag, task\nfrom airflow.utils.dates import days_ago\nfrom airflow.operators.bash import BashOperator\nfrom airflow.providers.postgres.operators.postgres import PostgresOperator\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import Variable\nfrom datetime import datetime, timedelta\nfrom acona_postgres_tools import acona_truncate_table, acona_data_write, acona_fetch_data, acona_fetch_one\n# [END import_module]\n\n\n# [START default_args]\n# These args will get passed on to each operator\n# You can override them on a per-task basis during operator initialization\ndefault_args = {\n 'owner': 'airflow'\n}\n# [END default_args]\n\n\n# [START instantiate_dag]\n@dag(\ndefault_args=default_args,\nstart_date=days_ago(2),\ntags=['notifications'],\nschedule_interval='30 4 * * 0')\n\ndef acona_notifications():\n\n # [END instantiate_dag]\n\n # [START notify]\n @task()\n def notify(metric):\n \"\"\"\n #### Get data for current date from Warehouse\n \"\"\"\n\n import json\n import requests\n import os\n import urllib.parse\n import pandas as pd\n import numpy as np\n import re\n\n WAREHOUSE_TOKEN = Variable.get(\"WAREHOUSE_TOKEN\")\n WAREHOUSE_URL = Variable.get(\"WAREHOUSE_URL\")\n\n # Load urls (for specific domain only?)\n urls = os.popen('curl ' + WAREHOUSE_URL + '/rpc/acona_urls -H \"Authorization: Bearer ' + WAREHOUSE_TOKEN + '\"').read()\n\n notifications = {}\n\n date = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d')\n #date = '2021-10-22'\n\n #Load metric data\n sql = \"select * from internal.variables where var_data_table = '{}'\".format(metric)\n values = acona_fetch_one(sql)\n metric_id = values[0]\n #TODO: Add metric title to internal.variables table and use it here.\n metric_mn = values[1]\n\n for url in json.loads(urls):\n #Load historic data\n url = url['url']\n sql = \"select value from api.\" + metric + \" where date = '{}' and url = '{}'\".format(date, url)\n values = acona_fetch_one(sql)\n if values:\n value = values[0]\n # Load forecasted upper value.\n sql = \"select value from api.\" + metric + \"_f_upper where date = '{}' and url = '{}'\".format(date, url)\n upper_values = acona_fetch_one(sql)\n upper_value = None\n if upper_values:\n upper_value = upper_values[0]\n # Load forecasted lower value.\n sql = \"select value from api.\" + metric + \"_f_lower where date = '{}' and url = '{}'\".format(date, url)\n lower_values = acona_fetch_one(sql)\n lower_value = None\n if lower_values:\n lower_value = lower_values[0]\n # Compare values.\n notification_id = re.sub('[^A-Za-z0-9]+', '', str(date)) + '_' + re.sub('[^A-Za-z0-9]+', '', str(url)) + str(metric) + '_lower'\n if lower_value != None and value < lower_value:\n #write notification.\n data = {'notification_id': [notification_id], 'url': [url], 'date': [date], 'variable_id': [metric_id]}\n dataf = pd.DataFrame(data)\n acona_data_write('api.notifications', dataf)\n text_en = 'Attention, the value for ' + metric_mn + ' is lower than expected. 
Please check if something is wrong.'\n title_en = 'Value is lower as expected'\n data = {'notification_id': [notification_id], 'langcode': ['en'], 'title': [title_en], 'text': [text_en]}\n dataf = pd.DataFrame(data)\n acona_data_write('api.notification_texts', dataf)\n if upper_value != None and value > upper_value:\n #write notification.\n data = {'notification_id': [notification_id], 'url': [url], 'date': [date], 'variable_id': [metric_id]}\n dataf = pd.DataFrame(data)\n acona_data_write('api.notifications', dataf)\n text_en = 'Attention, the value for ' + metric_mn + ' is higher than expected.'\n title_en = 'Value is higher as expected'\n data = {'notification_id': [notification_id], 'langcode': ['en'], 'title': [title_en], 'text': [text_en]}\n dataf = pd.DataFrame(data)\n acona_data_write('api.notification_texts', dataf)\n\n # write calc dates\n data = {'variable': 'api.notifications', 'date': date, 'url': url}\n dataf = pd.DataFrame(data)\n acona_data_write('internal.var_calc_dates', dataf)\n # [END notify]\n\n # [START main_flow]\n\n # Supported metrics. Todo: Load from data warehouse.\n metrics = {\n 'metric_d_page_views',\n 'metric_d_bounces',\n 'metric_d_page_views',\n 'metric_d_visits',\n 'metric_d_unique_visits',\n 'metric_d_conversions',\n 'metric_d_visit_time_total',\n 'metric_d_visit_time_average',\n 'metric_d_visits_converted',\n 'metric_d_bounce_rate'\n }\n\n #metrics = {\n #'metric_d_page_views'\n #}\n\n # Loop over metrics, forecast values and save in data warehouse\n for metric in metrics:\n notify(metric)\n\n # [END main_flow]\n\n# [START dag_invocation]\nacona_notifications = acona_notifications()\n# [END dag_invocation]\n"
] |
[
[
"pandas.DataFrame"
]
] |
martinwe001/U-Net-Building-Segmentation |
[
"8bd32a8a2bca25a4c0bd9b22fee5a09d77bffbad"
] |
[
"test.py"
] |
[
"import os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nfrom glob import glob\nfrom tqdm import tqdm\nimport tensorflow_addons as tfa\n\n\nif __name__ == \"__main__\":\n \"\"\" Load the test images \"\"\"\n test_images = glob(\"building-segmentation/test/test_64/*\")\n\n \"\"\" Load the model \"\"\"\n\n model = 'unet'\n epochs = 300\n res = 64\n\n model = tf.keras.models.load_model(f\"{model}_models/{model}_{epochs}_epochs_{res}.h5\", custom_objects={'MaxUnpooling2D': tfa.layers.MaxUnpooling2D})\n\n for path in tqdm(test_images, total=len(test_images)):\n x = cv2.imread(path, cv2.IMREAD_COLOR)\n original_image = x\n h, w, _ = x.shape\n\n x = cv2.resize(x, (64, 64))\n #x = x/255.0\n x = x.astype(np.float32)\n\n x = np.expand_dims(x, axis=0)\n pred_mask = model.predict(x)[0]\n\n pred_mask = np.concatenate(\n [\n pred_mask,\n pred_mask,\n pred_mask\n ], axis=2)\n pred_mask = (pred_mask > 0.5) * 255\n pred_mask = pred_mask.astype(np.float32)\n pred_mask = cv2.resize(pred_mask, (w, h))\n\n original_image = original_image.astype(np.float32)\n\n alpha_image = 0.8\n alpha_mask = 1\n cv2.addWeighted(pred_mask, alpha_mask, original_image, alpha_image, 0, original_image)\n\n name = path.split(\"/\")[-1]\n cv2.imwrite(f\"save_images/{name}\", original_image)\n"
] |
[
[
"numpy.concatenate",
"tensorflow.keras.models.load_model",
"numpy.expand_dims"
]
] |
AndrewZhaoLuo/OnnxSnippets |
[
"40e2231da8dc3d9152bc71daff5f4b154b97a5e4"
] |
[
"onnx_export/export_mobilenetv3_block.py"
] |
[
"import torch\nfrom pytorch.common.blocks import mobilenetv3_block\n\nfrom onnx_export import common\n\n\nclass ExportMobilenetV3:\n default_conditions = {\n \"in_channels\": 64,\n \"spatial_dimension\": 128,\n }\n\n sequential_conditions = {\n \"in_channels\": [32, 64, 128],\n }\n\n def get_all_conditions(self):\n conditions = set()\n\n for condition_name in self.sequential_conditions:\n for v in self.sequential_conditions[condition_name]:\n new_condition = self.default_conditions.copy()\n new_condition[condition_name] = v\n conditions.add(tuple(new_condition.items()))\n\n return conditions\n\n def export_model(\n self, torch_model, ndim, features_in, spatial_dimensions, name, dir=\"export/\"\n ):\n dims = [1, features_in] + [spatial_dimensions] * ndim\n\n # Input to the model\n x = torch.randn(*dims, requires_grad=True)\n common.export_model(\n torch_model,\n x,\n name,\n dir=dir,\n dynamic_axes={\n \"input\": {\n 0: \"batch_size\",\n 2: \"height_in\",\n 3: \"width_in\",\n }, # variable length axes\n \"output\": {0: \"batch_size\", 2: \"height_out\", 3: \"width_out\"},\n },\n )\n\n def export_mobilenetv3_block(\n self,\n in_channels,\n spatial_dimension,\n dir=\"./export\",\n ):\n model = mobilenetv3_block.MobilenetV3Block(in_channels)\n name = f\"mobilenetv3_block_inc={in_channels}_outc={in_channels}\"\n\n self.export_model(model, 2, in_channels, spatial_dimension, name, dir=dir)\n\n\nif __name__ == \"__main__\":\n exporter = ExportMobilenetV3()\n conds = exporter.get_all_conditions()\n for cond in conds:\n print(\"Exporting:\", cond)\n cond = dict(cond)\n exporter.export_mobilenetv3_block(**cond, dir=\"export/mobilenetv3_block\")\n"
] |
[
[
"torch.randn"
]
] |
dalonsoa/tensorlayer |
[
"066c09be1eea1b49914b2a6e806329a599edce58"
] |
[
"tensorlayer/app/computer_vision_object_detection/yolov4.py"
] |
[
"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"YOLOv4 for MS COCO.\n\n# Reference:\n- [tensorflow-yolov4-tflite](\n https://github.com/hunglc007/tensorflow-yolov4-tflite)\n\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport tensorlayer as tl\nfrom tensorlayer.activation import mish\nfrom tensorlayer.layers import Conv2d, MaxPool2d, BatchNorm2d, ZeroPad2d, UpSampling2d, Concat, Input, Elementwise\nfrom tensorlayer.models import Model\nfrom tensorlayer import logging\n\nINPUT_SIZE = 416\nweights_url = {'link': 'https://pan.baidu.com/s/1MC1dmEwpxsdgHO1MZ8fYRQ', 'password': 'idsz'}\n\n\ndef upsample(input_layer):\n return UpSampling2d(scale=2)(input_layer)\n\n\ndef convolutional(\n input_layer, filters_shape, downsample=False, activate=True, bn=True, activate_type='leaky', name=None\n):\n if downsample:\n input_layer = ZeroPad2d(((1, 0), (1, 0)))(input_layer)\n padding = 'VALID'\n strides = 2\n else:\n strides = 1\n padding = 'SAME'\n\n if bn:\n b_init = None\n else:\n b_init = tl.initializers.constant(value=0.0)\n\n conv = Conv2d(\n n_filter=filters_shape[-1], filter_size=(filters_shape[0], filters_shape[1]), strides=(strides, strides),\n padding=padding, b_init=b_init, name=name\n )(input_layer)\n\n if bn:\n if activate ==True:\n if activate_type == 'leaky':\n conv = BatchNorm2d(act='lrelu0.1')(conv)\n elif activate_type == 'mish':\n conv = BatchNorm2d(act=mish)(conv)\n else:\n conv = BatchNorm2d()(conv)\n return conv\n\n\ndef residual_block(input_layer, input_channel, filter_num1, filter_num2, activate_type='leaky'):\n short_cut = input_layer\n conv = convolutional(input_layer, filters_shape=(1, 1, input_channel, filter_num1), activate_type=activate_type)\n conv = convolutional(conv, filters_shape=(3, 3, filter_num1, filter_num2), activate_type=activate_type)\n\n residual_output = Elementwise(tf.add)([short_cut, conv])\n return residual_output\n\n\ndef cspdarknet53(input_data=None):\n\n input_data = convolutional(input_data, (3, 3, 3, 32), activate_type='mish')\n input_data = convolutional(input_data, (3, 3, 32, 64), downsample=True, activate_type='mish')\n\n route = input_data\n route = convolutional(route, (1, 1, 64, 64), activate_type='mish', name='conv_rote_block_1')\n input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')\n\n for i in range(1):\n input_data = residual_block(input_data, 64, 32, 64, activate_type=\"mish\")\n input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')\n\n input_data = Concat()([input_data, route])\n input_data = convolutional(input_data, (1, 1, 128, 64), activate_type='mish')\n input_data = convolutional(input_data, (3, 3, 64, 128), downsample=True, activate_type='mish')\n route = input_data\n route = convolutional(route, (1, 1, 128, 64), activate_type='mish', name='conv_rote_block_2')\n input_data = convolutional(input_data, (1, 1, 128, 64), activate_type='mish')\n for i in range(2):\n input_data = residual_block(input_data, 64, 64, 64, activate_type=\"mish\")\n input_data = convolutional(input_data, (1, 1, 64, 64), activate_type='mish')\n input_data = Concat()([input_data, route])\n\n input_data = convolutional(input_data, (1, 1, 128, 128), activate_type='mish')\n input_data = convolutional(input_data, (3, 3, 128, 256), downsample=True, activate_type='mish')\n route = input_data\n route = convolutional(route, (1, 1, 256, 128), activate_type='mish', name='conv_rote_block_3')\n input_data = convolutional(input_data, (1, 1, 256, 128), activate_type='mish')\n for i in range(8):\n input_data = 
residual_block(input_data, 128, 128, 128, activate_type=\"mish\")\n input_data = convolutional(input_data, (1, 1, 128, 128), activate_type='mish')\n input_data = Concat()([input_data, route])\n\n input_data = convolutional(input_data, (1, 1, 256, 256), activate_type='mish')\n route_1 = input_data\n input_data = convolutional(input_data, (3, 3, 256, 512), downsample=True, activate_type='mish')\n route = input_data\n route = convolutional(route, (1, 1, 512, 256), activate_type='mish', name='conv_rote_block_4')\n input_data = convolutional(input_data, (1, 1, 512, 256), activate_type='mish')\n for i in range(8):\n input_data = residual_block(input_data, 256, 256, 256, activate_type=\"mish\")\n input_data = convolutional(input_data, (1, 1, 256, 256), activate_type='mish')\n input_data = Concat()([input_data, route])\n\n input_data = convolutional(input_data, (1, 1, 512, 512), activate_type='mish')\n route_2 = input_data\n input_data = convolutional(input_data, (3, 3, 512, 1024), downsample=True, activate_type='mish')\n route = input_data\n route = convolutional(route, (1, 1, 1024, 512), activate_type='mish', name='conv_rote_block_5')\n input_data = convolutional(input_data, (1, 1, 1024, 512), activate_type='mish')\n for i in range(4):\n input_data = residual_block(input_data, 512, 512, 512, activate_type=\"mish\")\n input_data = convolutional(input_data, (1, 1, 512, 512), activate_type='mish')\n input_data = Concat()([input_data, route])\n\n input_data = convolutional(input_data, (1, 1, 1024, 1024), activate_type='mish')\n input_data = convolutional(input_data, (1, 1, 1024, 512))\n input_data = convolutional(input_data, (3, 3, 512, 1024))\n input_data = convolutional(input_data, (1, 1, 1024, 512))\n\n maxpool1 = MaxPool2d(filter_size=(13, 13), strides=(1, 1))(input_data)\n maxpool2 = MaxPool2d(filter_size=(9, 9), strides=(1, 1))(input_data)\n maxpool3 = MaxPool2d(filter_size=(5, 5), strides=(1, 1))(input_data)\n input_data = Concat()([maxpool1, maxpool2, maxpool3, input_data])\n\n input_data = convolutional(input_data, (1, 1, 2048, 512))\n input_data = convolutional(input_data, (3, 3, 512, 1024))\n input_data = convolutional(input_data, (1, 1, 1024, 512))\n\n return route_1, route_2, input_data\n\n\ndef YOLOv4(NUM_CLASS, pretrained=False):\n\n input_layer = Input([None, INPUT_SIZE, INPUT_SIZE, 3])\n route_1, route_2, conv = cspdarknet53(input_layer)\n\n route = conv\n conv = convolutional(conv, (1, 1, 512, 256))\n conv = upsample(conv)\n route_2 = convolutional(route_2, (1, 1, 512, 256), name='conv_yolo_1')\n conv = Concat()([route_2, conv])\n\n conv = convolutional(conv, (1, 1, 512, 256))\n conv = convolutional(conv, (3, 3, 256, 512))\n conv = convolutional(conv, (1, 1, 512, 256))\n conv = convolutional(conv, (3, 3, 256, 512))\n conv = convolutional(conv, (1, 1, 512, 256))\n\n route_2 = conv\n conv = convolutional(conv, (1, 1, 256, 128))\n conv = upsample(conv)\n route_1 = convolutional(route_1, (1, 1, 256, 128), name='conv_yolo_2')\n conv = Concat()([route_1, conv])\n\n conv = convolutional(conv, (1, 1, 256, 128))\n conv = convolutional(conv, (3, 3, 128, 256))\n conv = convolutional(conv, (1, 1, 256, 128))\n conv = convolutional(conv, (3, 3, 128, 256))\n conv = convolutional(conv, (1, 1, 256, 128))\n\n route_1 = conv\n conv = convolutional(conv, (3, 3, 128, 256), name='conv_route_1')\n conv_sbbox = convolutional(conv, (1, 1, 256, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n conv = convolutional(route_1, (3, 3, 128, 256), downsample=True, name='conv_route_2')\n conv = Concat()([conv, 
route_2])\n\n conv = convolutional(conv, (1, 1, 512, 256))\n conv = convolutional(conv, (3, 3, 256, 512))\n conv = convolutional(conv, (1, 1, 512, 256))\n conv = convolutional(conv, (3, 3, 256, 512))\n conv = convolutional(conv, (1, 1, 512, 256))\n\n route_2 = conv\n conv = convolutional(conv, (3, 3, 256, 512), name='conv_route_3')\n conv_mbbox = convolutional(conv, (1, 1, 512, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n conv = convolutional(route_2, (3, 3, 256, 512), downsample=True, name='conv_route_4')\n conv = Concat()([conv, route])\n\n conv = convolutional(conv, (1, 1, 1024, 512))\n conv = convolutional(conv, (3, 3, 512, 1024))\n conv = convolutional(conv, (1, 1, 1024, 512))\n conv = convolutional(conv, (3, 3, 512, 1024))\n conv = convolutional(conv, (1, 1, 1024, 512))\n\n conv = convolutional(conv, (3, 3, 512, 1024))\n conv_lbbox = convolutional(conv, (1, 1, 1024, 3 * (NUM_CLASS + 5)), activate=False, bn=False)\n\n network = Model(input_layer, [conv_sbbox, conv_mbbox, conv_lbbox])\n\n if pretrained:\n restore_params(network, model_path='model/model.npz')\n\n return network\n\n\ndef restore_params(network, model_path='models.npz'):\n logging.info(\"Restore pre-trained weights\")\n\n try:\n npz = np.load(model_path, allow_pickle=True)\n except:\n print(\"Download the model file, placed in the /model \")\n print(\"Weights download: \", weights_url['link'], \"password:\", weights_url['password'])\n\n txt_path = 'model/yolov4_config.txt'\n f = open(txt_path, \"r\")\n line = f.readlines()\n for i in range(len(line)):\n network.all_weights[i].assign(npz[line[i].strip()])\n logging.info(\" Loading weights %s in %s\" % (network.all_weights[i].shape, network.all_weights[i].name))\n"
] |
[
[
"numpy.load"
]
] |
bjedwards/NetworkX_fork |
[
"6cb4465d73b8adc4692206fdbc8e1a3934d94fe6"
] |
[
"networkx/linalg/tests/test_spectrum.py"
] |
[
"from nose import SkipTest\n\nimport networkx as nx\nfrom networkx.generators.degree_seq import havel_hakimi_graph\n\nclass TestSpectrum(object):\n @classmethod\n def setupClass(cls):\n global numpy\n global assert_equal\n global assert_almost_equal\n try:\n import numpy\n from numpy.testing import assert_equal,assert_almost_equal\n except ImportError:\n raise SkipTest('NumPy not available.')\n\n def setUp(self):\n deg=[3,2,2,1,0]\n self.G=havel_hakimi_graph(deg)\n self.P=nx.path_graph(3)\n self.A=numpy.array([[0, 1, 1, 1, 0], \n [1, 0, 1, 0, 0], \n [1, 1, 0, 0, 0], \n [1, 0, 0, 0, 0], \n [0, 0, 0, 0, 0]])\n\n def test_adjacency_matrix(self):\n \"Conversion to adjacency matrix\"\n assert_equal(nx.adj_matrix(self.G),self.A)\n\n def test_laplacian(self):\n \"Graph Laplacian\"\n NL=numpy.array([[ 3, -1, -1, -1, 0], \n [-1, 2, -1, 0, 0], \n [-1, -1, 2, 0, 0], \n [-1, 0, 0, 1, 0], \n [ 0, 0, 0, 0, 0]])\n assert_equal(nx.laplacian(self.G),NL)\n\n def test_generalized_laplacian(self):\n \"Generalized Graph Laplacian\"\n GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],\n [-0.408, 1.00, -0.50, 0.00 , 0.00], \n [-0.408, -0.50, 1.00, 0.00, 0.00], \n [-0.577, 0.00, 0.00, 1.00, 0.00],\n [ 0.00, 0.00, 0.00, 0.00, 0.00]]) \n assert_almost_equal(nx.generalized_laplacian(self.G),GL,decimal=3)\n \n def test_normalized_laplacian(self):\n \"Generalized Graph Laplacian\"\n GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],\n [-0.408, 1.00, -0.50, 0.00 , 0.00], \n [-0.408, -0.50, 1.00, 0.00, 0.00], \n [-0.577, 0.00, 0.00, 1.00, 0.00],\n [ 0.00, 0.00, 0.00, 0.00, 0.00]]) \n assert_almost_equal(nx.normalized_laplacian(self.G),GL,decimal=3)\n \n\n\n def test_laplacian_spectrum(self):\n \"Laplacian eigenvalues\"\n evals=numpy.array([0, 0, 1, 3, 4])\n e=sorted(nx.laplacian_spectrum(self.G))\n assert_almost_equal(e,evals)\n\n def test_adjacency_spectrum(self):\n \"Adjacency eigenvalues\"\n evals=numpy.array([-numpy.sqrt(2), 0, numpy.sqrt(2)])\n e=sorted(nx.adjacency_spectrum(self.P))\n assert_almost_equal(e,evals)\n\n"
] |
[
[
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.sqrt"
]
] |
Lornatang/DayHR
|
[
"02e81961e0a710f2c82db70c06b505e608db61cc"
] |
[
"Python/AI-ToolBox/computer_vision/image_classification_keras/parameterAdjusting_practice/CNN_v3/CNN_v3.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 1 12:02:27 2018\n\nCNN调参练习version3:\n\n使用一个类似AlexNet和VGG的简单架构\n在CNN_v2的基础上,进行数据增强\n\n@author: zyb_as\n\"\"\"\n\n# -----------------------------------------------------------\n# 基本参数\n# -----------------------------------------------------------\n\ntrainSetRootPath = '../../../dataset/trainSetExample' # 训练集根目录路径,该路径下应该分布着存放各个类别图像数据的文件夹\nvalidSetRootPath = '../../../dataset/validSetExample/' # 验证集根目录路径,该路径下应该分布着存放各个类别图像数据的文件夹\ntargetSize = (224, 224, 3) # 设置缩放大小(拿到的数据集将会是统一的这个尺寸)\ncategoryNum = 3 # 你需要人工确认待识别类别的数量\nbatchSize = 32\nepochNum = 100\n\n\n\n#-----------------------------------------------------------------------------------------\n# image data generator\n# 使用 Keras 的 ImageDataGenerator 方法读取数据,同时进行数据增强\n#-----------------------------------------------------------------------------------------\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# **根据想要尝试的图像增强方法修改这里**\ntrain_datagen = ImageDataGenerator(rescale=1/255.,\nrotation_range = 10, #数据提升时图片随机转动的角度(整数)\nwidth_shift_range = 0.1, #图片水平偏移的幅度(图片宽度的某个比例,浮点数)\nheight_shift_range = 0.1, #图片竖直偏移的幅度(图片高度的某个比例,浮点数)\nshear_range = 0.2, #剪切强度(逆时针方向的剪切变换角度,浮点数)\nzoom_range = 0.2, #随机缩放的幅度(缩放范围[1 - zoom_range, 1+zoom_range])\nhorizontal_flip = True, #进行随机水平翻转\nvertical_flip = False #进行随机竖直翻转\n)\n\nval_datagen = ImageDataGenerator(rescale=1/255.) \n\ntrain_generator = train_datagen.flow_from_directory( \ntrainSetRootPath, #会扫描该目录下的文件,有几个文件就会默认有几类\ntarget_size=(targetSize[0], targetSize[1]), #生成的图片像素大小\nbatch_size=batchSize, #一次生成的图片数目\nclass_mode='categorical') \n \nvalidation_generator = val_datagen.flow_from_directory( \nvalidSetRootPath, \ntarget_size=(targetSize[0], targetSize[1]), \nbatch_size=batchSize, \nclass_mode='categorical')\n\n\n#-----------------------------------------------------------------------------------------\n# Set CallBack(loss history)\n#-----------------------------------------------------------------------------------------\nfrom keras.callbacks import Callback \n\n# 记录训练过程\n# Callback 用于记录每个epoch的 loss 和 accuracy\nclass LossHistory(Callback): \n \n def on_train_begin(self, logs={}): \n self.losses = [] \n self.acces = []\n self.val_losses = [] \n self.val_acces = []\n \n def on_epoch_end(self, batch, logs={}): \n self.losses.append(logs.get('loss')) \n self.acces.append(logs.get('acc'))\n self.val_losses.append(logs.get('val_loss'))\n self.val_acces.append(logs.get('val_acc')) \n \nhistory = LossHistory()\n\n# -----------------------------------------------------------\n# 构建网络\n# -----------------------------------------------------------\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Flatten\nfrom keras.optimizers import SGD, Adam\nfrom matplotlib import pyplot as plt\n\n\ndef buildCNN(): \n \"\"\"\n 根据指定块数量构造CNN\n 默认blockNum最小为3\n 一个block为两个卷积层一个max pooling\n \"\"\"\n model = Sequential()\n \n model.add(Conv2D(32, (3, 3), activation='relu', input_shape = targetSize))\n model.add(Conv2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n 
\n model.add(Flatten())\n model.add(Dense(256, activation='relu'))\n model.add(Dense(256, activation='relu'))\n\n model.add(Dense(categoryNum, activation='softmax'))\n\n # choose a optimizer\n #sgd = SGD(lr=0.0000001, decay=1e-6, momentum=0.1, nesterov=False)\n adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n \n model.compile(loss='categorical_crossentropy', \n optimizer=adam, metrics=['accuracy'])\n return model\n\n\n\n# -----------------------------------------------------------\n# 构建并训练CNN模型\n# -----------------------------------------------------------\nprint('---------------------')\nprint('start training model with the image enhancement strategy\\n')\n\nmodel = buildCNN()\n\n#训练集样本总数\ntrain_sample_count = len(train_generator.filenames)\n#测试集样本总数\nval_sample_count = len(validation_generator.filenames)\n\nmodel.fit_generator( \n train_generator,\n steps_per_epoch= int(train_sample_count/batchSize) + 1, # steps_per_epoch定义多少batch算作完成一次epoch \n epochs=epochNum,\n validation_data=validation_generator, \n validation_steps= int(val_sample_count/batchSize) + 1, # batch_size, \n callbacks=[history])\n\nprint('\\ntraining finished')\nprint('---------------------\\n')\n\n# 评价模型与可视化 \nprint('best accuracy on training set:' + str(max(history.acces))) \nprint('best accuracy on validation set:' + str(max(history.val_acces)))\n\nprint('\\nvalidation accuracy record on each epoches:')\nprint(history.val_acces)\n\nplt.title('Result Analysis')\nplt.plot([x for x in range(1, len(history.acces) + 1)], history.acces, color='green', label='training accuracy')\nplt.plot([x for x in range(1, len(history.val_acces) + 1)], history.val_acces, color='skyblue', label='validation accuracy')\nplt.legend() # 显示图例\nplt.xlabel('epoches')\nplt.ylabel('accuracy')\nplt.show()\n \n\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
jpmjpmjpm/kraken
|
[
"8ceae75230f5e47bc5d75f8d68fb41b0532cd0ca"
] |
[
"kraken/ketos.py"
] |
[
"# -*- coding: utf-8 -*-\n#\n# Copyright 2015 Benjamin Kiessling\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\nimport os\nimport time\nimport json\nimport glob\nimport uuid\nimport click\nimport logging\nimport unicodedata\n\nfrom click import open_file\nfrom bidi.algorithm import get_display\n\nfrom typing import cast, Set, List, IO, Any\nfrom collections import defaultdict\n\nfrom kraken.lib import log\nfrom kraken.lib.exceptions import KrakenCairoSurfaceException\nfrom kraken.lib.exceptions import KrakenEncodeException\nfrom kraken.lib.exceptions import KrakenInputException\nfrom kraken.lib.default_specs import (SEGMENTATION_HYPER_PARAMS,\n RECOGNITION_HYPER_PARAMS,\n SEGMENTATION_SPEC,\n RECOGNITION_SPEC)\n\nAPP_NAME = 'kraken'\n\nlogging.captureWarnings(True)\nlogger = logging.getLogger('kraken')\n\ndef message(msg, **styles):\n if logger.getEffectiveLevel() >= 30:\n click.secho(msg, **styles)\n\n\[email protected]()\[email protected]_option()\[email protected]('-v', '--verbose', default=0, count=True)\[email protected]('-s', '--seed', default=None, type=click.INT,\n help='Seed for numpy\\'s and torch\\'s RNG. Set to a fixed value to '\n 'ensure reproducable random splits of data')\ndef cli(verbose, seed):\n if seed:\n import numpy.random\n numpy.random.seed(seed)\n from torch import manual_seed\n manual_seed(seed)\n\n log.set_logger(logger, level=30-min(10*verbose, 20))\n\n\ndef _validate_manifests(ctx, param, value):\n images = []\n for manifest in value:\n for entry in manifest.readlines():\n im_p = entry.rstrip('\\r\\n')\n if os.path.isfile(im_p):\n images.append(im_p)\n else:\n logger.warning('Invalid entry \"{}\" in {}'.format(im_p, manifest.name))\n return images\n\n\ndef _expand_gt(ctx, param, value):\n images = []\n for expression in value:\n images.extend([x for x in glob.iglob(expression, recursive=True) if os.path.isfile(x)])\n return images\n\ndef _validate_merging(ctx, param, value):\n \"\"\"\n Maps baseline/region merging to a dict of merge structures.\n \"\"\"\n if not value:\n return None\n merge_dict = {} # type: Dict[str, str]\n try:\n for m in value:\n k, v = m.split(':')\n merge_dict[v] = k # type: ignore\n except Exception:\n raise click.BadParameter('Mappings must be in format target:src')\n return merge_dict\n\[email protected]('segtrain')\[email protected]_context\[email protected]('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')\[email protected]('-s', '--spec', show_default=True,\n default=SEGMENTATION_SPEC,\n help='VGSL spec of the baseline labeling network')\[email protected]('--line-width', show_default=True, default=SEGMENTATION_HYPER_PARAMS['line_width'], help='The height of each baseline in the target after scaling')\[email protected]('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')\[email protected]('-F', '--freq', show_default=True, default=SEGMENTATION_HYPER_PARAMS['freq'], type=click.FLOAT,\n help='Model saving and 
report generation frequency in epochs during training')\[email protected]('-q', '--quit', show_default=True, default=SEGMENTATION_HYPER_PARAMS['quit'], type=click.Choice(['early', 'dumb']),\n help='Stop condition for training. Set to `early` for early stopping or `dumb` for fixed number of epochs')\[email protected]('-N', '--epochs', show_default=True, default=SEGMENTATION_HYPER_PARAMS['epochs'], help='Number of epochs to train for')\[email protected]('--lag', show_default=True, default=SEGMENTATION_HYPER_PARAMS['lag'], help='Number of evaluations (--report frequence) to wait before stopping training without improvement')\[email protected]('--min-delta', show_default=True, default=SEGMENTATION_HYPER_PARAMS['min_delta'], type=click.FLOAT, help='Minimum improvement between epochs to reset early stopping. By default it scales the delta by the best loss')\[email protected]('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')\[email protected]('--optimizer', show_default=True, default=SEGMENTATION_HYPER_PARAMS['optimizer'], type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer')\[email protected]('-r', '--lrate', show_default=True, default=SEGMENTATION_HYPER_PARAMS['lrate'], help='Learning rate')\[email protected]('-m', '--momentum', show_default=True, default=SEGMENTATION_HYPER_PARAMS['momentum'], help='Momentum')\[email protected]('-w', '--weight-decay', show_default=True, default=SEGMENTATION_HYPER_PARAMS['weight_decay'], help='Weight decay')\[email protected]('--schedule', show_default=True, type=click.Choice(['constant', '1cycle', 'exponential', 'cosine', 'step', 'reduceonplateau']), default=RECOGNITION_HYPER_PARAMS['schedule'],\n help='Set learning rate scheduler. For 1cycle, cycle length is determined by the `--step-size` option.')\[email protected]('-g', '--gamma', show_default=True, default=RECOGNITION_HYPER_PARAMS['gamma'], help='Decay factor for exponential, step, and reduceonplateau learning rate schedules')\[email protected]('-ss', '--step-size', show_default=True, default=RECOGNITION_HYPER_PARAMS['step_size'], help='Number of validation runs between learning rate decay for exponential and step LR schedules')\[email protected]('--sched-patience', show_default=True, default=RECOGNITION_HYPER_PARAMS['rop_patience'], help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.')\[email protected]('--cos-max', show_default=True, default=RECOGNITION_HYPER_PARAMS['cos_t_max'], help='Epoch of minimal learning rate for cosine LR scheduler.')\[email protected]('-p', '--partition', show_default=True, default=0.9, help='Ground truth data partition ratio between train/validation set')\[email protected]('-t', '--training-files', show_default=True, default=None, multiple=True,\n callback=_validate_manifests, type=click.File(mode='r', lazy=True),\n help='File(s) with additional paths to training data')\[email protected]('-e', '--evaluation-files', show_default=True, default=None, multiple=True,\n callback=_validate_manifests, type=click.File(mode='r', lazy=True),\n help='File(s) with paths to evaluation data. 
Overrides the `-p` parameter')\[email protected]('--threads', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.')\[email protected]('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,\n help='When loading an existing model, retrieve hyper-parameters from the model')\[email protected]('--force-binarization/--no-binarization', show_default=True,\n default=False, help='Forces input images to be binary, otherwise '\n 'the appropriate color format will be auto-determined through the '\n 'network specification. Will be ignored in `path` mode.')\[email protected]('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='xml',\n help='Sets the training data format. In ALTO and PageXML mode all '\n 'data is extracted from xml files containing both baselines and a '\n 'link to source images. In `path` mode arguments are image files '\n 'sharing a prefix up to the last extension with JSON `.path` files '\n 'containing the baseline information.')\[email protected]('--suppress-regions/--no-suppress-regions', show_default=True, default=False, help='Disables region segmentation training.')\[email protected]('--suppress-baselines/--no-suppress-baselines', show_default=True, default=False, help='Disables baseline segmentation training.')\[email protected]('-vr', '--valid-regions', show_default=True, default=None, multiple=True, help='Valid region types in training data. May be used multiple times.')\[email protected]('-vb', '--valid-baselines', show_default=True, default=None, multiple=True, help='Valid baseline types in training data. May be used multiple times.')\[email protected]('-mr', '--merge-regions', show_default=True, default=None, help='Region merge mapping. One or more mappings of the form `$target:$src` where $src is merged into $target.', multiple=True, callback=_validate_merging)\[email protected]('-mb', '--merge-baselines', show_default=True, default=None, help='Baseline type merge mapping. Same syntax as `--merge-regions`', multiple=True, callback=_validate_merging)\[email protected]('-br', '--bounding-regions', show_default=True, default=None, multiple=True, help='Regions treated as boundaries for polygonization purposes. May be used multiple times.')\[email protected]('--augment/--no-augment', show_default=True, default=SEGMENTATION_HYPER_PARAMS['augment'], help='Enable image augmentation')\[email protected]('--resize', show_default=True, default='fail', type=click.Choice(['add', 'both', 'fail']),\n help='Output layer resizing option. If set to `add` new classes will be '\n 'added, `both` will set the layer to match exactly '\n 'the training data classes, `fail` will abort if training data and model '\n 'classes do not match.')\[email protected]('-tl', '--topline', 'topline', show_default=True, flag_value='topline',\n help='Switch for the baseline location in the scripts. '\n 'Set to topline if the data is annotated with a hanging baseline, as is '\n 'common with Hebrew, Bengali, Devanagari, etc. 
Set to '\n ' centerline for scripts annotated with a central line.')\[email protected]('-cl', '--centerline', 'topline', flag_value='centerline')\[email protected]('-bl', '--baseline', 'topline', flag_value='baseline', default='baseline')\[email protected]('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))\ndef segtrain(ctx, output, spec, line_width, load, freq, quit, epochs,\n lag, min_delta, device, optimizer, lrate, momentum, weight_decay,\n schedule, gamma, step_size, sched_patience, cos_max, partition,\n training_files, evaluation_files, threads, load_hyper_parameters,\n force_binarization, format_type, suppress_regions,\n suppress_baselines, valid_regions, valid_baselines, merge_regions,\n merge_baselines, bounding_regions, augment, resize, topline, ground_truth):\n \"\"\"\n Trains a baseline labeling model for layout analysis\n \"\"\"\n import re\n import torch\n import shutil\n import numpy as np\n\n from kraken.lib.train import KrakenTrainer\n\n if resize != 'fail' and not load:\n raise click.BadOptionUsage('resize', 'resize option requires loading an existing model')\n\n logger.info('Building ground truth set from {} document images'.format(len(ground_truth) + len(training_files)))\n\n\n # load model if given. if a new model has to be created we need to do that\n # after data set initialization, otherwise to output size is still unknown.\n nn = None\n\n # populate hyperparameters from command line args\n hyper_params = SEGMENTATION_HYPER_PARAMS.copy()\n hyper_params.update({'line_width': line_width,\n 'freq': freq,\n 'quit': quit,\n 'epochs': epochs,\n 'lag': lag,\n 'min_delta': min_delta,\n 'optimizer': optimizer,\n 'lrate': lrate,\n 'momentum': momentum,\n 'weight_decay': weight_decay,\n 'schedule': schedule,\n 'augment': augment,\n 'gamma': gamma,\n 'step_size': step_size,\n 'rop_patience': sched_patience,\n 'cos_t_max': cos_max\n })\n\n # disable automatic partition when given evaluation set explicitly\n if evaluation_files:\n partition = 1\n ground_truth = list(ground_truth)\n\n # merge training_files into ground_truth list\n if training_files:\n ground_truth.extend(training_files)\n\n if len(ground_truth) == 0:\n raise click.UsageError('No training data was provided to the train command. 
Use `-t` or the `ground_truth` argument.')\n\n np.random.shuffle(ground_truth)\n\n training_files = ground_truth[:int(len(ground_truth) * partition)]\n if evaluation_files:\n logger.debug(f'Using {len(evaluation_files)} lines/files from explicit eval set')\n else:\n evaluation_files = ground_truth[int(len(ground_truth) * partition):]\n logger.debug(f'Taking {len(evaluation_files)} lines/files from training set for evaluation')\n\n def _init_progressbar(label, length):\n if 'bar' in ctx.meta:\n ctx.meta['bar'].__exit__(None, None, None)\n ctx.meta['bar'] = log.progressbar(label=label, length=length, show_pos=True)\n ctx.meta['bar'].__enter__()\n return lambda: ctx.meta['bar'].update(1)\n\n topline = {'topline': True,\n 'baseline': False,\n 'centerline': None}[topline]\n\n trainer = KrakenTrainer.segmentation_train_gen(hyper_params,\n message=message,\n progress_callback=_init_progressbar,\n output=output,\n spec=spec,\n load=load,\n device=device,\n training_data=training_files,\n evaluation_data=evaluation_files,\n threads=threads,\n load_hyper_parameters=load_hyper_parameters,\n force_binarization=force_binarization,\n format_type=format_type,\n suppress_regions=suppress_regions,\n suppress_baselines=suppress_baselines,\n valid_regions=valid_regions,\n valid_baselines=valid_baselines,\n merge_regions=merge_regions,\n merge_baselines=merge_baselines,\n bounding_regions=bounding_regions,\n augment=augment,\n resize=resize,\n topline=topline)\n\n with log.progressbar(label='stage {}/{}'.format(1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞'),\n length=trainer.event_it, show_pos=True) as bar:\n\n def _draw_progressbar():\n bar.update(1)\n\n def _print_eval(epoch, accuracy, mean_acc, mean_iu, freq_iu, **kwargs):\n message('Accuracy report ({}) mean_iu: {:0.4f} freq_iu: {:0.4f} mean_acc: {:0.4f} accuracy: {:0.4f}'.format(epoch, mean_iu, freq_iu, mean_acc, accuracy))\n # reset progress bar\n bar.label = 'stage {}/{}'.format(epoch+1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞')\n bar.pos = 0\n bar.finished = False\n bar.start = bar.last_eta = time.time()\n\n trainer.run(_print_eval, _draw_progressbar)\n\n if quit == 'early':\n message('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss))\n logger.info('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss))\n shutil.copy(f'{output}_{trainer.stopper.best_epoch}.mlmodel', f'{output}_best.mlmodel')\n\n\[email protected]('train')\[email protected]_context\[email protected]('-B', '--batch-size', show_default=True, type=click.INT,\n default=RECOGNITION_HYPER_PARAMS['batch_size'], help='batch sample size')\[email protected]('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '\n 'padding around lines')\[email protected]('-o', '--output', show_default=True, type=click.Path(), default='model', help='Output model file')\[email protected]('-s', '--spec', show_default=True, default=RECOGNITION_SPEC,\n help='VGSL spec of the network to train. CTC layer will be added automatically.')\[email protected]('-a', '--append', show_default=True, default=None, type=click.INT,\n help='Removes layers before argument and then appends spec. 
Only works when loading an existing model')\[email protected]('-i', '--load', show_default=True, type=click.Path(exists=True, readable=True), help='Load existing file to continue training')\[email protected]('-F', '--freq', show_default=True, default=RECOGNITION_HYPER_PARAMS['freq'], type=click.FLOAT,\n help='Model saving and report generation frequency in epochs during training')\[email protected]('-q', '--quit', show_default=True, default=RECOGNITION_HYPER_PARAMS['quit'], type=click.Choice(['early', 'dumb']),\n help='Stop condition for training. Set to `early` for early stooping or `dumb` for fixed number of epochs')\[email protected]('-N', '--epochs', show_default=True, default=RECOGNITION_HYPER_PARAMS['epochs'], help='Number of epochs to train for')\[email protected]('--lag', show_default=True, default=RECOGNITION_HYPER_PARAMS['lag'], help='Number of evaluations (--report frequence) to wait before stopping training without improvement')\[email protected]('--min-delta', show_default=True, default=RECOGNITION_HYPER_PARAMS['min_delta'], type=click.FLOAT, help='Minimum improvement between epochs to reset early stopping. Default is scales the delta by the best loss')\[email protected]('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')\[email protected]('--optimizer', show_default=True, default=RECOGNITION_HYPER_PARAMS['optimizer'], type=click.Choice(['Adam', 'SGD', 'RMSprop']), help='Select optimizer')\[email protected]('-r', '--lrate', show_default=True, default=RECOGNITION_HYPER_PARAMS['lrate'], help='Learning rate')\[email protected]('-m', '--momentum', show_default=True, default=RECOGNITION_HYPER_PARAMS['momentum'], help='Momentum')\[email protected]('-w', '--weight-decay', show_default=True, default=RECOGNITION_HYPER_PARAMS['weight_decay'], help='Weight decay')\[email protected]('--schedule', show_default=True, type=click.Choice(['constant', '1cycle', 'exponential', 'cosine', 'step', 'reduceonplateau']), default=RECOGNITION_HYPER_PARAMS['schedule'],\n help='Set learning rate scheduler. 
For 1cycle, cycle length is determined by the `--epoch` option.')\[email protected]('-g', '--gamma', show_default=True, default=RECOGNITION_HYPER_PARAMS['gamma'], help='Decay factor for exponential, step, and reduceonplateau learning rate schedules')\[email protected]('-ss', '--step-size', show_default=True, default=RECOGNITION_HYPER_PARAMS['step_size'], help='Number of validation runs between learning rate decay for exponential and step LR schedules')\[email protected]('--sched-patience', show_default=True, default=RECOGNITION_HYPER_PARAMS['rop_patience'], help='Minimal number of validation runs between LR reduction for reduceonplateau LR schedule.')\[email protected]('--cos-max', show_default=True, default=RECOGNITION_HYPER_PARAMS['cos_t_max'], help='Epoch of minimal learning rate for cosine LR scheduler.')\[email protected]('-p', '--partition', show_default=True, default=0.9, help='Ground truth data partition ratio between train/validation set')\[email protected]('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),\n default=RECOGNITION_HYPER_PARAMS['normalization'], help='Ground truth normalization')\[email protected]('-n', '--normalize-whitespace/--no-normalize-whitespace',\n show_default=True, default=RECOGNITION_HYPER_PARAMS['normalize_whitespace'], help='Normalizes unicode whitespace')\[email protected]('-c', '--codec', show_default=True, default=None, type=click.File(mode='r', lazy=True),\n help='Load a codec JSON definition (invalid if loading existing model)')\[email protected]('--resize', show_default=True, default='fail', type=click.Choice(['add', 'both', 'fail']),\n help='Codec/output layer resizing option. If set to `add` code '\n 'points will be added, `both` will set the layer to match exactly '\n 'the training data, `fail` will abort if training data and model '\n 'codec do not match.')\[email protected]('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order')\[email protected]('--base-dir', show_default=True, default='auto',\n type=click.Choice(['L', 'R', 'auto']), help='Set base text '\n 'direction. This should be set to the direction used during the '\n 'creation of the training data. If set to `auto` it will be '\n 'overridden by any explicit value given in the input files.')\[email protected]('-t', '--training-files', show_default=True, default=None, multiple=True,\n callback=_validate_manifests, type=click.File(mode='r', lazy=True),\n help='File(s) with additional paths to training data')\[email protected]('-e', '--evaluation-files', show_default=True, default=None, multiple=True,\n callback=_validate_manifests, type=click.File(mode='r', lazy=True),\n help='File(s) with paths to evaluation data. Overrides the `-p` parameter')\[email protected]('--preload/--no-preload', show_default=True, default=None, help='Hard enable/disable for training data preloading')\[email protected]('--threads', show_default=True, default=1, help='Number of OpenMP threads and workers when running on CPU.')\[email protected]('--load-hyper-parameters/--no-load-hyper-parameters', show_default=True, default=False,\n help='When loading an existing model, retrieve hyperparameters from the model')\[email protected]('--repolygonize/--no-repolygonize', show_default=True,\n default=False, help='Repolygonizes line data in ALTO/PageXML '\n 'files. 
This ensures that the trained model is compatible with the '\n 'segmenter in kraken even if the original image files either do '\n 'not contain anything but transcriptions and baseline information '\n 'or the polygon data was created using a different method. Will '\n 'be ignored in `path` mode. Note that this option will be slow '\n 'and will not scale input images to the same size as the segmenter '\n 'does.')\[email protected]('--force-binarization/--no-binarization', show_default=True,\n default=False, help='Forces input images to be binary, otherwise '\n 'the appropriate color format will be auto-determined through the '\n 'network specification. Will be ignored in `path` mode.')\[email protected]('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='path',\n help='Sets the training data format. In ALTO and PageXML mode all '\n 'data is extracted from xml files containing both line definitions and a '\n 'link to source images. In `path` mode arguments are image files '\n 'sharing a prefix up to the last extension with `.gt.txt` text files '\n 'containing the transcription.')\[email protected]('--augment/--no-augment', show_default=True, default=RECOGNITION_HYPER_PARAMS['augment'], help='Enable image augmentation')\[email protected]('ground_truth', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))\ndef train(ctx, batch_size, pad, output, spec, append, load, freq, quit, epochs,\n lag, min_delta, device, optimizer, lrate, momentum, weight_decay,\n schedule, gamma, step_size, sched_patience, cos_max, partition,\n normalization, normalize_whitespace, codec, resize, reorder,\n base_dir, training_files, evaluation_files, preload, threads,\n load_hyper_parameters, repolygonize, force_binarization, format_type,\n augment, ground_truth):\n \"\"\"\n Trains a model from image-text pairs.\n \"\"\"\n if not load and append:\n raise click.BadOptionUsage('append', 'append option requires loading an existing model')\n\n if resize != 'fail' and not load:\n raise click.BadOptionUsage('resize', 'resize option requires loading an existing model')\n\n import shutil\n import numpy as np\n from kraken.lib.train import KrakenTrainer\n\n hyper_params = RECOGNITION_HYPER_PARAMS.copy()\n hyper_params.update({'freq': freq,\n 'pad': pad,\n 'batch_size': batch_size,\n 'quit': quit,\n 'epochs': epochs,\n 'lag': lag,\n 'min_delta': min_delta,\n 'optimizer': optimizer,\n 'lrate': lrate,\n 'momentum': momentum,\n 'weight_decay': weight_decay,\n 'schedule': schedule,\n 'gamma': gamma,\n 'step_size': step_size,\n 'rop_patience': sched_patience,\n 'cos_t_max': cos_max,\n 'normalization': normalization,\n 'normalize_whitespace': normalize_whitespace,\n 'augment': augment\n })\n\n # disable automatic partition when given evaluation set explicitly\n if evaluation_files:\n partition = 1\n ground_truth = list(ground_truth)\n\n # merge training_files into ground_truth list\n if training_files:\n ground_truth.extend(training_files)\n\n if len(ground_truth) == 0:\n raise click.UsageError('No training data was provided to the train command. 
Use `-t` or the `ground_truth` argument.')\n\n if reorder and base_dir != 'auto':\n reorder = base_dir\n\n np.random.shuffle(ground_truth)\n training_files = ground_truth[:int(len(ground_truth) * partition)]\n if evaluation_files:\n logger.debug(f'Using {len(evaluation_files)} lines/files from explicit eval set')\n else:\n evaluation_files = ground_truth[int(len(ground_truth) * partition):]\n logger.debug(f'Taking {len(evaluation_files)} lines/files from training set for evaluation')\n\n def _init_progressbar(label, length):\n if 'bar' in ctx.meta:\n ctx.meta['bar'].__exit__(None, None, None)\n ctx.meta['bar'] = log.progressbar(label=label, length=length, show_pos=True)\n ctx.meta['bar'].__enter__()\n return lambda: ctx.meta['bar'].update(1)\n\n trainer = KrakenTrainer.recognition_train_gen(hyper_params,\n message=message,\n progress_callback=_init_progressbar,\n output=output,\n spec=spec,\n append=append,\n load=load,\n device=device,\n reorder=reorder,\n training_data=training_files,\n evaluation_data=evaluation_files,\n preload=preload,\n threads=threads,\n load_hyper_parameters=load_hyper_parameters,\n repolygonize=repolygonize,\n force_binarization=force_binarization,\n format_type=format_type,\n codec=codec,\n resize=resize,\n augment=augment)\n\n with log.progressbar(label='stage {}/{}'.format(1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞'),\n length=trainer.event_it, show_pos=True) as bar:\n\n def _draw_progressbar():\n bar.update(1)\n\n def _print_eval(epoch, accuracy, chars, error, **kwargs):\n message('Accuracy report ({}) {:0.4f} {} {}'.format(epoch, accuracy, chars, error))\n # reset progress bar\n bar.label = 'stage {}/{}'.format(epoch+1, trainer.stopper.epochs if trainer.stopper.epochs > 0 else '∞')\n bar.pos = 0\n bar.finished = False\n bar.start = bar.last_eta = time.time()\n\n trainer.run(_print_eval, _draw_progressbar)\n\n if quit == 'early':\n message('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss))\n logger.info('Moving best model {0}_{1}.mlmodel ({2}) to {0}_best.mlmodel'.format(output, trainer.stopper.best_epoch, trainer.stopper.best_loss))\n shutil.copy(f'{output}_{trainer.stopper.best_epoch}.mlmodel', f'{output}_best.mlmodel')\n\n\[email protected]('test')\[email protected]_context\[email protected]('-B', '--batch-size', show_default=True, type=click.INT,\n default=RECOGNITION_HYPER_PARAMS['batch_size'], help='Batch sample size')\[email protected]('-m', '--model', show_default=True, type=click.Path(exists=True, readable=True),\n multiple=True, help='Model(s) to evaluate')\[email protected]('-e', '--evaluation-files', show_default=True, default=None, multiple=True,\n callback=_validate_manifests, type=click.File(mode='r', lazy=True),\n help='File(s) with paths to evaluation data.')\[email protected]('-d', '--device', show_default=True, default='cpu', help='Select device to use (cpu, cuda:0, cuda:1, ...)')\[email protected]('-p', '--pad', show_default=True, type=click.INT, default=16, help='Left and right '\n 'padding around lines')\[email protected]('--threads', show_default=True, default=1, help='Number of OpenMP threads when running on CPU.')\[email protected]('--reorder/--no-reorder', show_default=True, default=True, help='Reordering of code points to display order')\[email protected]('--base-dir', show_default=True, default='auto',\n type=click.Choice(['L', 'R', 'auto']), help='Set base text '\n 'direction. 
This should be set to the direction used during the '\n 'creation of the training data. If set to `auto` it will be '\n 'overridden by any explicit value given in the input files.')\[email protected]('-u', '--normalization', show_default=True, type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']),\n default=None, help='Ground truth normalization')\[email protected]('-n', '--normalize-whitespace/--no-normalize-whitespace',\n show_default=True, default=True, help='Normalizes unicode whitespace')\[email protected]('--repolygonize/--no-repolygonize', show_default=True,\n default=False, help='Repolygonizes line data in ALTO/PageXML '\n 'files. This ensures that the trained model is compatible with the '\n 'segmenter in kraken even if the original image files either do '\n 'not contain anything but transcriptions and baseline information '\n 'or the polygon data was created using a different method. Will '\n 'be ignored in `path` mode. Note, that this option will be slow '\n 'and will not scale input images to the same size as the segmenter '\n 'does.')\[email protected]('--force-binarization/--no-binarization', show_default=True,\n default=False, help='Forces input images to be binary, otherwise '\n 'the appropriate color format will be auto-determined through the '\n 'network specification. Will be ignored in `path` mode.')\[email protected]('-f', '--format-type', type=click.Choice(['path', 'xml', 'alto', 'page']), default='path',\n help='Sets the training data format. In ALTO and PageXML mode all '\n 'data is extracted from xml files containing both baselines and a '\n 'link to source images. In `path` mode arguments are image files '\n 'sharing a prefix up to the last extension with JSON `.path` files '\n 'containing the baseline information.')\[email protected]('test_set', nargs=-1, callback=_expand_gt, type=click.Path(exists=False, dir_okay=False))\ndef test(ctx, batch_size, model, evaluation_files, device, pad, threads,\n reorder, base_dir, normalization, normalize_whitespace, repolygonize,\n force_binarization, format_type, test_set):\n \"\"\"\n Evaluate on a test set.\n \"\"\"\n if not model:\n raise click.UsageError('No model to evaluate given.')\n\n import regex\n import unicodedata\n import numpy as np\n from PIL import Image\n from torch.utils.data import DataLoader\n\n from kraken.serialization import render_report\n from kraken.lib import models\n from kraken.lib.dataset import global_align, compute_confusions, preparse_xml_data, PolygonGTDataset, GroundTruthDataset, generate_input_transforms, collate_sequences\n\n logger.info('Building test set from {} line images'.format(len(test_set) + len(evaluation_files)))\n\n nn = {}\n for p in model:\n message('Loading model {}\\t'.format(p), nl=False)\n nn[p] = models.load_any(p)\n message('\\u2713', fg='green')\n\n test_set = list(test_set)\n\n # set number of OpenMP threads\n next(iter(nn.values())).nn.set_num_threads(1)\n\n if evaluation_files:\n test_set.extend(evaluation_files)\n\n if len(test_set) == 0:\n raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')\n\n if format_type != 'path':\n if repolygonize:\n message('Repolygonizing data')\n test_set = preparse_xml_data(test_set, format_type, repolygonize)\n valid_norm = False\n DatasetClass = PolygonGTDataset\n else:\n DatasetClass = GroundTruthDataset\n t = []\n if force_binarization:\n logger.warning('Forced binarization enabled in `path` mode. 
Will be ignored.')\n force_binarization = False\n if repolygonize:\n logger.warning('Repolygonization enabled in `path` mode. Will be ignored.')\n test_set = [{'image': img} for img in test_set]\n valid_norm = True\n\n if len(test_set) == 0:\n raise click.UsageError('No evaluation data was provided to the test command. Use `-e` or the `test_set` argument.')\n\n if reorder and base_dir != 'auto':\n reorder = base_dir\n\n acc_list = []\n for p, net in nn.items():\n algn_gt: List[str] = []\n algn_pred: List[str] = []\n chars = 0\n error = 0\n message('Evaluating {}'.format(p))\n logger.info('Evaluating {}'.format(p))\n batch, channels, height, width = net.nn.input\n ts = generate_input_transforms(batch, height, width, channels, pad, valid_norm, force_binarization)\n ds = DatasetClass(normalization=normalization,\n whitespace_normalization=normalize_whitespace,\n reorder=reorder,\n im_transforms=ts,\n preload=False)\n for line in test_set:\n try:\n ds.add(**line)\n except KrakenInputException as e:\n logger.info(e)\n # don't encode validation set as the alphabets may not match causing encoding failures\n ds.no_encode()\n ds_loader = DataLoader(ds,\n batch_size=batch_size,\n num_workers=threads,\n pin_memory=True,\n collate_fn=collate_sequences)\n\n with log.progressbar(ds_loader, label='Evaluating') as bar:\n for batch in bar:\n im = batch['image']\n text = batch['target']\n lens = batch['seq_lens']\n try:\n pred = net.predict_string(im, lens)\n for x, y in zip(pred, text):\n chars += len(y)\n c, algn1, algn2 = global_align(y, x)\n algn_gt.extend(algn1)\n algn_pred.extend(algn2)\n error += c\n except FileNotFoundError as e:\n logger.warning('{} {}. Skipping.'.format(e.strerror, e.filename))\n except KrakenInputException as e:\n logger.warning(str(e))\n acc_list.append((chars-error)/chars)\n confusions, scripts, ins, dels, subs = compute_confusions(algn_gt, algn_pred)\n rep = render_report(p, chars, error, confusions, scripts, ins, dels, subs)\n logger.info(rep)\n message(rep)\n logger.info('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))\n message('Average accuracy: {:0.2f}%, (stddev: {:0.2f})'.format(np.mean(acc_list) * 100, np.std(acc_list) * 100))\n\n\[email protected]('extract')\[email protected]_context\[email protected]('-b', '--binarize/--no-binarize', show_default=True, default=True,\n help='Binarize color/grayscale images')\[email protected]('-u', '--normalization', show_default=True,\n type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,\n help='Normalize ground truth')\[email protected]('-s', '--normalize-whitespace/--no-normalize-whitespace',\n show_default=True, default=True, help='Normalizes unicode whitespace')\[email protected]('-n', '--reorder/--no-reorder', default=False, show_default=True,\n help='Reorder transcribed lines to display order')\[email protected]('-r', '--rotate/--no-rotate', default=True, show_default=True,\n help='Skip rotation of vertical lines')\[email protected]('-o', '--output', type=click.Path(), default='training', show_default=True,\n help='Output directory')\[email protected]('--format', default='{idx:06d}', show_default=True, help='Format for extractor output. 
valid fields are `src` (source file), `idx` (line number), and `uuid` (v4 uuid)')\[email protected]('transcriptions', nargs=-1, type=click.File(lazy=True))\ndef extract(ctx, binarize, normalization, normalize_whitespace, reorder,\n rotate, output, format, transcriptions):\n \"\"\"\n Extracts image-text pairs from a transcription environment created using\n ``ketos transcribe``.\n \"\"\"\n import regex\n import base64\n\n from io import BytesIO\n from PIL import Image\n from lxml import html, etree\n\n from kraken import binarization\n\n try:\n os.mkdir(output)\n except Exception:\n pass\n\n text_transforms = []\n if normalization:\n text_transforms.append(lambda x: unicodedata.normalize(normalization, x))\n if normalize_whitespace:\n text_transforms.append(lambda x: regex.sub(r'\\s', ' ', x))\n if reorder:\n text_transforms.append(get_display)\n\n idx = 0\n manifest = []\n with log.progressbar(transcriptions, label='Reading transcriptions') as bar:\n for fp in bar:\n logger.info('Reading {}'.format(fp.name))\n doc = html.parse(fp)\n etree.strip_tags(doc, etree.Comment)\n td = doc.find(\".//meta[@itemprop='text_direction']\")\n if td is None:\n td = 'horizontal-lr'\n else:\n td = td.attrib['content']\n\n im = None\n dest_dict = {'output': output, 'idx': 0, 'src': fp.name, 'uuid': str(uuid.uuid4())}\n for section in doc.xpath('//section'):\n img = section.xpath('.//img')[0].get('src')\n fd = BytesIO(base64.b64decode(img.split(',')[1]))\n im = Image.open(fd)\n if not im:\n logger.info('Skipping {} because image not found'.format(fp.name))\n break\n if binarize:\n im = binarization.nlbin(im)\n for line in section.iter('li'):\n if line.get('contenteditable') and (not u''.join(line.itertext()).isspace() and u''.join(line.itertext())):\n dest_dict['idx'] = idx\n dest_dict['uuid'] = str(uuid.uuid4())\n logger.debug('Writing line {:06d}'.format(idx))\n l_img = im.crop([int(x) for x in line.get('data-bbox').split(',')])\n if rotate and td.startswith('vertical'):\n im.rotate(90, expand=True)\n l_img.save(('{output}/' + format + '.png').format(**dest_dict))\n manifest.append((format + '.png').format(**dest_dict))\n text = u''.join(line.itertext()).strip()\n for func in text_transforms:\n text = func(text)\n with open(('{output}/' + format + '.gt.txt').format(**dest_dict), 'wb') as t:\n t.write(text.encode('utf-8'))\n idx += 1\n logger.info('Extracted {} lines'.format(idx))\n with open('{}/manifest.txt'.format(output), 'w') as fp:\n fp.write('\\n'.join(manifest))\n\n\[email protected]('transcribe')\[email protected]_context\[email protected]('-d', '--text-direction', default='horizontal-lr',\n type=click.Choice(['horizontal-lr', 'horizontal-rl', 'vertical-lr', 'vertical-rl']),\n help='Sets principal text direction', show_default=True)\[email protected]('--scale', default=None, type=click.FLOAT)\[email protected]('--bw/--orig', default=True, show_default=True,\n help=\"Put nonbinarized images in output\")\[email protected]('-m', '--maxcolseps', default=2, type=click.INT, show_default=True)\[email protected]('-b/-w', '--black_colseps/--white_colseps', default=False, show_default=True)\[email protected]('-f', '--font', default='',\n help='Font family to use')\[email protected]('-fs', '--font-style', default=None,\n help='Font style to use')\[email protected]('-p', '--prefill', default=None,\n help='Use given model for prefill mode.')\[email protected]('--pad', show_default=True, type=(int, int), default=(0, 0),\n help='Left and right padding around lines')\[email protected]('-l', '--lines', 
type=click.Path(exists=True), show_default=True,\n help='JSON file containing line coordinates')\[email protected]('-o', '--output', type=click.File(mode='wb'), default='transcription.html',\n help='Output file', show_default=True)\[email protected]('images', nargs=-1, type=click.File(mode='rb', lazy=True))\ndef transcription(ctx, text_direction, scale, bw, maxcolseps,\n black_colseps, font, font_style, prefill, pad, lines, output,\n images):\n \"\"\"\n Creates transcription environments for ground truth generation.\n \"\"\"\n from PIL import Image\n\n from kraken import rpred\n from kraken import pageseg\n from kraken import transcribe\n from kraken import binarization\n\n from kraken.lib import models\n\n ti = transcribe.TranscriptionInterface(font, font_style)\n\n if len(images) > 1 and lines:\n raise click.UsageError('--lines option is incompatible with multiple image files')\n\n if prefill:\n logger.info('Loading model {}'.format(prefill))\n message('Loading ANN', nl=False)\n prefill = models.load_any(prefill)\n message('\\u2713', fg='green')\n\n with log.progressbar(images, label='Reading images') as bar:\n for fp in bar:\n logger.info('Reading {}'.format(fp.name))\n im = Image.open(fp)\n if im.mode not in ['1', 'L', 'P', 'RGB']:\n logger.warning('Input {} is in {} color mode. Converting to RGB'.format(fp.name, im.mode))\n im = im.convert('RGB')\n logger.info('Binarizing page')\n im_bin = binarization.nlbin(im)\n im_bin = im_bin.convert('1')\n logger.info('Segmenting page')\n if not lines:\n res = pageseg.segment(im_bin, text_direction, scale, maxcolseps, black_colseps, pad=pad)\n else:\n with open_file(lines, 'r') as fp:\n try:\n fp = cast(IO[Any], fp)\n res = json.load(fp)\n except ValueError as e:\n raise click.UsageError('{} invalid segmentation: {}'.format(lines, str(e)))\n if prefill:\n it = rpred.rpred(prefill, im_bin, res.copy())\n preds = []\n logger.info('Recognizing')\n for pred in it:\n logger.debug('{}'.format(pred.prediction))\n preds.append(pred)\n ti.add_page(im, res, records=preds)\n else:\n ti.add_page(im, res)\n fp.close()\n logger.info('Writing transcription to {}'.format(output.name))\n message('Writing output', nl=False)\n ti.write(output)\n message('\\u2713', fg='green')\n\n\[email protected]('linegen')\[email protected]_context\[email protected]('-f', '--font', default='sans',\n help='Font family to render texts in.')\[email protected]('-n', '--maxlines', type=click.INT, default=0,\n help='Maximum number of lines to generate')\[email protected]('-e', '--encoding', default='utf-8',\n help='Decode text files with given codec.')\[email protected]('-u', '--normalization',\n type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,\n help='Normalize ground truth')\[email protected]('-ur', '--renormalize',\n type=click.Choice(['NFD', 'NFKD', 'NFC', 'NFKC']), default=None,\n help='Renormalize text for rendering purposes.')\[email protected]('--reorder/--no-reorder', default=False, help='Reorder code points to display order')\[email protected]('-fs', '--font-size', type=click.INT, default=32,\n help='Font size to render texts in.')\[email protected]('-fw', '--font-weight', type=click.INT, default=400,\n help='Font weight to render texts in.')\[email protected]('-l', '--language',\n help='RFC-3066 language tag for language-dependent font shaping')\[email protected]('-ll', '--max-length', type=click.INT, default=None,\n help=\"Discard lines above length (in Unicode codepoints).\")\[email protected]('--strip/--no-strip', help=\"Remove whitespace from start and end 
\"\n \"of lines.\")\[email protected]('-d', '--disable-degradation', is_flag=True, help='Dont degrade '\n 'output lines.')\[email protected]('-a', '--alpha', type=click.FLOAT, default=1.5,\n help=\"Mean of folded normal distribution for sampling foreground pixel flip probability\")\[email protected]('-b', '--beta', type=click.FLOAT, default=1.5,\n help=\"Mean of folded normal distribution for sampling background pixel flip probability\")\[email protected]('-d', '--distort', type=click.FLOAT, default=1.0,\n help='Mean of folded normal distribution to take distortion values from')\[email protected]('-ds', '--distortion-sigma', type=click.FLOAT, default=20.0,\n help='Mean of folded normal distribution to take standard deviations for the '\n 'Gaussian kernel from')\[email protected]('--legacy/--no-legacy', default=False,\n help='Use ocropy-style degradations')\[email protected]('-o', '--output', type=click.Path(), default='training_data',\n help='Output directory')\[email protected]('text', nargs=-1, type=click.Path(exists=True))\ndef line_generator(ctx, font, maxlines, encoding, normalization, renormalize,\n reorder, font_size, font_weight, language, max_length, strip,\n disable_degradation, alpha, beta, distort, distortion_sigma,\n legacy, output, text):\n \"\"\"\n Generates artificial text line training data.\n \"\"\"\n import errno\n import numpy as np\n\n from kraken import linegen\n from kraken.lib.util import make_printable\n\n lines: Set[str] = set()\n if not text:\n return\n with log.progressbar(text, label='Reading texts') as bar:\n for t in text:\n with click.open_file(t, encoding=encoding) as fp:\n logger.info('Reading {}'.format(t))\n for l in fp:\n lines.add(l.rstrip('\\r\\n'))\n if normalization:\n lines = set([unicodedata.normalize(normalization, line) for line in lines])\n if strip:\n lines = set([line.strip() for line in lines])\n if max_length:\n lines = set([line for line in lines if len(line) < max_length])\n logger.info('Read {} lines'.format(len(lines)))\n message('Read {} unique lines'.format(len(lines)))\n if maxlines and maxlines < len(lines):\n message('Sampling {} lines\\t'.format(maxlines), nl=False)\n llist = list(lines)\n lines = set(llist[idx] for idx in np.random.randint(0, len(llist), maxlines))\n message('\\u2713', fg='green')\n try:\n os.makedirs(output)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n # calculate the alphabet and print it for verification purposes\n alphabet: Set[str] = set()\n for line in lines:\n alphabet.update(line)\n chars = []\n combining = []\n for char in sorted(alphabet):\n k = make_printable(char)\n if k != char:\n combining.append(k)\n else:\n chars.append(k)\n message('Σ (len: {})'.format(len(alphabet)))\n message('Symbols: {}'.format(''.join(chars)))\n if combining:\n message('Combining Characters: {}'.format(', '.join(combining)))\n lg = linegen.LineGenerator(font, font_size, font_weight, language)\n with log.progressbar(lines, label='Writing images') as bar:\n for idx, line in enumerate(bar):\n logger.info(line)\n try:\n if renormalize:\n im = lg.render_line(unicodedata.normalize(renormalize, line))\n else:\n im = lg.render_line(line)\n except KrakenCairoSurfaceException as e:\n logger.info('{}: {} {}'.format(e.message, e.width, e.height))\n continue\n if not disable_degradation and not legacy:\n im = linegen.degrade_line(im, alpha=alpha, beta=beta)\n im = linegen.distort_line(im, abs(np.random.normal(distort)), abs(np.random.normal(distortion_sigma)))\n elif legacy:\n im = linegen.ocropy_degrade(im)\n 
im.save('{}/{:06d}.png'.format(output, idx))\n with open('{}/{:06d}.gt.txt'.format(output, idx), 'wb') as fp:\n if reorder:\n fp.write(get_display(line).encode('utf-8'))\n else:\n fp.write(line.encode('utf-8'))\n\n\[email protected]('publish')\[email protected]_context\[email protected]('-i', '--metadata', show_default=True,\n type=click.File(mode='r', lazy=True), help='Metadata for the '\n 'model. Will be prompted from the user if not given')\[email protected]('-a', '--access-token', prompt=True, help='Zenodo access token')\[email protected]('model', nargs=1, type=click.Path(exists=False, readable=True, dir_okay=False))\ndef publish(ctx, metadata, access_token, model):\n \"\"\"\n Publishes a model on the zenodo model repository.\n \"\"\"\n import json\n import pkg_resources\n\n from functools import partial\n from jsonschema import validate\n from jsonschema.exceptions import ValidationError\n\n from kraken import repo\n from kraken.lib import models\n\n with pkg_resources.resource_stream(__name__, 'metadata.schema.json') as fp:\n schema = json.load(fp)\n\n nn = models.load_any(model)\n\n if not metadata:\n author = click.prompt('author')\n affiliation = click.prompt('affiliation')\n summary = click.prompt('summary')\n description = click.edit('Write long form description (training data, transcription standards) of the model here')\n accuracy_default = None\n # take last accuracy measurement in model metadata\n if 'accuracy' in nn.nn.user_metadata and nn.nn.user_metadata['accuracy']:\n accuracy_default = nn.nn.user_metadata['accuracy'][-1][1] * 100\n accuracy = click.prompt('accuracy on test set', type=float, default=accuracy_default)\n script = [click.prompt('script', type=click.Choice(sorted(schema['properties']['script']['items']['enum'])), show_choices=True)]\n license = click.prompt('license', type=click.Choice(sorted(schema['properties']['license']['enum'])), show_choices=True)\n metadata = {\n 'authors': [{'name': author, 'affiliation': affiliation}],\n 'summary': summary,\n 'description': description,\n 'accuracy': accuracy,\n 'license': license,\n 'script': script,\n 'name': os.path.basename(model),\n 'graphemes': ['a']\n }\n while True:\n try:\n validate(metadata, schema)\n except ValidationError as e:\n message(e.message)\n metadata[e.path[-1]] = click.prompt(e.path[-1], type=float if e.schema['type'] == 'number' else str)\n continue\n break\n\n else:\n metadata = json.load(metadata)\n validate(metadata, schema)\n metadata['graphemes'] = [char for char in ''.join(nn.codec.c2l.keys())]\n oid = repo.publish_model(model, metadata, access_token, partial(message, '.', nl=False))\n message('\\nmodel PID: {}'.format(oid))\n\n\nif __name__ == '__main__':\n cli()\n"
] |
[
[
"torch.manual_seed",
"torch.utils.data.DataLoader",
"numpy.random.shuffle",
"numpy.std",
"numpy.random.normal",
"numpy.mean"
]
] |
adarbha/disaster_response
|
[
"34e1e3586caffb18e6d7f57a2c121426c02f1886"
] |
[
"app/run.py"
] |
[
"import json\nimport plotly\nimport pandas as pd\n\n\nfrom flask import Flask\nfrom flask import render_template, request, jsonify\nfrom plotly.graph_objs import Bar\nfrom sklearn.externals import joblib\nfrom sqlalchemy import create_engine\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem.porter import PorterStemmer\n\n\n\napp = Flask(__name__)\n\ndef tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens\n\n# load data\nengine = create_engine('sqlite:////home/workspace/data/DisasterResponse.db')\ndf = pd.read_sql_table('msg_cat', engine)\n# Need to remove child_alone column\ndf = df.drop(columns = ['original','child_alone'])\n\n# load model\nmodel = joblib.load(\"/home/workspace/models/test_0.pkl\")\n\n\n# index webpage displays cool visuals and receives user input text for model\[email protected]('/')\[email protected]('/index')\ndef index():\n \n # extract data needed for visuals\n # TODO: Below is an example - modify to extract data for your own visuals\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n \n # create visuals\n # TODO: Below is an example - modify to create your own visuals\n graphs = [\n {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)\n\n\n# web page that handles user query and displays model results\[email protected]('/go')\ndef go():\n # save user input in query\n query = request.args.get('query', '') \n\n # use model to predict classification for query\n print(query)\n classification_labels = model.predict([query])[0]\n classification_probas = model.predict_proba([query])\n classification_probas = [i.tolist()[0][1] for i in classification_probas]\n print('classification_probas')\n classification_results = dict(zip(df.columns[3:], classification_labels))\n classification_results_ = dict(zip(classification_results.keys(), list(zip(classification_labels.tolist(), classification_probas))))\n\n\n # This will render the go.html Please see that file. \n return render_template(\n 'go.html',\n query=query,\n classification_result=classification_results_\n )\n\n\ndef main():\n app.run(host='0.0.0.0', port=3001, debug=True)\n\n\nif __name__ == '__main__':\n main()"
] |
[
[
"pandas.read_sql_table",
"sklearn.externals.joblib.load"
]
] |
cojo24/eia
|
[
"b2115a4cbc2e30c23abcbf4060deef962d3f82f9"
] |
[
"schema.py"
] |
[
"import datetime as dt\nimport pandas as pd\n\nfrom .client import EIA\nfrom collections import namedtuple\nfrom typing import Optional, Union\n\n\nCategoryCollection = namedtuple('CategoryCollection', 'items')\nSeriesCollection = namedtuple('SeriesCollection', 'items')\n\n\nclass Category:\n\n def __init__(\n self,\n category_id: Union[int, str],\n name: str,\n notes: str,\n parent_category_id: Union[int, str],\n childcategories: Optional[CategoryCollection] = None,\n childseries: Optional[SeriesCollection] = None,\n ):\n self.category_id = category_id\n self.name = name\n self.notes = notes\n self.parent_category_id = parent_category_id\n self.childcategories = CategoryCollection([]) if childcategories is None else childcategories\n self.childseries = SeriesCollection([]) if childseries is None else childseries\n\n def __repr__(self):\n cat = '[ Category: {}'.format(self.name)\n\n attrs = [\n '\\n\\t{} = {}'.format(a, self.__getattribute__(a))\n for a in self.__dir__()\n if not a.startswith('__') and a not in ['childcategories', 'childseries']\n if a in [\n 'category_id',\n 'name',\n 'notes',\n 'parent_category_id',\n ]\n ]\n\n ccats = self.__getattribute__('childcategories')\n cseries = self.__getattribute__('childseries')\n\n attrs.append('\\n\\tchildcategories = [{} Category Objects]'.format(len(ccats.items)))\n attrs.append('\\n\\tchildseries = [{} Series Objects]'.format(len(cseries.items)))\n\n attrs = ''.join(attrs)\n cat = cat + attrs + ' ]'\n\n return cat\n\n @classmethod\n def from_category_id(cls, category_id: Union[int, str], eia_client: EIA, load_series=False):\n json = eia_client.get_category(category_id)\n json = json['category']\n category = cls(\n category_id=json['category_id'],\n name=json['name'],\n notes=json['notes'],\n parent_category_id=json['parent_category_id'],\n )\n # For each child category/series:\n # Ping EIA api and instantiate classes\n childcategories = CategoryCollection([\n # recursive call\n cls.from_category_id(c['category_id'], eia_client, load_series=load_series)\n for c in json['childcategories']\n ])\n category.childcategories = childcategories\n\n if load_series is True:\n childseries = SeriesCollection([\n Series.from_series_id(s['series_id'], eia_client)\n for s in json['childseries']\n ])\n for s in childseries.items:\n s.category_id = category.category_id\n category.childseries = childseries\n\n return category\n\n\nclass Series:\n\n def __init__(\n self,\n series_id: str,\n name: str,\n units: str,\n freq: str,\n desc: str,\n start: dt.datetime,\n end: dt.datetime,\n updated: dt.datetime,\n data: pd.DataFrame,\n category_id: Optional[Union[int, str]] = None,\n ):\n self.series_id = series_id\n self.name = name\n self.units = units\n self.freq = freq\n self.desc = desc\n self.start = start\n self.end = end\n self.updated = updated\n self.data = data\n self.category_id = category_id\n\n def __repr__(self):\n s = '[ Series: {}'.format(self.name)\n attrs = ''.join([\n '\\n\\t{} = {}'.format(a, self.__getattribute__(a))\n for a in self.__dir__()\n if a in [\n 'series_id',\n 'name',\n 'units',\n 'freq',\n 'desc',\n 'start',\n 'end',\n 'updated',\n 'category_id',\n ]\n ])\n s = s + attrs + ' ]'\n return s\n\n @classmethod\n def from_series_id(cls, series_id: Union[int, str], eia_client: EIA, dt_format=None):\n\n json = eia_client.get_series(series_id)\n json = json['series'][0]\n\n data = pd.DataFrame(json['data'], columns=['Period', json['series_id']])\n data['Period'] = pd.to_datetime(data['Period'], format=dt_format)\n data = 
data.set_index('Period')\n\n start = pd.to_datetime(json['start'], format=dt_format)\n end = pd.to_datetime(json['end'], format=dt_format)\n updated = pd.to_datetime(json['updated'])\n\n return cls(\n series_id=json['series_id'],\n name=json.get('name', ''),\n units=json['units'],\n freq=json['f'],\n desc=json.get('description', ''),\n start=start,\n end=end,\n updated=updated,\n data=data,\n )\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
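The pandas calls listed for this entry (`pandas.DataFrame`, `pandas.to_datetime`) are the ones `Series.from_series_id` uses to turn the EIA JSON payload into a time-indexed frame. A minimal standalone sketch of that pattern follows; the sample payload, the series id, and the `%Y%m` format string are illustrative assumptions, not values taken from the code above.

```python
import pandas as pd

# Hypothetical payload shaped like the 'series' element an EIA client call returns.
sample_json = {
    "series_id": "ELEC.GEN.EXAMPLE-US.M",   # illustrative id, not a real lookup
    "data": [["201901", 345.2], ["201902", 330.1]],
}

# Same pattern as Series.from_series_id: build the frame, parse the period column,
# and index the data by period.
data = pd.DataFrame(sample_json["data"], columns=["Period", sample_json["series_id"]])
data["Period"] = pd.to_datetime(data["Period"], format="%Y%m")  # assumes monthly YYYYMM periods
data = data.set_index("Period")
print(data)
```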
teejaytanmay/image_object_localization_flipkart
|
[
"ca0976a7df1280be942d666cdebea110e1a70633"
] |
[
"Round3/train.py"
] |
[
"# coding: utf-8\n\nimport keras\nfrom keras.layers import Dense, Conv2D, BatchNormalization, Activation\nfrom keras.layers import AveragePooling2D, MaxPooling2D, Input, Flatten\nfrom keras.optimizers import Adam\nfrom keras.regularizers import l2\nfrom keras import backend as K\nfrom keras.models import Model,load_model\t\nfrom keras.callbacks import ModelCheckpoint,LearningRateScheduler,ReduceLROnPlateau\nimport utils\nimport numpy as np\nimport tensorflow as tf\n\n\n\ndata_train,box_train,data_test,box_test=utils.getdata()\n\n# metric function\ndef my_metric(labels,predictions):\n threshhold=0.9\n x=predictions[:,0]*20\n x=tf.maximum(tf.minimum(x,192.0),0.0)\n y=predictions[:,1]*20\n y=tf.maximum(tf.minimum(y,144.0),0.0)\n width=predictions[:,2]*20\n width=tf.maximum(tf.minimum(width,192.0),0.0)\n height=predictions[:,3]*20\n height=tf.maximum(tf.minimum(height,144.0),0.0)\n label_x=labels[:,0]\n label_y=labels[:,1]\n label_width=labels[:,2]\n label_height=labels[:,3]\n a1=tf.multiply(width,height)\n a2=tf.multiply(label_width,label_height)\n x1=tf.maximum(x,label_x)\n y1=tf.maximum(y,label_y)\n x2=tf.minimum(x+width,label_x+label_width)\n y2=tf.minimum(y+height,label_y+label_height)\n IoU=tf.abs(tf.multiply((x1-x2),(y1-y2)))/(a1+a2-tf.abs(tf.multiply((x1-x2),(y1-y2))))\n condition=tf.less(threshhold,IoU)\n sum=tf.where(condition,tf.ones(tf.shape(condition)),tf.zeros(tf.shape(condition)))\n return tf.reduce_mean(sum)\n\n# loss function\ndef smooth_l1_loss(true_box,pred_box):\n loss=0.0\n for i in range(4):\n residual=tf.abs(true_box[:,i]-pred_box[:,i])\n condition=tf.less(residual,1.0)\n small_res=0.5*tf.square(residual)\n large_res=residual-0.5\n loss=loss+tf.where(condition,small_res,large_res)\n return tf.reduce_mean(loss)\n\n\ndef resnet_block(inputs,num_filters,kernel_size,strides,activation='relu'):\n x=Conv2D(num_filters,kernel_size=kernel_size,strides=strides,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(inputs)\n x=BatchNormalization()(x)\n if(activation):\n x=Activation('relu')(x)\n return x\n\n\ndef resnet18():\n inputs=Input((192,144,3))\n\n # conv1\n x=resnet_block(inputs,64,[7,7],2)\n\n # conv2\n x=MaxPooling2D([3,3],2,'same')(x)\n for i in range(2):\n a=resnet_block(x,64,[3,3],1)\n b=resnet_block(a,64,[3,3],1,activation=None)\n x=keras.layers.add([x,b])\n x=Activation('relu')(x)\n\n # conv3\n a=resnet_block(x,128,[1,1],2)\n b=resnet_block(a,128,[3,3],1,activation=None)\n x=Conv2D(128,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)\n x=keras.layers.add([x,b])\n x=Activation('relu')(x)\n\n a=resnet_block(x,128,[3,3],1)\n b=resnet_block(a,128,[3,3],1,activation=None)\n x=keras.layers.add([x,b])\n x=Activation('relu')(x)\n\n # conv4\n a=resnet_block(x,256,[1,1],2)\n b=resnet_block(a,256,[3,3],1,activation=None)\n x=Conv2D(256,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)\n x=keras.layers.add([x,b])\n x=Activation('relu')(x)\n\n a=resnet_block(x,256,[3,3],1)\n b=resnet_block(a,256,[3,3],1,activation=None)\n x=keras.layers.add([x,b])\n x=Activation('relu')(x)\n\n # conv5\n a=resnet_block(x,512,[1,1],2)\n b=resnet_block(a,512,[3,3],1,activation=None)\n x=Conv2D(512,kernel_size=[1,1],strides=2,padding='same',kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(x)\n x=keras.layers.add([x,b])\n x=Activation('relu')(x)\n\n a=resnet_block(x,512,[3,3],1)\n b=resnet_block(a,512,[3,3],1,activation=None)\n x=keras.layers.add([x,b])\n 
x=Activation('relu')(x)\n\n x=AveragePooling2D(pool_size=1,data_format=\"channels_last\")(x)\n # out:1*1*512\n\n y=Flatten()(x)\n # out:512\n y=Dense(1000,kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(y)\n outputs=Dense(4,kernel_initializer='he_normal',kernel_regularizer=l2(1e-3))(y)\n\n model=Model(inputs=inputs,outputs=outputs)\n return model\n\nmodel = resnet18()\n\nmodel.compile(loss=smooth_l1_loss,optimizer=Adam(),metrics=[my_metric])\n\nmodel.summary()\n\n\ndef lr_sch(epoch):\n #200 total\n if epoch <50:\n return 1e-3\n if 50<=epoch<100:\n return 1e-4\n if epoch>=100:\n return 1e-5\n\nlr_scheduler=LearningRateScheduler(lr_sch)\nlr_reducer=ReduceLROnPlateau(monitor='val_my_metric',factor=0.2,patience=5,mode='max',min_lr=1e-3)\n\ncheckpoint=ModelCheckpoint('model5.h5',monitor='val_loss',verbose=0,save_best_only=True,mode='auto')\n\nmodel_details=model.fit(data_train,box_train,batch_size=128,epochs=55,shuffle=True,validation_split=0.1,callbacks=[lr_scheduler,lr_reducer,checkpoint],verbose=1)\n\nmodel.save('model.h5')\n\nscores=model.evaluate(data_test,box_test,verbose=1)\nprint('Test loss : ',scores[0])\nprint('Test accuracy : ',scores[1])\n\nutils.plot_model(model_details)\n"
] |
[
[
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.less",
"tensorflow.maximum",
"tensorflow.shape",
"tensorflow.minimum",
"tensorflow.square",
"tensorflow.where",
"tensorflow.abs"
]
] |
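The TensorFlow ops listed for this entry (`tf.maximum`, `tf.minimum`, `tf.where`, `tf.reduce_mean`, ...) are used in `train.py` to build a thresholded-IoU metric and a smooth-L1 loss. Below is a minimal, self-contained sketch of the thresholded-IoU idea using the same primitives; it clamps the intersection at zero for non-overlapping boxes and skips the prediction scaling/clipping the script applies, so it illustrates the metric's shape rather than reproducing it exactly.

```python
import tensorflow as tf

def iou_fraction(labels, preds, threshold=0.9):
    # Boxes are rows of (x, y, width, height).
    x1 = tf.maximum(labels[:, 0], preds[:, 0])
    y1 = tf.maximum(labels[:, 1], preds[:, 1])
    x2 = tf.minimum(labels[:, 0] + labels[:, 2], preds[:, 0] + preds[:, 2])
    y2 = tf.minimum(labels[:, 1] + labels[:, 3], preds[:, 1] + preds[:, 3])
    inter = tf.maximum(x2 - x1, 0.0) * tf.maximum(y2 - y1, 0.0)   # zero if boxes do not overlap
    union = labels[:, 2] * labels[:, 3] + preds[:, 2] * preds[:, 3] - inter
    iou = inter / tf.maximum(union, 1e-7)
    hits = tf.where(tf.less(threshold, iou), tf.ones_like(iou), tf.zeros_like(iou))
    return tf.reduce_mean(hits)   # fraction of predictions above the IoU threshold

labels = tf.constant([[0.0, 0.0, 10.0, 10.0]])
preds = tf.constant([[1.0, 1.0, 10.0, 10.0]])
print(float(iou_fraction(labels, preds)))  # 0.0 here: IoU is about 0.68, below the 0.9 threshold
```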
mehrdad-shokri/tensornets
|
[
"e36eff73e5fc984977c5ceadefc1adb089e7bab5"
] |
[
"tensornets/contrib_layers/optimizers.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Optimizer ops for use in layers and tf.learn.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\n\nfrom .. import contrib_framework\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.ops import variables as vars_\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import moving_averages\nfrom tensorflow.python.training import optimizer as optimizer_\nfrom tensorflow.python.training import training as train\n\nOPTIMIZER_CLS_NAMES = {\n \"Adagrad\": train.AdagradOptimizer,\n \"Adam\": train.AdamOptimizer,\n \"Ftrl\": train.FtrlOptimizer,\n \"Momentum\": lambda learning_rate: train.MomentumOptimizer(learning_rate, momentum=0.9), # pylint: disable=line-too-long\n \"RMSProp\": train.RMSPropOptimizer,\n \"SGD\": train.GradientDescentOptimizer,\n}\n\nOPTIMIZER_SUMMARIES = [\n \"learning_rate\",\n \"loss\",\n \"gradients\",\n \"gradient_norm\",\n \"global_gradient_norm\",\n]\n\n\ndef optimize_loss(loss,\n global_step,\n learning_rate,\n optimizer,\n gradient_noise_scale=None,\n gradient_multipliers=None,\n clip_gradients=None,\n learning_rate_decay_fn=None,\n update_ops=None,\n variables=None,\n name=None,\n summaries=None,\n colocate_gradients_with_ops=False,\n increment_global_step=True):\n \"\"\"Given loss and parameters for optimizer, returns a training op.\n\n Various ways of passing optimizers include:\n\n - by string specifying the name of the optimizer. See OPTIMIZER_CLS_NAMES\n for full list. E.g. `optimize_loss(..., optimizer='Adam')`.\n - by function taking learning rate `Tensor` as argument and returning an\n `Optimizer` instance. E.g. `optimize_loss(...,\n optimizer=lambda lr: tf.compat.v1.train.MomentumOptimizer(lr,\n momentum=0.5))`.\n Alternatively, if `learning_rate` is `None`, the function takes no\n arguments. E.g. `optimize_loss(..., learning_rate=None,\n optimizer=lambda: tf.compat.v1.train.MomentumOptimizer(0.5,\n momentum=0.5))`.\n - by a subclass of `Optimizer` having a single-argument constructor\n (the argument is the learning rate), such as AdamOptimizer or\n AdagradOptimizer. E.g. 
`optimize_loss(...,\n optimizer=tf.compat.v1.train.AdagradOptimizer)`.\n - by an instance of a subclass of `Optimizer`.\n E.g., `optimize_loss(...,\n optimizer=tf.compat.v1.train.AdagradOptimizer(0.5))`.\n\n Args:\n loss: Scalar `Tensor`.\n global_step: Scalar int `Tensor`, step counter to update on each step unless\n `increment_global_step` is `False`. If not supplied, it will be fetched\n from the default graph (see `tf.compat.v1.train.get_global_step` for\n details). If it has not been created, no step will be incremented with\n each weight update. `learning_rate_decay_fn` requires `global_step`.\n learning_rate: float or `Tensor`, magnitude of update per each training\n step. Can be `None`.\n optimizer: string, class or optimizer instance, used as trainer. string\n should be name of optimizer, like 'SGD', 'Adam', 'Adagrad'. Full list in\n OPTIMIZER_CLS_NAMES constant. class should be sub-class of `tf.Optimizer`\n that implements `compute_gradients` and `apply_gradients` functions.\n optimizer instance should be instantiation of `tf.Optimizer` sub-class and\n have `compute_gradients` and `apply_gradients` functions.\n gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this\n value.\n gradient_multipliers: dict of variables or variable names to floats. If\n present, gradients for specified variables will be multiplied by given\n constant.\n clip_gradients: float, callable or `None`. If a float is provided, a global\n clipping is applied to prevent the norm of the gradient from exceeding\n this value. Alternatively, a callable can be provided, e.g.,\n `adaptive_clipping_fn()`. This callable takes a list of `(gradients,\n variables)` tuples and returns the same thing with the gradients modified.\n learning_rate_decay_fn: function, takes `learning_rate` and `global_step`\n `Tensor`s, returns `Tensor`. Can be used to implement any learning rate\n decay functions.\n For example: `tf.compat.v1.train.exponential_decay`.\n Ignored if `learning_rate` is not supplied.\n update_ops: list of update `Operation`s to execute at each step. If `None`,\n uses elements of UPDATE_OPS collection. The order of execution between\n `update_ops` and `loss` is non-deterministic.\n variables: list of variables to optimize or `None` to use all trainable\n variables.\n name: The name for this operation is used to scope operations and summaries.\n summaries: List of internal quantities to visualize on tensorboard. If not\n set, the loss, the learning rate, and the global norm of the gradients\n will be reported. The complete list of possible values is in\n OPTIMIZER_SUMMARIES.\n colocate_gradients_with_ops: If True, try colocating gradients with the\n corresponding op.\n increment_global_step: Whether to increment `global_step`. If your model\n calls `optimize_loss` multiple times per training step (e.g. 
to optimize\n different parts of the model), use this arg to avoid incrementing\n `global_step` more times than necessary.\n\n Returns:\n Training op.\n\n Raises:\n ValueError: if:\n * `loss` is an invalid type or shape.\n * `global_step` is an invalid type or shape.\n * `learning_rate` is an invalid type or value.\n * `optimizer` has the wrong type.\n * `clip_gradients` is neither float nor callable.\n * `learning_rate` and `learning_rate_decay_fn` are supplied, but no\n `global_step` is available.\n * `gradients` is empty.\n \"\"\"\n loss = ops.convert_to_tensor(loss)\n contrib_framework.assert_scalar(loss)\n if global_step is None:\n global_step = train.get_global_step()\n else:\n train.assert_global_step(global_step)\n with vs.variable_scope(name, \"OptimizeLoss\", [loss, global_step]):\n # Update ops take UPDATE_OPS collection if not provided.\n if update_ops is None:\n update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))\n # Make sure update ops are ran before computing loss.\n if update_ops:\n loss = control_flow_ops.with_dependencies(list(update_ops), loss)\n\n # Learning rate variable, with possible decay.\n lr = None\n if learning_rate is not None:\n if (isinstance(learning_rate, ops.Tensor) and\n learning_rate.get_shape().ndims == 0):\n lr = learning_rate\n elif isinstance(learning_rate, float):\n if learning_rate < 0.0:\n raise ValueError(\"Invalid learning_rate %s.\", learning_rate)\n lr = vs.get_variable(\n \"learning_rate\", [],\n trainable=False,\n initializer=init_ops.constant_initializer(learning_rate))\n else:\n raise ValueError(\"Learning rate should be 0d Tensor or float. \"\n \"Got %s of type %s\" %\n (str(learning_rate), str(type(learning_rate))))\n if summaries is None:\n summaries = [\"loss\", \"learning_rate\", \"global_gradient_norm\"]\n else:\n for summ in summaries:\n if summ not in OPTIMIZER_SUMMARIES:\n raise ValueError(\"Summaries should be one of [%s], you provided %s.\" %\n (\", \".join(OPTIMIZER_SUMMARIES), summ))\n if learning_rate is not None and learning_rate_decay_fn is not None:\n if global_step is None:\n raise ValueError(\"global_step is required for learning_rate_decay_fn.\")\n lr = learning_rate_decay_fn(lr, global_step)\n if \"learning_rate\" in summaries:\n summary.scalar(\"learning_rate\", lr)\n\n # Create optimizer, given specified parameters.\n if isinstance(optimizer, six.string_types):\n if lr is None:\n raise ValueError(\"Learning rate is None, but should be specified if \"\n \"optimizer is string (%s).\" % optimizer)\n if optimizer not in OPTIMIZER_CLS_NAMES:\n raise ValueError(\n \"Optimizer name should be one of [%s], you provided %s.\" %\n (\", \".join(OPTIMIZER_CLS_NAMES), optimizer))\n opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)\n elif (isinstance(optimizer, type) and\n issubclass(optimizer, optimizer_.Optimizer)):\n if lr is None:\n raise ValueError(\"Learning rate is None, but should be specified if \"\n \"optimizer is class (%s).\" % optimizer)\n opt = optimizer(learning_rate=lr)\n elif isinstance(optimizer, optimizer_.Optimizer):\n opt = optimizer\n elif callable(optimizer):\n if learning_rate is not None:\n opt = optimizer(lr)\n else:\n opt = optimizer()\n if not isinstance(opt, optimizer_.Optimizer):\n raise ValueError(\"Unrecognized optimizer: function should return \"\n \"subclass of Optimizer. Got %s.\" % str(opt))\n else:\n raise ValueError(\"Unrecognized optimizer: should be string, \"\n \"subclass of Optimizer, instance of \"\n \"subclass of Optimizer or function with one argument. 
\"\n \"Got %s.\" % str(optimizer))\n\n # All trainable variables, if specific variables are not specified.\n if variables is None:\n variables = vars_.trainable_variables()\n\n # Compute gradients.\n gradients = opt.compute_gradients(\n loss,\n variables,\n colocate_gradients_with_ops=colocate_gradients_with_ops)\n\n # Optionally add gradient noise.\n if gradient_noise_scale is not None:\n gradients = _add_scaled_noise_to_gradients(gradients,\n gradient_noise_scale)\n\n # Multiply some gradients.\n if gradient_multipliers is not None:\n gradients = _multiply_gradients(gradients, gradient_multipliers)\n if not gradients:\n raise ValueError(\n \"Empty list of (gradient, var) pairs encountered. This is most \"\n \"likely to be caused by an improper value of gradient_multipliers.\")\n\n if \"global_gradient_norm\" in summaries or \"gradient_norm\" in summaries:\n summary.scalar(\"global_norm/gradient_norm\",\n clip_ops.global_norm(list(zip(*gradients))[0]))\n\n # Optionally clip gradients by global norm.\n if isinstance(clip_gradients, float):\n gradients = _clip_gradients_by_norm(gradients, clip_gradients)\n elif callable(clip_gradients):\n gradients = clip_gradients(gradients)\n elif clip_gradients is not None:\n raise ValueError(\"Unknown type %s for clip_gradients\" %\n type(clip_gradients))\n\n # Add scalar summary for loss.\n if \"loss\" in summaries:\n summary.scalar(\"loss\", loss)\n\n # Add histograms for variables, gradients and gradient norms.\n for gradient, variable in gradients:\n if isinstance(gradient, ops.IndexedSlices):\n grad_values = gradient.values\n else:\n grad_values = gradient\n\n if grad_values is not None:\n var_name = variable.name.replace(\":\", \"_\")\n if \"gradients\" in summaries:\n summary.histogram(\"gradients/%s\" % var_name, grad_values)\n if \"gradient_norm\" in summaries:\n summary.scalar(\"gradient_norm/%s\" % var_name,\n clip_ops.global_norm([grad_values]))\n\n if clip_gradients is not None and (\"global_gradient_norm\" in summaries or\n \"gradient_norm\" in summaries):\n summary.scalar(\"global_norm/clipped_gradient_norm\",\n clip_ops.global_norm(list(zip(*gradients))[0]))\n\n # Create gradient updates.\n grad_updates = opt.apply_gradients(\n gradients,\n global_step=global_step if increment_global_step else None,\n name=\"train\")\n\n # Ensure the train_tensor computes grad_updates.\n train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)\n\n return train_tensor\n\n\ndef _clip_gradients_by_norm(grads_and_vars, clip_gradients):\n \"\"\"Clips gradients by global norm.\"\"\"\n gradients, variables = zip(*grads_and_vars)\n clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)\n return list(zip(clipped_gradients, variables))\n\n\ndef _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):\n \"\"\"Find max_norm given norm and previous average.\"\"\"\n with vs.variable_scope(name, \"AdaptiveMaxNorm\", [norm]):\n log_norm = math_ops.log(norm + epsilon)\n\n def moving_average(name, value, decay):\n moving_average_variable = vs.get_variable(\n name,\n shape=value.get_shape(),\n dtype=value.dtype,\n initializer=init_ops.zeros_initializer(),\n trainable=False)\n return moving_averages.assign_moving_average(\n moving_average_variable, value, decay, zero_debias=False)\n\n # quicker adaptation at the beginning\n if global_step is not None:\n n = math_ops.cast(global_step, dtypes.float32)\n decay = math_ops.minimum(decay, n / (n + 1.))\n\n # update averages\n mean = moving_average(\"mean\", log_norm, decay)\n 
sq_mean = moving_average(\"sq_mean\", math_ops.square(log_norm), decay)\n\n variance = sq_mean - math_ops.square(mean)\n std = math_ops.sqrt(math_ops.maximum(epsilon, variance))\n max_norms = math_ops.exp(mean + std_factor * std)\n return max_norms, mean\n\n\ndef adaptive_clipping_fn(std_factor=2.,\n decay=0.95,\n static_max_norm=None,\n global_step=None,\n report_summary=False,\n epsilon=1e-8,\n name=None):\n \"\"\"Adapt the clipping value using statistics on the norms.\n\n Implement adaptive gradient as presented in section 3.2.1 of\n https://arxiv.org/abs/1412.1602.\n\n Keeps a moving average of the mean and std of the log(norm) of the gradient.\n If the norm exceeds `exp(mean + std_factor*std)` then all gradients will be\n rescaled such that the global norm becomes `exp(mean)`.\n\n Args:\n std_factor: Python scaler (or tensor). `max_norm = exp(mean +\n std_factor*std)`\n decay: The smoothing factor of the moving averages.\n static_max_norm: If provided, will threshold the norm to this value as an\n extra safety.\n global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.\n This provides a quicker adaptation of the mean for the first steps.\n report_summary: If `True`, will add histogram summaries of the `max_norm`.\n epsilon: Small value chosen to avoid zero variance.\n name: The name for this operation is used to scope operations and summaries.\n\n Returns:\n A function for applying gradient clipping.\n \"\"\"\n\n def gradient_clipping(grads_and_vars):\n \"\"\"Internal function for adaptive clipping.\"\"\"\n grads, variables = zip(*grads_and_vars)\n\n norm = clip_ops.global_norm(grads)\n\n max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,\n global_step, epsilon, name)\n\n # reports the max gradient norm for debugging\n if report_summary:\n summary.scalar(\"global_norm/adaptive_max_gradient_norm\", max_norm)\n\n # factor will be 1. 
if norm is smaller than max_norm\n factor = array_ops.where(norm < max_norm, array_ops.ones_like(norm),\n math_ops.exp(log_mean) / norm)\n\n if static_max_norm is not None:\n factor = math_ops.minimum(static_max_norm / norm, factor)\n\n # apply factor\n clipped_grads = []\n for grad in grads:\n if grad is None:\n clipped_grads.append(None)\n elif isinstance(grad, ops.IndexedSlices):\n clipped_grads.append(\n ops.IndexedSlices(grad.values * factor, grad.indices,\n grad.dense_shape))\n else:\n clipped_grads.append(grad * factor)\n\n return list(zip(clipped_grads, variables))\n\n return gradient_clipping\n\n\ndef _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):\n \"\"\"Adds scaled noise from a 0-mean normal distribution to gradients.\"\"\"\n gradients, variables = zip(*grads_and_vars)\n noisy_gradients = []\n for gradient in gradients:\n if gradient is None:\n noisy_gradients.append(None)\n continue\n if isinstance(gradient, ops.IndexedSlices):\n gradient_shape = gradient.dense_shape\n else:\n gradient_shape = gradient.get_shape()\n noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale\n noisy_gradients.append(gradient + noise)\n return list(zip(noisy_gradients, variables))\n\n\ndef _multiply_gradients(grads_and_vars, gradient_multipliers):\n \"\"\"Multiply specified gradients.\"\"\"\n multiplied_grads_and_vars = []\n for grad, var in grads_and_vars:\n if (grad is not None and\n (var in gradient_multipliers or var.name in gradient_multipliers)):\n key = var if var in gradient_multipliers else var.name\n multiplier = gradient_multipliers[key]\n if isinstance(grad, ops.IndexedSlices):\n grad_values = grad.values * multiplier\n grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)\n else:\n grad *= math_ops.cast(multiplier, grad.dtype)\n multiplied_grads_and_vars.append((grad, var))\n return multiplied_grads_and_vars\n"
] |
[
[
"tensorflow.python.ops.math_ops.log",
"tensorflow.python.training.training.assert_global_step",
"tensorflow.python.summary.summary.scalar",
"tensorflow.python.ops.math_ops.exp",
"tensorflow.python.summary.summary.histogram",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.ops.init_ops.constant_initializer",
"tensorflow.python.training.training.get_global_step",
"tensorflow.python.training.moving_averages.assign_moving_average",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.random_ops.truncated_normal",
"tensorflow.python.ops.math_ops.square",
"tensorflow.python.training.training.MomentumOptimizer",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.clip_ops.clip_by_global_norm",
"tensorflow.python.ops.array_ops.ones_like",
"tensorflow.python.ops.clip_ops.global_norm",
"tensorflow.python.ops.init_ops.zeros_initializer",
"tensorflow.python.ops.math_ops.maximum"
]
] |
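The `optimize_loss` docstring in this entry already shows how the optimizer argument may be passed (string name, class, instance, or callable), and `adaptive_clipping_fn` describes its clipping rule: keep a moving mean and std of `log(norm)`, and once a gradient norm exceeds `exp(mean + std_factor*std)`, rescale it so the global norm becomes `exp(mean)`. The snippet below is a plain-Python sketch of just that statistic, with a dict (seeded from the first observation) standing in for the TF moving-average variables and without the decay warm-up or per-tensor application the module performs; it is illustrative, not a drop-in replacement.

```python
import math

def adaptive_clip_factor(norm, state, std_factor=2.0, decay=0.95, epsilon=1e-8):
    # Update moving estimates of the mean and second moment of log(norm).
    log_norm = math.log(norm + epsilon)
    state["mean"] = decay * state.get("mean", log_norm) + (1 - decay) * log_norm
    state["sq_mean"] = decay * state.get("sq_mean", log_norm ** 2) + (1 - decay) * log_norm ** 2
    variance = max(epsilon, state["sq_mean"] - state["mean"] ** 2)
    max_norm = math.exp(state["mean"] + std_factor * math.sqrt(variance))
    # Below the adaptive threshold: leave gradients alone; above it: scale the
    # global norm back down to exp(mean).
    return 1.0 if norm < max_norm else math.exp(state["mean"]) / norm

state = {}
for norm in [1.0, 0.9, 1.1, 25.0]:            # the last value simulates a gradient-norm spike
    print(round(adaptive_clip_factor(norm, state), 4))
```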
DEVESHTARASIA/h2o-3
|
[
"9bd73fcedb4236b7ea8f214b36ca95f3e00d4548"
] |
[
"h2o-py/h2o/frame.py"
] |
[
"# -*- encoding: utf-8 -*-\n\"\"\"\nH2O data frame.\n\n:copyright: (c) 2016 H2O.ai\n:license: Apache License Version 2.0 (see LICENSE for details)\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport csv\nimport datetime\nimport functools\nimport os\nimport sys\nimport tempfile\nimport traceback\nimport warnings\nfrom io import StringIO\nfrom types import FunctionType\n\nimport requests\n\nimport h2o\nfrom h2o.display import H2ODisplay\nfrom h2o.exceptions import H2OTypeError, H2OValueError\nfrom h2o.expr import ExprNode\nfrom h2o.group_by import GroupBy\nfrom h2o.job import H2OJob\nfrom h2o.utils.compatibility import * # NOQA\nfrom h2o.utils.compatibility import viewitems, viewvalues\nfrom h2o.utils.config import get_config_value\nfrom h2o.utils.shared_utils import (_handle_numpy_array, _handle_pandas_data_frame, _handle_python_dicts,\n _handle_python_lists, _is_list, _is_str_list, _py_tmp_key, _quoted,\n can_use_pandas, quote, normalize_slice, slice_is_normalized, check_frame_id)\nfrom h2o.utils.typechecks import (assert_is_type, assert_satisfies, Enum, I, is_type, numeric, numpy_ndarray,\n numpy_datetime, pandas_dataframe, pandas_timestamp, scipy_sparse, U)\n\n__all__ = (\"H2OFrame\", )\n\n\n\n\nclass H2OFrame(object):\n \"\"\"\n Primary data store for H2O.\n\n H2OFrame is similar to pandas' ``DataFrame``, or R's ``data.frame``. One of the critical distinction is that the\n data is generally not held in memory, instead it is located on a (possibly remote) H2O cluster, and thus\n ``H2OFrame`` represents a mere handle to that data.\n \"\"\"\n\n #-------------------------------------------------------------------------------------------------------------------\n # Construction\n #-------------------------------------------------------------------------------------------------------------------\n\n def __init__(self, python_obj=None, destination_frame=None, header=0, separator=\",\",\n column_names=None, column_types=None, na_strings=None):\n \"\"\"\n Create a new H2OFrame object, possibly from some other object.\n\n :param python_obj: object that will be converted to an ``H2OFrame``. This could have multiple types:\n\n - None: create an empty H2OFrame\n - A list/tuple of strings or numbers: create a single-column H2OFrame containing the contents of this list.\n - A dictionary of ``{name: list}`` pairs: create an H2OFrame with multiple columns, each column having the\n provided ``name`` and contents from ``list``. If the source dictionary is not an OrderedDict, then the\n columns in the H2OFrame may appear shuffled.\n - A list of lists of strings/numbers: construct an H2OFrame from a rectangular table of values, with inner\n lists treated as rows of the table. I.e. ``H2OFrame([[1, 'a'], [2, 'b'], [3, 'c']])`` will create a\n frame with 3 rows and 2 columns, one numeric and one string.\n - A Pandas dataframe, or a Numpy ndarray: create a matching H2OFrame.\n - A Scipy sparse matrix: create a matching sparse H2OFrame.\n\n :param int header: if ``python_obj`` is a list of lists, this parameter can be used to indicate whether the\n first row of the data represents headers. The value of -1 means the first row is data, +1 means the first\n row is the headers, 0 (default) allows H2O to guess whether the first row contains data or headers.\n :param List[str] column_names: explicit list of column names for the new H2OFrame. This will override any\n column names derived from the data. 
If the python_obj does not contain explicit column names, and this\n parameter is not given, then the columns will be named \"C1\", \"C2\", \"C3\", etc.\n :param column_types: explicit column types for the new H2OFrame. This could be either a list of types for\n each column, or a dictionary of {column name: column type} pairs. In the latter case you may override\n types for only few columns, and let H2O choose the types of the rest.\n :param na_strings: List of strings in the input data that should be interpreted as missing values. This could\n be given on a per-column basis, either as a list-of-lists, or as a dictionary {column name: list of nas}.\n :param str destination_frame: (internal) name of the target DKV key in the H2O backend.\n :param str separator: (deprecated)\n \"\"\"\n coltype = U(None, \"unknown\", \"uuid\", \"string\", \"float\", \"real\", \"double\", \"int\", \"numeric\",\n \"categorical\", \"factor\", \"enum\", \"time\")\n assert_is_type(python_obj, None, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse)\n assert_is_type(destination_frame, None, str)\n assert_is_type(header, -1, 0, 1)\n assert_is_type(separator, I(str, lambda s: len(s) == 1))\n assert_is_type(column_names, None, [str])\n assert_is_type(column_types, None, [coltype], {str: coltype})\n assert_is_type(na_strings, None, [str], [[str]], {str: [str]})\n check_frame_id(destination_frame)\n\n self._ex = ExprNode()\n self._ex._children = None\n self._is_frame = True # Indicate that this is an actual frame, allowing typechecks to be made\n if python_obj is not None:\n self._upload_python_object(python_obj, destination_frame, header, separator,\n column_names, column_types, na_strings)\n\n @staticmethod\n def _expr(expr, cache=None):\n # TODO: merge this method with `__init__`\n fr = H2OFrame()\n fr._ex = expr\n if cache is not None:\n fr._ex._cache.fill_from(cache)\n return fr\n\n\n def _upload_python_object(self, python_obj, destination_frame=None, header=0, separator=\",\",\n column_names=None, column_types=None, na_strings=None):\n assert_is_type(python_obj, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse)\n if is_type(python_obj, scipy_sparse):\n self._upload_sparse_matrix(python_obj, destination_frame=destination_frame)\n return\n # TODO: all these _handlers should really belong to this class, not to shared_utils.\n processor = (_handle_pandas_data_frame if is_type(python_obj, pandas_dataframe) else\n _handle_numpy_array if is_type(python_obj, numpy_ndarray) else\n _handle_python_dicts if is_type(python_obj, dict) else\n _handle_python_lists)\n col_header, data_to_write = processor(python_obj, header)\n if col_header is None or data_to_write is None:\n raise H2OValueError(\"No data to write\")\n if not column_names:\n column_names = col_header\n\n # create a temporary file that will be written to\n tmp_handle, tmp_path = tempfile.mkstemp(suffix=\".csv\")\n tmp_file = os.fdopen(tmp_handle, 'w')\n # create a new csv writer object thingy\n csv_writer = csv.writer(tmp_file, dialect=\"excel\", quoting=csv.QUOTE_NONNUMERIC)\n csv_writer.writerow(column_names)\n if data_to_write and isinstance(data_to_write[0], dict):\n for row in data_to_write:\n csv_writer.writerow([row.get(k, None) for k in col_header])\n else:\n csv_writer.writerows(data_to_write)\n tmp_file.close() # close the streams\n self._upload_parse(tmp_path, destination_frame, 1, separator, column_names, column_types, na_strings)\n os.remove(tmp_path) # delete the tmp file\n\n\n def _upload_sparse_matrix(self, matrix, 
destination_frame=None):\n import scipy.sparse as sp\n if not sp.issparse(matrix):\n raise H2OValueError(\"A sparse matrix expected, got %s\" % type(matrix))\n\n tmp_handle, tmp_path = tempfile.mkstemp(suffix=\".svmlight\")\n out = os.fdopen(tmp_handle, \"wt\")\n if destination_frame is None:\n destination_frame = _py_tmp_key(h2o.connection().session_id)\n\n # sp.find(matrix) returns (row indices, column indices, values) of the non-zero elements of A. Unfortunately\n # there is no guarantee that those elements are returned in the correct order, so need to sort\n data = zip(*sp.find(matrix))\n if not isinstance(data, list): data = list(data) # possibly convert from iterator to a list\n data.sort()\n idata = 0 # index of the next element to be consumed from `data`\n for irow in range(matrix.shape[0]):\n if idata < len(data) and data[idata][0] == irow and data[idata][1] == 0:\n y = data[idata][2]\n idata += 1\n else:\n y = 0\n out.write(str(y))\n while idata < len(data) and data[idata][0] == irow:\n out.write(\" \")\n out.write(str(data[idata][1]))\n out.write(\":\")\n out.write(str(data[idata][2]))\n idata += 1\n out.write(\"\\n\")\n out.close()\n\n ret = h2o.api(\"POST /3/PostFile\", filename=tmp_path)\n os.remove(tmp_path)\n rawkey = ret[\"destination_frame\"]\n\n p = {\"source_frames\": [rawkey], \"destination_frame\": destination_frame}\n H2OJob(h2o.api(\"POST /3/ParseSVMLight\", data=p), \"Parse\").poll()\n self._ex._cache._id = destination_frame\n self._ex._cache.fill()\n\n\n @staticmethod\n def get_frame(frame_id):\n \"\"\"\n Retrieve an existing H2OFrame from the H2O cluster using the frame's id.\n\n :param str frame_id: id of the frame to retrieve\n :returns: an existing H2OFrame with the id provided; or None if such frame doesn't exist.\n \"\"\"\n fr = H2OFrame()\n fr._ex._cache._id = frame_id\n try:\n fr._ex._cache.fill()\n except EnvironmentError:\n return None\n return fr\n\n\n def refresh(self):\n \"\"\"Reload frame information from the backend H2O server.\"\"\"\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n\n\n\n #-------------------------------------------------------------------------------------------------------------------\n # Frame properties\n #-------------------------------------------------------------------------------------------------------------------\n\n @property\n def names(self):\n \"\"\"The list of column names (List[str]).\"\"\"\n if not self._ex._cache.names_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n return list(self._ex._cache.names)\n\n @names.setter\n def names(self, value):\n self.set_names(value)\n\n\n @property\n def nrows(self):\n \"\"\"Number of rows in the dataframe (int).\"\"\"\n if not self._ex._cache.nrows_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n return self._ex._cache.nrows\n\n\n @property\n def ncols(self):\n \"\"\"Number of columns in the dataframe (int).\"\"\"\n if not self._ex._cache.ncols_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n return self._ex._cache.ncols\n\n\n @property\n def shape(self):\n \"\"\"Number of rows and columns in the dataframe as a tuple ``(nrows, ncols)``.\"\"\"\n return self.nrows, self.ncols\n\n\n @property\n def types(self):\n \"\"\"The dictionary of column name/type pairs.\"\"\"\n if not self._ex._cache.types_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n return dict(self._ex._cache.types)\n\n\n @property\n def frame_id(self):\n \"\"\"Internal id of the frame (str).\"\"\"\n return 
self._frame()._ex._cache._id\n\n @frame_id.setter\n def frame_id(self, newid):\n check_frame_id(newid)\n if self._ex._cache._id is None:\n h2o.assign(self, newid)\n else:\n oldname = self.frame_id\n self._ex._cache._id = newid\n h2o.rapids(\"(rename \\\"{}\\\" \\\"{}\\\")\".format(oldname, newid))\n\n\n def type(self, col):\n \"\"\"\n The type for the given column.\n\n :param col: either a name, or an index of the column to look up\n :returns: type of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``.\n :raises H2OValueError: if such column does not exist in the frame.\n \"\"\"\n assert_is_type(col, int, str)\n if not self._ex._cache.types_valid() or not self._ex._cache.names_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n types = self._ex._cache.types\n if is_type(col, str):\n if col in types:\n return types[col]\n else:\n names = self._ex._cache.names\n if -len(names) <= col < len(names):\n return types[names[col]]\n raise H2OValueError(\"Column '%r' does not exist in the frame\" % col)\n\n\n def _import_parse(self, path, pattern, destination_frame, header, separator, column_names, column_types, na_strings):\n if is_type(path, str) and \"://\" not in path:\n path = os.path.abspath(path)\n rawkey = h2o.lazy_import(path, pattern)\n self._parse(rawkey, destination_frame, header, separator, column_names, column_types, na_strings)\n return self\n\n\n def _upload_parse(self, path, destination_frame, header, sep, column_names, column_types, na_strings):\n ret = h2o.api(\"POST /3/PostFile\", filename=path)\n rawkey = ret[\"destination_frame\"]\n self._parse(rawkey, destination_frame, header, sep, column_names, column_types, na_strings)\n return self\n\n\n def _parse(self, rawkey, destination_frame=\"\", header=None, separator=None, column_names=None, column_types=None,\n na_strings=None):\n setup = h2o.parse_setup(rawkey, destination_frame, header, separator, column_names, column_types, na_strings)\n return self._parse_raw(setup)\n\n\n def _parse_raw(self, setup):\n # Parse parameters (None values provided by setup)\n p = {\"destination_frame\": None,\n \"parse_type\": None,\n \"separator\": None,\n \"single_quotes\": None,\n \"check_header\": None,\n \"number_columns\": None,\n \"chunk_size\": None,\n \"delete_on_done\": True,\n \"blocking\": False,\n \"column_types\": None,\n }\n\n if setup[\"column_names\"]: p[\"column_names\"] = None\n if setup[\"na_strings\"]: p[\"na_strings\"] = None\n\n p.update({k: v for k, v in viewitems(setup) if k in p})\n\n # Extract only 'name' from each src in the array of srcs\n p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']]\n\n H2OJob(h2o.api(\"POST /3/Parse\", data=p), \"Parse\").poll()\n # Need to return a Frame here for nearly all callers\n # ... but job stats returns only a dest_key, requiring another REST call to get nrow/ncol\n self._ex._cache._id = p[\"destination_frame\"]\n self._ex._cache.fill()\n\n\n def filter_na_cols(self, frac=0.2):\n \"\"\"\n Filter columns with proportion of NAs greater or equals than ``frac``.\n\n :param float frac: Maximum fraction of NAs in the column to keep.\n\n :returns: A list of indices of columns that have fewer NAs than ``frac``. 
If all columns are filtered,\n None is returned.\n \"\"\"\n return ExprNode(\"filterNACols\", self, frac)._eager_scalar()\n\n\n def columns_by_type(self, coltype=\"numeric\"):\n \"\"\"\n Extract columns of the specified type from the frame.\n\n :param str coltype: A character string indicating which column type to filter by. This must be\n one of the following:\n\n - ``\"numeric\"`` - Numeric, but not categorical or time\n - ``\"categorical\"`` - Integer, with a categorical/factor String mapping\n - ``\"string\"`` - String column\n - ``\"time\"`` - Long msec since the Unix Epoch - with a variety of display/parse options\n - ``\"uuid\"`` - UUID\n - ``\"bad\"`` - No none-NA rows (triple negative! all NAs or zero rows)\n\n :returns: list of indices of columns that have the requested type\n \"\"\"\n assert_is_type(coltype, \"numeric\", \"categorical\", \"string\", \"time\", \"uuid\", \"bad\")\n assert_is_type(self, H2OFrame)\n return ExprNode(\"columnsByType\", self, coltype)._eager_scalar()\n\n\n def __iter__(self):\n return (self[i] for i in range(self.ncol))\n\n def __unicode__(self):\n if sys.gettrace() is None:\n if self._ex is None: return \"This H2OFrame has been removed.\"\n table = self._frame(fill_cache=True)._ex._cache._tabulate(\"simple\", False)\n nrows = \"%d %s\" % (self.nrow, \"row\" if self.nrow == 1 else \"rows\")\n ncols = \"%d %s\" % (self.ncol, \"column\" if self.ncol == 1 else \"columns\")\n return \"%s\\n\\n[%s x %s]\" % (table, nrows, ncols)\n return \"\"\n\n def __repr__(self):\n if sys.gettrace() is None:\n # PUBDEV-2278: using <method>? from IPython caused everything to dump\n stk = traceback.extract_stack()\n if not (\"IPython\" in stk[-2][0] and \"info\" == stk[-2][2]):\n self.show()\n return \"\"\n\n def show(self, use_pandas=False):\n \"\"\"\n Used by the H2OFrame.__repr__ method to print or display a snippet of the data frame.\n\n If called from IPython, displays an html'ized result. 
Else prints a tabulate'd result.\n \"\"\"\n if self._ex is None:\n print(\"This H2OFrame has been removed.\")\n return\n if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill()\n if H2ODisplay._in_ipy():\n import IPython.display\n if use_pandas and can_use_pandas():\n IPython.display.display(self.head().as_data_frame(fill_cache=True))\n else:\n IPython.display.display_html(self._ex._cache._tabulate(\"html\", False), raw=True)\n else:\n if use_pandas and can_use_pandas():\n print(self.head().as_data_frame(fill_cache=True))\n else:\n s = self.__unicode__()\n stk = traceback.extract_stack()\n if \"IPython\" in stk[-3][0]:\n s = \"\\n%s\" % s\n try:\n print(s)\n except UnicodeEncodeError:\n print(s.encode(\"ascii\", \"replace\"))\n\n\n def summary(self, return_data=False):\n \"\"\"\n Display summary information about the frame.\n\n Summary includes min/mean/max/sigma and other rollup data.\n :param bool return_data: Return a dictionary of the summary output\n \"\"\"\n if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill()\n if not return_data:\n if H2ODisplay._in_ipy():\n import IPython.display\n IPython.display.display_html(self._ex._cache._tabulate(\"html\", True), raw=True)\n else:\n print(self._ex._cache._tabulate(\"simple\", True))\n else:\n return self._ex._cache._data\n\n\n def describe(self, chunk_summary=False):\n \"\"\"\n Generate an in-depth description of this H2OFrame.\n\n This will print to the console the dimensions of the frame; names/types/summary statistics for each column;\n and finally first ten rows of the frame.\n\n :param bool chunk_summary: Retrieve the chunk summary along with the distribution summary\n \"\"\"\n res = h2o.api(\"GET /3/Frames/%s\" % self.frame_id, data={\"row_count\": 10})[\"frames\"][0]\n self._ex._cache._fill_data(res)\n print(\"Rows:{}\".format(self.nrow))\n print(\"Cols:{}\".format(self.ncol))\n\n #The chunk & distribution summaries are not cached, so must be pulled if chunk_summary=True.\n if chunk_summary:\n res[\"chunk_summary\"].show()\n res[\"distribution_summary\"].show()\n print(\"\\n\")\n self.summary()\n\n\n def _frame(self, rows=10, fill_cache=False):\n self._ex._eager_frame()\n if fill_cache:\n self._ex._cache.fill(rows=rows)\n return self\n\n\n def head(self, rows=10, cols=200):\n \"\"\"\n Return the first ``rows`` and ``cols`` of the frame as a new H2OFrame.\n\n :param int rows: maximum number of rows to return\n :param int cols: maximum number of columns to return\n :returns: a new H2OFrame cut from the top left corner of the current frame, and having dimensions at\n most ``rows`` x ``cols``.\n \"\"\"\n assert_is_type(rows, int)\n assert_is_type(cols, int)\n nrows = min(self.nrows, rows)\n ncols = min(self.ncols, cols)\n newdt = self[:nrows, :ncols]\n return newdt._frame(rows=nrows, fill_cache=True)\n\n\n def tail(self, rows=10, cols=200):\n \"\"\"\n Return the last ``rows`` and ``cols`` of the frame as a new H2OFrame.\n\n :param int rows: maximum number of rows to return\n :param int cols: maximum number of columns to return\n :returns: a new H2OFrame cut from the bottom left corner of the current frame, and having dimensions at\n most ``rows`` x ``cols``.\n \"\"\"\n assert_is_type(rows, int)\n assert_is_type(cols, int)\n nrows = min(self.nrows, rows)\n ncols = min(self.ncols, cols)\n start_idx = self.nrows - nrows\n newdt = self[start_idx:start_idx + nrows, :ncols]\n return newdt._frame(rows=nrows, fill_cache=True)\n\n\n def logical_negation(self):\n \"\"\"\n Returns new H2OFrame equal to elementwise Logical 
NOT applied to the current frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"not\", self), cache=self._ex._cache)\n\n\n def _unop(self, op, rtype=\"real\"):\n if self._is_frame:\n for cname, ctype in self.types.items():\n if ctype not in {\"int\", \"real\", \"bool\"}:\n raise H2OValueError(\"Function %s cannot be applied to %s column '%s'\" % (op, ctype, cname))\n ret = H2OFrame._expr(expr=ExprNode(op, self), cache=self._ex._cache)\n ret._ex._cache._names = [\"%s(%s)\" % (op, name) for name in self._ex._cache._names]\n ret._ex._cache._types = {name: rtype for name in ret._ex._cache._names}\n return ret\n\n\n # Binary operations\n def __add__(self, rhs):\n return _binop(self, \"+\", rhs)\n\n def __sub__(self, rhs):\n return _binop(self, \"-\", rhs)\n\n def __mul__(self, rhs):\n return _binop(self, \"*\", rhs)\n\n def __div__(self, rhs):\n return _binop(self, \"/\", rhs)\n\n def __truediv__(self, rhs):\n return _binop(self, \"/\", rhs)\n\n def __floordiv__(self, rhs):\n return _binop(self, \"intDiv\", rhs)\n\n def __mod__(self, rhs):\n return _binop(self, \"%\", rhs)\n\n def __or__(self, rhs):\n return _binop(self, \"|\", rhs, rtype=\"bool\")\n\n def __and__(self, rhs):\n return _binop(self, \"&\", rhs, rtype=\"bool\")\n\n def __ge__(self, rhs):\n return _binop(self, \">=\", rhs, rtype=\"bool\")\n\n def __gt__(self, rhs):\n return _binop(self, \">\", rhs, rtype=\"bool\")\n\n def __le__(self, rhs):\n return _binop(self, \"<=\", rhs, rtype=\"bool\")\n\n def __lt__(self, rhs):\n return _binop(self, \"<\", rhs, rtype=\"bool\")\n\n def __eq__(self, rhs):\n if rhs is None: rhs = float(\"nan\")\n return _binop(self, \"==\", rhs, rtype=\"bool\")\n\n def __ne__(self, rhs):\n if rhs is None: rhs = float(\"nan\")\n return _binop(self, \"!=\", rhs, rtype=\"bool\")\n\n def __pow__(self, rhs):\n return _binop(self, \"^\", rhs)\n\n def __contains__(self, lhs):\n return all((t == self).any() for t in lhs) if _is_list(lhs) else (lhs == self).any()\n\n # rops\n def __rmod__(self, lhs):\n return _binop(lhs, \"%\", self)\n\n def __radd__(self, lhs):\n return _binop(lhs, \"+\", self)\n\n def __rsub__(self, lhs):\n return _binop(lhs, \"-\", self)\n\n def __rand__(self, lhs):\n return _binop(lhs, \"&\", self, rtype=\"bool\")\n\n def __ror__(self, lhs):\n return _binop(lhs, \"|\", self, rtype=\"bool\")\n\n def __rtruediv__(self, lhs):\n return _binop(lhs, \"/\", self)\n\n def __rdiv__(self, lhs):\n return _binop(lhs, \"/\", self)\n\n def __rfloordiv__(self, lhs):\n return _binop(lhs, \"intDiv\", self, rtype=\"int\")\n\n def __rmul__(self, lhs):\n return _binop(lhs, \"*\", self)\n\n def __rpow__(self, lhs):\n return _binop(lhs, \"^\", self)\n\n # unops\n def __abs__(self):\n return self._unop(\"abs\")\n\n def __invert__(self):\n return self._unop(\"!!\", rtype=\"bool\")\n\n def __nonzero__(self):\n if self.nrows > 1 or self.ncols > 1:\n raise H2OValueError(\n 'This operation is not supported on an H2OFrame. Try using parentheses. 
'\n 'Did you mean & (logical and), | (logical or), or ~ (logical not)?')\n else:\n return self.__len__()\n\n def __int__(self):\n return int(self.flatten())\n\n def __float__(self):\n return float(self.flatten())\n\n\n def flatten(self):\n \"\"\"\n Convert a 1x1 frame into a scalar.\n\n :returns: content of this 1x1 frame as a scalar (``int``, ``float``, or ``str``).\n :raises H2OValueError: if current frame has shape other than 1x1\n \"\"\"\n if self.shape != (1, 1): raise H2OValueError(\"Not a 1x1 Frame\")\n return ExprNode(\"flatten\", self)._eager_scalar()\n\n\n def getrow(self):\n \"\"\"\n Convert a 1xn frame into an n-element list.\n\n :returns: content of this 1xn frame as a Python list.\n :raises H2OValueError: if current frame has more than one row.\n \"\"\"\n if self.nrows != 1:\n raise H2OValueError(\"This method can only be applied to single-row frames\")\n return ExprNode(\"getrow\", self)._eager_scalar()\n\n\n def mult(self, matrix):\n \"\"\"\n Multiply this frame, viewed as a matrix, by another matrix.\n\n :param matrix: another frame that you want to multiply the current frame by; must be compatible with the\n current frame (i.e. its number of rows must be the same as number of columns in the current frame).\n :returns: new H2OFrame, which is the result of multiplying the current frame by ``matrix``.\n \"\"\"\n if self.ncols != matrix.nrows:\n raise H2OValueError(\"Matrix is not compatible for multiplication with the current frame\")\n return H2OFrame._expr(expr=ExprNode(\"x\", self, matrix))\n\n\n def cos(self):\n \"\"\"Return new H2OFrame equal to elementwise cosine of the current frame.\"\"\"\n return self._unop(\"cos\")\n\n\n def sin(self):\n \"\"\"Return new H2OFrame equal to elementwise sine of the current frame.\"\"\"\n return self._unop(\"sin\")\n\n\n def tan(self):\n \"\"\"Return new H2OFrame equal to elementwise tangent of the current frame.\"\"\"\n return self._unop(\"tan\")\n\n\n def acos(self):\n \"\"\"Return new H2OFrame equal to elementwise arc cosine of the current frame.\"\"\"\n return self._unop(\"acos\")\n\n\n def asin(self):\n \"\"\"Return new H2OFrame equal to elementwise arc sine of the current frame.\"\"\"\n return self._unop(\"asin\")\n\n\n def atan(self):\n \"\"\"Return new H2OFrame equal to elementwise arc tangent of the current frame.\"\"\"\n return self._unop(\"atan\")\n\n\n def cosh(self):\n \"\"\"Make new H2OFrame with values equal to the hyperbolic cosines of the values in the current frame.\"\"\"\n return self._unop(\"cosh\")\n\n\n def sinh(self):\n \"\"\"Return new H2OFrame equal to elementwise hyperbolic sine of the current frame.\"\"\"\n return self._unop(\"sinh\")\n\n\n def tanh(self):\n \"\"\"Return new H2OFrame equal to elementwise hyperbolic tangent of the current frame.\"\"\"\n return self._unop(\"tanh\")\n\n\n def acosh(self):\n \"\"\"Return new H2OFrame equal to elementwise inverse hyperbolic cosine of the current frame.\"\"\"\n return self._unop(\"acosh\")\n\n\n def asinh(self):\n \"\"\"Return new H2OFrame equal to elementwise inverse hyperbolic sine of the current frame.\"\"\"\n return self._unop(\"asinh\")\n\n\n def atanh(self):\n \"\"\"Return new H2OFrame equal to elementwise inverse hyperbolic tangent of the current frame.\"\"\"\n return self._unop(\"atanh\")\n\n\n def cospi(self):\n \"\"\"Return new H2OFrame equal to elementwise cosine of the current frame multiplied by Pi.\"\"\"\n return self._unop(\"cospi\")\n\n\n def sinpi(self):\n \"\"\"Return new H2OFrame equal to elementwise sine of the current frame multiplied by 
Pi.\"\"\"\n return self._unop(\"sinpi\")\n\n\n def tanpi(self):\n \"\"\"Return new H2OFrame equal to elementwise tangent of the current frame multiplied by Pi.\"\"\"\n return self._unop(\"tanpi\")\n\n\n def abs(self):\n \"\"\"Return new H2OFrame equal to elementwise absolute value of the current frame.\"\"\"\n return self._unop(\"abs\")\n\n\n def sign(self):\n \"\"\"Return new H2OFrame equal to signs of the values in the frame: -1 , +1, or 0.\"\"\"\n return self._unop(\"sign\", rtype=\"int\")\n\n\n def sqrt(self):\n \"\"\"Return new H2OFrame equal to elementwise square root of the current frame.\"\"\"\n return self._unop(\"sqrt\")\n\n\n def trunc(self):\n \"\"\"\n Apply the numeric truncation function.\n\n ``trunc(x)`` is the integer obtained from ``x`` by dropping its decimal tail. This is equal to ``floor(x)``\n if ``x`` is positive, and ``ceil(x)`` if ``x`` is negative. Truncation is also called \"rounding towards zero\".\n\n :returns: new H2OFrame of truncated values of the original frame.\n \"\"\"\n return self._unop(\"trunc\", rtype=\"int\")\n\n\n def ceil(self):\n \"\"\"\n Apply the ceiling function to the current frame.\n\n ``ceil(x)`` is the smallest integer greater or equal to ``x``.\n\n :returns: new H2OFrame of ceiling values of the original frame.\n \"\"\"\n return self._unop(\"ceiling\", rtype=\"int\")\n\n\n def floor(self):\n \"\"\"\n Apply the floor function to the current frame.\n\n ``floor(x)`` is the largest integer smaller or equal to ``x``.\n\n :returns: new H2OFrame of floor values of the original frame.\n \"\"\"\n return self._unop(\"floor\", rtype=\"int\")\n\n\n def log(self):\n \"\"\"Return new H2OFrame equals to elementwise natural logarithm of the current frame.\"\"\"\n return self._unop(\"log\")\n\n\n def log10(self):\n \"\"\"Return new H2OFrame equals to elementwise decimal logarithm of the current frame.\"\"\"\n return self._unop(\"log10\")\n\n\n def log1p(self):\n \"\"\"Return new H2OFrame equals to elementwise ``ln(1 + x)`` for each ``x`` in the current frame.\"\"\"\n return self._unop(\"log1p\")\n\n\n def log2(self):\n \"\"\"Return new H2OFrame equals to elementwise binary logarithm of the current frame.\"\"\"\n return self._unop(\"log2\")\n\n\n def exp(self):\n \"\"\"Return new H2OFrame equals to elementwise exponent (i.e. ``e^x``) of the current frame.\"\"\"\n return self._unop(\"exp\")\n\n\n def expm1(self):\n \"\"\"Return new H2OFrame equals to elementwise exponent minus 1 (i.e. 
``e^x - 1``) of the current frame.\"\"\"\n return self._unop(\"expm1\")\n\n\n def gamma(self):\n \"\"\"Return new H2OFrame equals to elementwise gamma function of the current frame.\"\"\"\n return self._unop(\"gamma\")\n\n\n def lgamma(self):\n \"\"\"Return new H2OFrame equals to elementwise logarithm of the gamma function of the current frame.\"\"\"\n return self._unop(\"lgamma\")\n\n\n def digamma(self):\n \"\"\"Return new H2OFrame equals to elementwise digamma function of the current frame.\"\"\"\n return self._unop(\"digamma\")\n\n\n def trigamma(self):\n \"\"\"Return new H2OFrame equals to elementwise trigamma function of the current frame.\"\"\"\n return self._unop(\"trigamma\")\n\n\n @staticmethod\n def moment(year=None, month=None, day=None, hour=None, minute=None, second=None, msec=None, date=None, time=None):\n \"\"\"\n Create a time column from individual components.\n\n Each parameter should be either an integer, or a single-column H2OFrame\n containing the corresponding time parts for each row.\n\n The \"date\" part of the timestamp can be specified using either the tuple ``(year, month, day)``, or an\n explicit ``date`` parameter. The \"time\" part of the timestamp is optional, but can be specified either via\n the ``time`` parameter, or via the ``(hour, minute, second, msec)`` tuple.\n\n :param year: the year part of the constructed date\n :param month: the month part of the constructed date\n :param day: the day-of-the-month part of the constructed date\n :param hour: the hours part of the constructed date\n :param minute: the minutes part of the constructed date\n :param second: the seconds part of the constructed date\n :param msec: the milliseconds part of the constructed date\n :param date date: construct the timestamp from the Python's native ``datetime.date`` (or ``datetime.datetime``)\n object. If the object passed is of type ``date``, then you can specify the time part using either the\n ``time`` argument, or ``hour`` ... ``msec`` arguments (but not both). If the object passed is of type\n ``datetime``, then no other arguments can be provided.\n :param time time: construct the timestamp from this Python's native ``datetime.time`` object. This argument\n cannot be used alone, it should be supplemented with either ``date`` argument, or ``year`` ... 
``day``\n tuple.\n\n :returns: H2OFrame with one column containing the date constructed from the provided arguments.\n \"\"\"\n assert_is_type(date, None, datetime.date, numpy_datetime, pandas_timestamp)\n assert_is_type(time, None, datetime.time)\n assert_is_type(year, None, int, H2OFrame)\n assert_is_type(month, None, int, H2OFrame)\n assert_is_type(day, None, int, H2OFrame)\n assert_is_type(hour, None, int, H2OFrame)\n assert_is_type(minute, None, int, H2OFrame)\n assert_is_type(second, None, int, H2OFrame)\n assert_is_type(msec, None, int, H2OFrame)\n if time is not None:\n if hour is not None or minute is not None or second is not None or msec is not None:\n raise H2OValueError(\"Arguments hour, minute, second, msec cannot be used together with `time`.\")\n hour = time.hour\n minute = time.minute\n second = time.second\n msec = time.microsecond // 1000\n if date is not None:\n if is_type(date, pandas_timestamp):\n date = date.to_pydatetime()\n if is_type(date, numpy_datetime):\n date = date.astype(\"M8[ms]\").astype(\"O\")\n if year is not None or month is not None or day is not None:\n raise H2OValueError(\"Arguments year, month and day cannot be used together with `date`.\")\n year = date.year\n month = date.month\n day = date.day\n if isinstance(date, datetime.datetime):\n if time is not None:\n raise H2OValueError(\"Argument `time` cannot be used together with `date` of datetime type.\")\n if hour is not None or minute is not None or second is not None or msec is not None:\n raise H2OValueError(\"Arguments hour, minute, second, msec cannot be used together with `date` \"\n \"of datetime type.\")\n hour = date.hour\n minute = date.minute\n second = date.second\n msec = date.microsecond // 1000\n if year is None or month is None or day is None:\n raise H2OValueError(\"Either arguments (`year`, `month` and `day`) or the `date` are required.\")\n if hour is None: hour = 0\n if minute is None: minute = 0\n if second is None: second = 0\n if msec is None: msec = 0\n\n local_vars = locals()\n res_nrows = None\n for n in [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\", \"msec\"]:\n x = local_vars[n]\n if isinstance(x, H2OFrame):\n if x.ncols != 1:\n raise H2OValueError(\"Argument `%s` is a frame with more than 1 column\" % n)\n if x.type(0) not in {\"int\", \"real\"}:\n raise H2OValueError(\"Column `%s` is not numeric (type = %s)\" % (n, x.type(0)))\n if res_nrows is None:\n res_nrows = x.nrows\n if x.nrows == 0 or x.nrows != res_nrows:\n raise H2OValueError(\"Incompatible column `%s` having %d rows\" % (n, x.nrows))\n if res_nrows is None:\n res_nrows = 1\n res = H2OFrame._expr(ExprNode(\"moment\", year, month, day, hour, minute, second, msec))\n res._ex._cache._names = [\"name\"]\n res._ex._cache._types = {\"name\": \"time\"}\n res._ex._cache._nrows = res_nrows\n res._ex._cache._ncols = 1\n return res\n\n\n def unique(self):\n \"\"\"\n Extract the unique values in the column.\n\n :returns: H2OFrame of just the unique values in the column.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"unique\", self))\n\n\n def levels(self):\n \"\"\"\n Get the factor levels.\n\n :returns: A list of lists, one list per column, of levels.\n \"\"\"\n lol = H2OFrame._expr(expr=ExprNode(\"levels\", self)).as_data_frame(False)\n lol.pop(0) # Remove column headers\n lol = list(zip(*lol))\n return [[ll for ll in l if ll != ''] for l in lol]\n\n\n def nlevels(self):\n \"\"\"\n Get the number of factor levels for each categorical column.\n\n :returns: A list of the number of levels per column.\n 
\"\"\"\n levels = self.levels()\n return [len(l) for l in levels] if levels else 0\n\n\n def set_level(self, level):\n \"\"\"\n A method to set all column values to one of the levels.\n\n :param str level: The level at which the column will be set (a string)\n\n :returns: H2OFrame with entries set to the desired level.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"setLevel\", self, level), cache=self._ex._cache)\n\n\n def set_levels(self, levels):\n \"\"\"\n Replace the levels of a categorical column.\n\n New levels must be aligned with the old domain. This call has copy-on-write semantics.\n\n :param List[str] levels: A list of strings specifying the new levels. The number of new\n levels must match the number of old levels.\n :returns: A single-column H2OFrame with the desired levels.\n \"\"\"\n assert_is_type(levels, [str])\n return H2OFrame._expr(expr=ExprNode(\"setDomain\", self, False, levels), cache=self._ex._cache)\n\n\n def set_names(self, names):\n \"\"\"\n Change names of all columns in the frame.\n\n :param List[str] names: The list of new names for every column in the frame.\n \"\"\"\n assert_is_type(names, [str])\n assert_satisfies(names, len(names) == self.ncol)\n self._ex = ExprNode(\"colnames=\", self, range(self.ncol), names) # Update-in-place, but still lazy\n return self\n\n\n def set_name(self, col=None, name=None):\n \"\"\"\n Set a new name for a column.\n\n :param col: index or name of the column whose name is to be set; may be skipped for 1-column frames\n :param name: the new name of the column\n \"\"\"\n assert_is_type(col, None, int, str)\n assert_is_type(name, str)\n ncols = self.ncols\n\n col_index = None\n if is_type(col, int):\n if not(-ncols <= col < ncols):\n raise H2OValueError(\"Index %d is out of bounds for a frame with %d columns\" % (col, ncols))\n col_index = (col + ncols) % ncols # handle negative indices\n elif is_type(col, str):\n if col not in self.names:\n raise H2OValueError(\"Column %s doesn't exist in the frame.\" % col)\n col_index = self.names.index(col) # lookup the name\n else:\n assert col is None\n if ncols != 1:\n raise H2OValueError(\"The frame has %d columns; please specify which one to rename\" % ncols)\n col_index = 0\n if name != self.names[col_index] and name in self.types:\n raise H2OValueError(\"Column '%s' already exists in the frame\" % name)\n\n oldname = self.names[col_index]\n old_cache = self._ex._cache\n self._ex = ExprNode(\"colnames=\", self, col_index, name) # Update-in-place, but still lazy\n self._ex._cache.fill_from(old_cache)\n if self.names is None:\n self._frame()._ex._cache.fill()\n else:\n self._ex._cache._names = self.names[:col] + [name] + self.names[col + 1:]\n self._ex._cache._types[name] = self._ex._cache._types.pop(oldname)\n return\n\n\n def as_date(self, format):\n \"\"\"\n Convert the frame (containing strings / categoricals) into the ``date`` format.\n\n :param str format: the format string (e.g. 
\"YYYY-mm-dd\")\n :returns: new H2OFrame with \"date\" column types\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"as.Date\", self, format), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def cumsum(self, axis=0):\n \"\"\"\n Compute cumulative sum over rows / columns of the frame.\n\n :param int axis: 0 for column-wise, 1 for row-wise\n :returns: new H2OFrame with cumulative sums of the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"cumsum\", self, axis), cache=self._ex._cache)\n\n\n def cumprod(self, axis=0):\n \"\"\"\n Compute cumulative product over rows / columns of the frame.\n\n :param int axis: 0 for column-wise, 1 for row-wise\n :returns: new H2OFrame with cumulative products of the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"cumprod\", self, axis), cache=self._ex._cache)\n\n\n def cummin(self, axis=0):\n \"\"\"\n Compute cumulative minimum over rows / columns of the frame.\n\n :param int axis: 0 for column-wise, 1 for row-wise\n :returns: new H2OFrame with running minimums of the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"cummin\", self, axis), cache=self._ex._cache)\n\n\n def cummax(self, axis=0):\n \"\"\"\n Compute cumulative maximum over rows / columns of the frame.\n\n :param int axis: 0 for column-wise, 1 for row-wise\n :returns: new H2OFrame with running maximums of the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"cummax\", self, axis), cache=self._ex._cache)\n\n\n def prod(self, na_rm=False):\n \"\"\"\n Compute the product of all values in the frame.\n\n :param bool na_rm: If True then NAs will be ignored during the computation.\n :returns: product of all values in the frame (a float)\n \"\"\"\n return ExprNode(\"prod.na\" if na_rm else \"prod\", self)._eager_scalar()\n\n\n def any(self):\n \"\"\"Return True if any element in the frame is either True or NA.\"\"\"\n return bool(ExprNode(\"any\", self)._eager_scalar())\n\n\n def any_na_rm(self):\n \"\"\"Return True if any value in the frame is non-zero (disregarding all NAs).\"\"\"\n return bool(ExprNode(\"any.na\", self)._eager_scalar())\n\n\n def all(self):\n \"\"\"Return True if every element in the frame is either True or NA.\"\"\"\n return bool(ExprNode(\"all\", self)._eager_scalar())\n\n\n def isnumeric(self):\n \"\"\"\n Test which columns in the frame are numeric.\n\n :returns: a list of True/False indicating for each column in the frame whether it is numeric.\n \"\"\"\n return [bool(o) for o in ExprNode(\"is.numeric\", self)._eager_scalar()]\n\n\n def isstring(self):\n \"\"\"\n Test which columns in the frame are string.\n\n :returns: a list of True/False indicating for each column in the frame whether it is numeric.\n \"\"\"\n return [bool(o) for o in ExprNode(\"is.character\", self)._eager_scalar()]\n\n\n def isin(self, item):\n \"\"\"\n Test whether elements of an H2OFrame are contained in the ``item``.\n\n :param items: An item or a list of items to compare the H2OFrame against.\n\n :returns: An H2OFrame of 0s and 1s showing whether each element in the original H2OFrame is contained in item.\n \"\"\"\n if is_type(item, list, tuple, set):\n return functools.reduce(H2OFrame.__or__, (self == i for i in item))\n else:\n return self == item\n\n\n def kfold_column(self, n_folds=3, seed=-1):\n \"\"\"\n Build a fold assignments column for cross-validation.\n\n This method will produce a column having the same data layout as the source 
frame.\n\n :param int n_folds: An integer specifying the number of validation sets to split the training data into.\n :param int seed: Seed for random numbers as fold IDs are randomly assigned.\n\n :returns: A single column H2OFrame with the fold assignments.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"kfold_column\", self, n_folds, seed))._frame() # want this to be eager!\n\n\n def modulo_kfold_column(self, n_folds=3):\n \"\"\"\n Build a fold assignments column for cross-validation.\n\n Rows are assigned a fold according to the current row number modulo ``n_folds``.\n\n :param int n_folds: An integer specifying the number of validation sets to split the training data into.\n :returns: A single-column H2OFrame with the fold assignments.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"modulo_kfold_column\", self, n_folds))._frame() # want this to be eager!\n\n\n def stratified_kfold_column(self, n_folds=3, seed=-1):\n \"\"\"\n Build a fold assignment column with the constraint that each fold has the same class\n distribution as the fold column.\n\n :param int n_folds: The number of folds to build.\n :param int seed: A seed for the random number generator.\n\n :returns: A single column H2OFrame with the fold assignments.\n \"\"\"\n return H2OFrame._expr(\n expr=ExprNode(\"stratified_kfold_column\", self, n_folds, seed))._frame() # want this to be eager!\n\n\n def structure(self):\n \"\"\"Compactly display the internal structure of an H2OFrame.\"\"\"\n df = self.as_data_frame(use_pandas=False)\n cn = df.pop(0)\n nr = self.nrow\n nc = self.ncol\n width = max([len(c) for c in cn])\n isfactor = self.isfactor()\n numlevels = self.nlevels()\n lvls = self.levels()\n print(\"H2OFrame: '{}' \\nDimensions: {} obs. of {} variables\".format(self.frame_id, nr, nc))\n for i in range(nc):\n print(\"$ {} {}: \".format(cn[i], ' ' * (width - max(0, len(cn[i])))), end=' ')\n if isfactor[i]:\n nl = numlevels[i]\n print(\"Factor w/ {} level(s) {} \".format(nl, '\"' + '\",\"'.join(lvls[i]) + '\"'), end='\\n')\n else:\n print(\"num {}\".format(\" \".join(it[0] if it else \"nan\" for it in h2o.as_list(self[:10, i], False)[1:])))\n\n\n def as_data_frame(self, use_pandas=True, header=True):\n \"\"\"\n Obtain the dataset as a python-local object.\n\n :param bool use_pandas: If True (default) then return the H2OFrame as a pandas DataFrame (requires that the\n ``pandas`` library was installed). If False, then return the contents of the H2OFrame as plain nested\n list, in a row-wise order.\n :param bool header: If True (default), then column names will be appended as the first row in list\n\n :returns: A python object (a list of lists of strings, each list is a row, if use_pandas=False, otherwise\n a pandas DataFrame) containing this H2OFrame instance's data.\n \"\"\"\n if can_use_pandas() and use_pandas:\n import pandas\n return pandas.read_csv(StringIO(self.get_frame_data()), low_memory=False)\n frame = [row for row in csv.reader(StringIO(self.get_frame_data()))]\n if not header:\n frame.pop(0)\n return frame\n\n\n def get_frame_data(self):\n \"\"\"\n Get frame data as a string in csv format.\n\n This will create a multiline string, where each line will contain a separate row of frame's data, with\n individual values separated by commas.\n \"\"\"\n return h2o.api(\"GET /3/DownloadDataset\", data={\"frame_id\": self.frame_id, \"hex_string\": False})\n\n\n def __getitem__(self, item):\n \"\"\"\n Frame slicing, supports row and column slicing.\n\n :param item: selector of a subframe. 
This can be one of the following:\n\n - an int, indicating selection of a single column at the specified index (0-based)\n - a string, selecting a column with the given name\n - a list of ints or strings, selecting several columns with the given indices / names\n - a slice, selecting columns with the indices within this slice\n - a single-column boolean frame, selecting rows for which the selector is true\n - a 2-element tuple, where the first element is a row selector, and the second element is the\n column selector. Here the row selector may be one of: an int, a list of ints, a slice, or\n a boolean frame. The column selector is similarly one of: an int, a list of ints, a string,\n a list of strings, or a slice. It is also possible to use the empty slice (``:``) to select\n all elements within one of the dimensions.\n\n :returns: A new frame comprised of some rows / columns of the source frame.\n\n :examples:\n >>> fr[2] # All rows, 3rd column\n >>> fr[-2] # All rows, 2nd column from end\n >>> fr[:, -1] # All rows, last column\n >>> fr[0:5, :] # First 5 rows, all columns\n >>> fr[fr[0] > 1, :] # Only rows where first cell is greater than 1, all columns\n >>> fr[[1, 5, 6]] # Columns 2, 6, and 7\n >>> fr[0:50, [1,2,3]] # First 50 rows, columns 2, 3, and 4\n \"\"\"\n # Select columns based on a string, a list of strings, an int or a slice.\n # Note that the python column selector handles the case of negative\n # selections, or out-of-range selections - without having to compute\n # self._ncols in the front-end - which would force eager evaluation just to\n # range check in the front-end.\n new_ncols = -1\n new_nrows = -1\n new_names = None\n new_types = None\n fr = None\n flatten = False\n if isinstance(item, slice):\n item = normalize_slice(item, self.ncols)\n if is_type(item, str, int, list, slice):\n new_ncols, new_names, new_types, item = self._compute_ncol_update(item)\n new_nrows = self.nrow\n fr = H2OFrame._expr(expr=ExprNode(\"cols_py\", self, item))\n elif isinstance(item, (ExprNode, H2OFrame)):\n new_ncols = self.ncol\n new_names = self.names\n new_types = self.types\n new_nrows = -1 # have a \"big\" predicate column -- update cache later on...\n fr = H2OFrame._expr(expr=ExprNode(\"rows\", self, item))\n elif isinstance(item, tuple):\n rows, cols = item\n allrows = allcols = False\n if isinstance(cols, slice):\n cols = normalize_slice(cols, self.ncols)\n allcols = cols == slice(0, self.ncols, 1)\n if isinstance(rows, slice):\n rows = normalize_slice(rows, self.nrows)\n allrows = rows == slice(0, self.nrows, 1)\n\n if allrows and allcols: return self # fr[:,:] -> all rows and columns.. 
return self\n if allrows:\n new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols)\n new_nrows = self.nrow\n fr = H2OFrame._expr(expr=ExprNode(\"cols_py\", self, cols)) # fr[:,cols] -> really just a column slice\n if allcols:\n new_ncols = self.ncols\n new_names = self.names\n new_types = self.types\n new_nrows, rows = self._compute_nrow_update(rows)\n fr = H2OFrame._expr(expr=ExprNode(\"rows\", self, rows)) # fr[rows,:] -> really just a row slices\n\n if not allrows and not allcols:\n new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols)\n new_nrows, rows = self._compute_nrow_update(rows)\n fr = H2OFrame._expr(expr=ExprNode(\"rows\", ExprNode(\"cols_py\", self, cols), rows))\n\n flatten = is_type(rows, int) and is_type(cols, str, int)\n else:\n raise ValueError(\"Unexpected __getitem__ selector: \" + str(type(item)) + \" \" + str(item.__class__))\n\n assert fr is not None\n # Pythonic: if the row & col selector turn into ints (or a single col\n # name), then extract the single element out of the Frame. Otherwise\n # return a Frame, EVEN IF the selectors are e.g. slices-of-1-value.\n if flatten:\n return fr.flatten()\n\n fr._ex._cache.ncols = new_ncols\n fr._ex._cache.nrows = new_nrows\n fr._ex._cache.names = new_names\n fr._ex._cache.types = new_types\n fr._is_frame = self._is_frame\n return fr\n\n def _compute_ncol_update(self, item): # computes new ncol, names, and types\n try:\n new_ncols = -1\n if isinstance(item, list):\n new_ncols = len(item)\n if _is_str_list(item):\n new_types = {k: self.types[k] for k in item}\n new_names = item\n else:\n new_names = [self.names[i] for i in item]\n new_types = {name: self.types[name] for name in new_names}\n elif isinstance(item, slice):\n assert slice_is_normalized(item)\n new_names = self.names[item]\n new_types = {name: self.types[name] for name in new_names}\n elif is_type(item, str, int):\n new_ncols = 1\n if is_type(item, str):\n new_names = [item]\n new_types = None if item not in self.types else {item: self.types[item]}\n else:\n new_names = [self.names[item]]\n new_types = {new_names[0]: self.types[new_names[0]]}\n else:\n raise ValueError(\"Unexpected type: \" + str(type(item)))\n return (new_ncols, new_names, new_types, item)\n except:\n return (-1, None, None, item)\n\n def _compute_nrow_update(self, item):\n try:\n new_nrows = -1\n if isinstance(item, list):\n new_nrows = len(item)\n elif isinstance(item, slice):\n assert slice_is_normalized(item)\n new_nrows = (item.stop - item.start + item.step - 1) // item.step\n elif isinstance(item, H2OFrame):\n new_nrows = -1\n else:\n new_nrows = 1\n return [new_nrows, item]\n except:\n return [-1, item]\n\n\n def __setitem__(self, item, value):\n \"\"\"\n Replace, update or add column(s) in an H2OFrame.\n\n :param item: A 0-based index of a column, or a column name, or a list of column names, or a slice.\n Alternatively, this may also be a two-element tuple where the first element in the tuple is a row selector,\n and the second element is a row selector. Finally, this can also be a boolean frame indicating which\n rows/columns to modify. If ``item`` is a column name that does not exist in the frame, then a new column\n will be appended to the current frame.\n :param value: The value replacing elements at positions given by ``item``. 
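        A minimal sketch of typical assignments (assumes a frame ``fr`` with existing
        columns "A" and "B"; the column names are made up):

        >>> fr["C"] = fr["A"] + fr["B"]      # append a new derived column
        >>> fr[fr["A"] > 1, "B"] = None      # set selected cells to NA
        >>> fr["A"] = 0                      # overwrite an existing column with a constant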
This can be either a constant, or\n another frame.\n \"\"\"\n # TODO: add far stronger type checks, so that we never run in a situation where the server has to\n # tell us that we requested an illegal operation.\n assert_is_type(item, str, int, tuple, list, H2OFrame)\n assert_is_type(value, None, numeric, str, H2OFrame)\n col_expr = None\n row_expr = None\n colname = None # When set, we are doing an append\n\n if is_type(item, str): # String column name, could be new or old\n if item in self.names:\n col_expr = self.names.index(item) # Update an existing column\n else:\n col_expr = self.ncols\n colname = item # New, append\n elif is_type(item, int):\n if not(-self.ncols <= item < self.ncols):\n raise H2OValueError(\"Incorrect column index: %d\" % item)\n col_expr = item # Column by number\n if col_expr < 0:\n col_expr += self.ncols\n elif isinstance(item, tuple): # Both row and col specifiers\n # Need more type checks\n row_expr = item[0]\n col_expr = item[1]\n if is_type(col_expr, str): # Col by name\n if col_expr not in self.names: # Append\n colname = col_expr\n col_expr = self.ncol\n elif is_type(col_expr, int):\n if not(-self.ncols <= col_expr < self.ncols):\n raise H2OValueError(\"Incorrect column index: %d\" % item)\n if col_expr < 0:\n col_expr += self.ncols\n elif isinstance(col_expr, slice): # Col by slice\n if col_expr.start is None and col_expr.stop is None:\n col_expr = slice(0, self.ncol) # Slice of all\n if isinstance(row_expr, slice):\n start = row_expr.start\n step = row_expr.step\n stop = row_expr.stop\n if start is None: start = 0\n if stop is None: stop = self.nrows\n row_expr = slice(start, stop, step)\n elif isinstance(item, H2OFrame):\n row_expr = item # Row slicing\n elif isinstance(item, list):\n col_expr = item\n\n if value is None: value = float(\"nan\")\n value_is_own_subframe = isinstance(value, H2OFrame) and self._is_frame_in_self(value)\n old_cache = self._ex._cache\n if colname is None:\n self._ex = ExprNode(\":=\", self, value, col_expr, row_expr)\n self._ex._cache.fill_from(old_cache)\n if isinstance(value, H2OFrame) and \\\n value._ex._cache.types_valid() and \\\n self._ex._cache.types_valid():\n self._ex._cache._types.update(value._ex._cache.types)\n else:\n self._ex._cache.types = None\n else:\n self._ex = ExprNode(\"append\", self, value, colname)\n self._ex._cache.fill_from(old_cache)\n self._ex._cache.names = self.names + [colname]\n self._ex._cache._ncols += 1\n if self._ex._cache.types_valid() and isinstance(value, H2OFrame) and value._ex._cache.types_valid():\n self._ex._cache._types[colname] = list(viewvalues(value._ex._cache.types))[0]\n else:\n self._ex._cache.types = None\n if value_is_own_subframe:\n value._ex = None # wipe out to keep ref counts correct\n\n\n def _is_frame_in_self(self, frame):\n if self._ex is frame._ex: return True\n if frame._ex._children is None: return False\n return any(self._is_expr_in_self(ch) for ch in frame._ex._children)\n\n def _is_expr_in_self(self, expr):\n if not isinstance(expr, ExprNode): return False\n if self._ex is expr: return True\n if expr._children is None: return False\n return any(self._is_expr_in_self(ch) for ch in expr._children)\n\n\n def drop(self, index, axis=1):\n \"\"\"\n Drop a single column or row or a set of columns or rows from a H2OFrame.\n\n Dropping a column or row is not in-place.\n Indices of rows and columns are zero-based.\n\n :param index: A list of column indices, column names, or row indices to drop; or\n a string to drop a single column by name; or an int to drop a single 
column by index.\n\n :param int axis: If 1 (default), then drop columns; if 0 then drop rows.\n\n :returns: a new H2OFrame with the respective dropped columns or rows. The original H2OFrame remains\n unchanged.\n \"\"\"\n if axis == 1:\n if not isinstance(index, list):\n #If input is a string, i.e., \"C1\":\n if is_type(index, str):\n #Check if index is an actual column(s) in the frame\n if index not in self.names:\n raise H2OValueError(\"Column(s) selected to drop are not in original frame: %r\" % index)\n index = self.names.index(index)\n #If input is an int indicating a column index, i.e., 3:\n elif is_type(index, int):\n #Check if index is an actual column index in the frame\n if index > self.ncol:\n raise H2OValueError(\"Column index selected to drop is not part of the frame: %r\" % index)\n if index < 0:\n raise H2OValueError(\"Column index selected to drop is not positive: %r\" % index)\n\n fr = H2OFrame._expr(expr=ExprNode(\"cols\", self, -(index + 1)), cache=self._ex._cache)\n fr._ex._cache.ncols -= 1\n fr._ex._cache.names = self.names[:index] + self.names[index + 1:]\n fr._ex._cache.types = {name: self.types[name] for name in fr._ex._cache.names}\n return fr\n\n elif isinstance(index, list):\n #If input is an int array indicating a column index, i.e., [3] or [1,2,3]:\n if is_type(index, [int]):\n if max(index) > self.ncol:\n raise H2OValueError(\"Column index selected to drop is not part of the frame: %r\" % index)\n if min(index) < 0:\n raise H2OValueError(\"Column index selected to drop is not positive: %r\" % index)\n for i in range(len(index)):\n index[i] = -(index[i] + 1)\n #If index is a string array, i.e., [\"C1\", \"C2\"]\n elif is_type(index, [str]):\n #Check if index is an actual column(s) in the frame\n if not set(index).issubset(self.names):\n raise H2OValueError(\"Column(s) selected to drop are not in original frame: %r\" % index)\n for i in range(len(index)):\n index[i] = -(self.names.index(index[i]) + 1)\n fr = H2OFrame._expr(expr=ExprNode(\"cols\", self, index), cache=self._ex._cache)\n fr._ex._cache.ncols -= len(index)\n fr._ex._cache.names = [i for i in self.names\n if self.names.index(i) not in list(map(lambda x: abs(x) - 1, index))]\n fr._ex._cache.types = {name: fr.types[name] for name in fr._ex._cache.names}\n\n else:\n raise ValueError(\"Invalid column index types. Must either be a list of all int indexes, \"\n \"a string list of all column names, a single int index, or\"\n \"a single string for dropping columns.\")\n return fr\n elif axis == 0:\n if is_type(index, [int]):\n #Check if index is an actual column index in the frame\n if max(index) > self.nrow:\n raise H2OValueError(\"Row index selected to drop is not part of the frame: %r\" % index)\n if min(index) < 0:\n raise H2OValueError(\"Row index selected to drop is not positive: %r\" % index)\n index = [-(x + 1) for x in index]\n fr = H2OFrame._expr(expr=ExprNode(\"rows\", self, index), cache=self._ex._cache)\n fr._ex._cache.nrows -= len(index)\n else:\n raise ValueError(\"Invalid row indexes. 
Must be a list of int row indexes to drop from the H2OFrame.\")\n return fr\n\n\n def pop(self, i):\n \"\"\"\n Pop a column from the H2OFrame at index i.\n\n :param i: The index (int) or name (str) of the column to pop.\n :returns: an H2OFrame containing the column dropped from the current frame; the current frame is modified\n in-place and loses the column.\n \"\"\"\n if is_type(i, str): i = self.names.index(i)\n col = H2OFrame._expr(expr=ExprNode(\"cols\", self, i))\n old_cache = self._ex._cache\n self._ex = ExprNode(\"cols\", self, -(i + 1))\n self._ex._cache.ncols -= 1\n self._ex._cache.names = old_cache.names[:i] + old_cache.names[i + 1:]\n self._ex._cache.types = {name: old_cache.types[name] for name in self._ex._cache.names}\n self._ex._cache._data = None\n col._ex._cache.ncols = 1\n col._ex._cache.names = [old_cache.names[i]]\n return col\n\n\n def quantile(self, prob=None, combine_method=\"interpolate\", weights_column=None):\n \"\"\"\n Compute quantiles.\n\n :param List[float] prob: list of probabilities for which quantiles should be computed.\n :param str combine_method: for even samples this setting determines how to combine quantiles. This can be\n one of ``\"interpolate\"``, ``\"average\"``, ``\"low\"``, ``\"high\"``.\n :param weights_column: optional weights for each row. If not given, all rows are assumed to have equal\n importance. This parameter can be either the name of column containing the observation weights in\n this frame, or a single-column separate H2OFrame of observation weights.\n\n :returns: a new H2OFrame containing the quantiles and probabilities.\n \"\"\"\n if len(self) == 0: return self\n if prob is None: prob = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99]\n if weights_column is None:\n weights_column = \"_\"\n else:\n assert_is_type(weights_column, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == self.nrow))\n if isinstance(weights_column, H2OFrame):\n merged = self.cbind(weights_column)\n weights_column = merged.names[-1]\n return H2OFrame._expr(expr=ExprNode(\"quantile\", merged, prob, combine_method, weights_column))\n return H2OFrame._expr(expr=ExprNode(\"quantile\", self, prob, combine_method, weights_column))\n\n\n def concat(self, frames, axis=1):\n \"\"\"\n Append multiple H2OFrames to this frame, column-wise or row-wise.\n\n :param List[H2OFrame] frames: list of frames that should be appended to the current frame.\n :param int axis: if 1 then append column-wise (default), if 0 then append row-wise.\n\n :returns: an H2OFrame of the combined datasets.\n \"\"\"\n if len(frames) == 0:\n raise ValueError(\"Input list of frames is empty! Nothing to concat.\")\n\n if axis == 1:\n df = self.cbind(frames)\n else:\n df = self.rbind(frames)\n return df\n\n\n def cbind(self, data):\n \"\"\"\n Append data to this frame column-wise.\n\n :param H2OFrame data: append columns of frame ``data`` to the current frame. 
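        A minimal sketch (the two small frames below are made up; they must have the
        same number of rows for ``cbind`` to succeed):

        >>> left = h2o.H2OFrame({"x": [1, 2, 3]})
        >>> right = h2o.H2OFrame({"y": [4, 5, 6]})
        >>> wide = left.cbind(right)    # 3 rows, columns "x" and "y"
        >>> wider = wide.cbind(7)       # appends a constant numeric column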
You can also cbind a number,\n in which case it will get converted into a constant column.\n\n :returns: new H2OFrame with all frames in ``data`` appended column-wise.\n \"\"\"\n assert_is_type(data, H2OFrame, numeric, [H2OFrame, numeric])\n frames = [data] if not isinstance(data, list) else data\n new_cols = list(self.columns)\n new_types = dict(self.types)\n for frame in frames:\n if isinstance(frame, H2OFrame):\n if frame.nrow != self.nrow:\n raise H2OValueError(\"Cannot bind a dataframe with %d rows to a data frame with %d rows: \"\n \"the number of rows should match\" % (frame.nrow, self.nrow))\n new_cols += frame.columns\n new_types.update(frame.types)\n else:\n new_cols += [None]\n unique_cols = set(new_cols)\n fr = H2OFrame._expr(expr=ExprNode(\"cbind\", self, *frames), cache=self._ex._cache)\n fr._ex._cache.ncols = len(new_cols)\n if len(new_cols) == len(unique_cols) and None not in unique_cols:\n fr._ex._cache.names = new_cols\n fr._ex._cache.types = new_types\n else:\n # Invalidate names and types since they contain duplicate / unknown names, and the server will choose those.\n fr._ex._cache.names = None\n fr._ex._cache.types = None\n return fr\n\n\n def rbind(self, data):\n \"\"\"\n Append data to this frame row-wise.\n\n :param data: an H2OFrame or a list of H2OFrame's to be combined with current frame row-wise.\n :returns: this H2OFrame with all frames in data appended row-wise.\n \"\"\"\n assert_is_type(data, H2OFrame, [H2OFrame])\n frames = [data] if not isinstance(data, list) else data\n for frame in frames:\n if frame.ncol != self.ncol:\n raise H2OValueError(\"Cannot row-bind a dataframe with %d columns to a data frame with %d columns: \"\n \"the columns must match\" % (frame.ncol, self.ncol))\n if frame.columns != self.columns or frame.types != self.types:\n raise H2OValueError(\"Column names and types must match for rbind() to work\")\n fr = H2OFrame._expr(expr=ExprNode(\"rbind\", self, *frames), cache=self._ex._cache)\n fr._ex._cache.nrows = self.nrow + sum(frame.nrow for frame in frames)\n return fr\n\n\n def split_frame(self, ratios=None, destination_frames=None, seed=None):\n \"\"\"\n Split a frame into distinct subsets of size determined by the given ratios.\n\n The number of subsets is always 1 more than the number of ratios given. Note that\n this does not give an exact split. H2O is designed to be efficient on big data\n using a probabilistic splitting method rather than an exact split. For example\n when specifying a split of 0.75/0.25, H2O will produce a test/train split with\n an expected value of 0.75/0.25 rather than exactly 0.75/0.25. 
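        A minimal sketch (assumes ``df`` is an existing H2OFrame; the 0.8 ratio and the
        seed value are arbitrary):

        >>> train, test = df.split_frame(ratios=[0.8], seed=1234)
        >>> train.nrow + test.nrow == df.nrow   # True, although the 80/20 split itself is approximate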
On small datasets,\n the sizes of the resulting splits will deviate from the expected value more than\n on big data, where they will be very close to exact.\n\n :param List[float] ratios: The fractions of rows for each split.\n :param List[str] destination_frames: The names of the split frames.\n :param int seed: seed for the random number generator\n\n :returns: A list of H2OFrames\n \"\"\"\n assert_is_type(ratios, [numeric], None)\n assert_is_type(destination_frames, [str], None)\n assert_is_type(seed, int, None)\n\n if ratios is None:\n ratios = [0.75]\n if not ratios:\n raise ValueError(\"Ratios array may not be empty\")\n\n if destination_frames is not None:\n if len(ratios) + 1 != len(destination_frames):\n raise ValueError(\"The number of provided destination_frames must be one more \"\n \"than the number of provided ratios\")\n\n num_slices = len(ratios) + 1\n boundaries = []\n\n last_boundary = 0\n i = 0\n while i < num_slices - 1:\n ratio = ratios[i]\n if ratio < 0:\n raise ValueError(\"Ratio must be greater than 0\")\n boundary = last_boundary + ratio\n if boundary >= 1.0:\n raise ValueError(\"Ratios must add up to less than 1.0\")\n boundaries.append(boundary)\n last_boundary = boundary\n i += 1\n\n splits = []\n tmp_runif = self.runif(seed)\n tmp_runif.frame_id = \"%s_splitter\" % _py_tmp_key(h2o.connection().session_id)\n\n i = 0\n while i < num_slices:\n if i == 0:\n # lower_boundary is 0.0\n upper_boundary = boundaries[i]\n tmp_slice = self[(tmp_runif <= upper_boundary), :]\n elif i == num_slices - 1:\n lower_boundary = boundaries[i - 1]\n # upper_boundary is 1.0\n tmp_slice = self[(tmp_runif > lower_boundary), :]\n else:\n lower_boundary = boundaries[i - 1]\n upper_boundary = boundaries[i]\n tmp_slice = self[((tmp_runif > lower_boundary) & (tmp_runif <= upper_boundary)), :]\n\n if destination_frames is None:\n splits.append(tmp_slice)\n else:\n destination_frame_id = destination_frames[i]\n tmp_slice.frame_id = destination_frame_id\n splits.append(tmp_slice)\n\n i += 1\n\n del tmp_runif\n return splits\n\n\n def group_by(self, by):\n \"\"\"\n Return a new ``GroupBy`` object using this frame and the desired grouping columns.\n\n The returned groups are sorted by the natural group-by column sort.\n\n :param by: The columns to group on (either a single column name, or a list of column names, or\n a list of column indices).\n \"\"\"\n assert_is_type(by, str, int, [str, int])\n return GroupBy(self, by)\n\n def sort(self, by):\n \"\"\"\n Return a new Frame that is sorted by column(s) in ascending order. 
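        A minimal sketch (assumes ``fr`` has an int column "year" and an enum column
        "city"; both names are made up):

        >>> by_year = fr.sort("year")                 # ascending sort on a single column
        >>> by_city_year = fr.sort(["city", "year"])  # sort on several columns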
A fully distributed and parallel sort.\n However, the original frame must not contain any String columns.\n :param by: The column to sort by (either a single column name, or a list of column names, or\n a list of column indices)\n :return: a new sorted Frame\n \"\"\"\n assert_is_type(by, str, int, [str, int])\n if type(by) != list: by = [by]\n for c in by:\n if self.type(c) not in [\"enum\",\"time\",\"int\"]:\n raise H2OValueError(\"Sort by column: \" + str(c) + \" not of enum, time, or int type\")\n return H2OFrame._expr(expr=ExprNode(\"sort\",self,by))\n\n def fillna(self,method=\"forward\",axis=0,maxlen=1):\n \"\"\"\n Return a new Frame that fills NA along a given axis and along a given direction with a maximum fill length\n :param method: ``\"forward\"`` or ``\"backward\"``\n :param axis: 0 for columnar-wise or 1 for row-wise fill\n :param maxlen: Max number of consecutive NA's to fill\n :return: \n \"\"\"\n assert_is_type(axis, 0, 1)\n assert_is_type(method,str)\n assert_is_type(maxlen, int)\n return H2OFrame._expr(expr=ExprNode(\"h2o.fillna\",self,method,axis,maxlen))\n\n def impute(self, column=-1, method=\"mean\", combine_method=\"interpolate\", by=None, group_by_frame=None, values=None):\n \"\"\"\n Impute missing values into the frame, modifying it in-place.\n\n :param int column: Index of the column to impute, or -1 to impute the entire frame.\n :param str method: The method of imputation: ``\"mean\"``, ``\"median\"``, or ``\"mode\"``.\n :param str combine_method: When the method is ``\"median\"``, this setting dictates how to combine quantiles\n for even samples. One of ``\"interpolate\"``, ``\"average\"``, ``\"low\"``, ``\"high\"``.\n :param by: The list of columns to group on.\n :param H2OFrame group_by_frame: Impute the values with this pre-computed grouped frame.\n :param List values: The list of impute values, one per column. None indicates to skip the column.\n\n :returns: A list of values used in the imputation or the group-by result used in imputation.\n \"\"\"\n if is_type(column, str): column = self.names.index(column)\n if is_type(by, str): by = self.names.index(by)\n\n if values is None:\n values = \"_\"\n else:\n assert len(values) == len(self.columns), \"Length of values does not match length of columns\"\n # convert string values to categorical num values\n values2 = []\n for i in range(0,len(values)):\n if self.type(i) == \"enum\":\n try:\n values2.append(self.levels()[i].index(values[i]))\n except:\n raise H2OValueError(\"Impute value of: \" + values[i] + \" not found in existing levels of\"\n \" column: \" + self.col_names[i])\n else:\n values2.append(values[i])\n values = values2\n if group_by_frame is None: group_by_frame = \"_\"\n\n\n # This code below is needed to ensure the frame (self) exists on the server. 
Without it, self._ex._cache.fill()\n # fails with an assertion that ._id is None.\n # This code should be removed / reworked once we have a more consistent strategy of dealing with frames.\n self._ex._eager_frame()\n\n if by is not None or group_by_frame is not \"_\":\n res = H2OFrame._expr(\n expr=ExprNode(\"h2o.impute\", self, column, method, combine_method, by, group_by_frame, values))._frame()\n else:\n res = ExprNode(\"h2o.impute\", self, column, method, combine_method, by, group_by_frame,\n values)._eager_scalar()\n\n self._ex._cache.flush()\n self._ex._cache.fill(10)\n return res\n\n\n def merge(self, other, all_x=False, all_y=False, by_x=None, by_y=None, method=\"auto\"):\n \"\"\"\n Merge two datasets based on common column names.\n\n :param H2OFrame other: The frame to merge to the current one. By default, must have at least one column in common with\n this frame, and all columns in common are used as the merge key. If you want to use only a subset of the\n columns in common, rename the other columns so the columns are unique in the merged result.\n :param bool all_x: If True, include all rows from the left/self frame\n :param bool all_y: If True, include all rows from the right/other frame\n :param by_x: list of columns in the current frame to use as a merge key.\n :param by_y: list of columns in the ``other`` frame to use as a merge key. Should have the same number of\n columns as in the ``by_x`` list.\n\n :returns: New H2OFrame with the result of merging the current frame with the ``other`` frame.\n \"\"\"\n\n if by_x is None and by_y is None:\n common_names = list(set(self.names) & set(other.names))\n if not common_names:\n raise H2OValueError(\"No columns in common to merge on!\")\n\n if by_x is None:\n by_x = [self.names.index(c) for c in common_names]\n else:\n by_x = _getValidCols(by_x,self)\n\n if by_y is None:\n by_y = [other.names.index(c) for c in common_names]\n else:\n by_y = _getValidCols(by_y,other)\n\n\n return H2OFrame._expr(expr=ExprNode(\"merge\", self, other, all_x, all_y, by_x, by_y, method))\n\n\n def relevel(self, y):\n \"\"\"\n Reorder levels of an H2O factor.\n\n The levels of a factor are reordered such that the reference level is at level 0, all remaining levels are\n moved down as needed.\n\n :param str y: The reference level\n :returns: New reordered factor column\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"relevel\", self, quote(y)))\n\n\n def insert_missing_values(self, fraction=0.1, seed=None):\n \"\"\"\n Insert missing values into the current frame, modifying it in-place.\n\n Randomly replaces a user-specified fraction of entries in a H2O dataset with missing\n values.\n\n :param float fraction: A number between 0 and 1 indicating the fraction of entries to replace with missing.\n :param int seed: The seed for the random number generator used to determine which values to make missing.\n\n :returns: the original H2OFrame with missing values inserted.\n \"\"\"\n kwargs = {}\n kwargs['dataset'] = self.frame_id # Eager; forces eval now for following REST call\n kwargs['fraction'] = fraction\n if seed is not None: kwargs['seed'] = seed\n job = {}\n job['job'] = h2o.api(\"POST /3/MissingInserter\", data=kwargs)\n H2OJob(job, job_type=(\"Insert Missing Values\")).poll()\n self._ex._cache.flush()\n return self\n\n\n def min(self):\n \"\"\"The minimum value of all frame entries.\"\"\"\n return ExprNode(\"min\", self)._eager_scalar()\n\n\n def max(self):\n \"\"\"The maximum value of all frame entries.\"\"\"\n return ExprNode(\"max\", 
self)._eager_scalar()\n\n\n def sum(self, skipna=True, axis=0, **kwargs):\n \"\"\"\n Compute the frame's sum by-column (or by-row).\n\n :param bool skipna: If True (default), then NAs are ignored during the computation. Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of sum computation. If 0 (default), then sum is computed columnwise, and the result\n is a frame with 1 row and number of columns as in the original frame. If 1, then sum is computed rowwise\n and the result is a frame with 1 column (called \"sum\"), and number of rows equal to the number of rows\n in the original frame.\n :returns: either a list of sum of values per-column (old semantic); or an H2OFrame containing sum of values\n per-column/per-row in the original frame (new semantic). The new semantic is triggered by either\n providing the ``return_frame=True`` parameter, or having the ``general.allow_breaking_changed`` config\n option turned on.\n \"\"\"\n assert_is_type(skipna, bool)\n assert_is_type(axis, 0, 1)\n # Deprecated since 2016-10-14,\n if \"na_rm\" in kwargs:\n warnings.warn(\"Parameter na_rm is deprecated; use skipna instead\", category=DeprecationWarning)\n na_rm = kwargs.pop(\"na_rm\")\n assert_is_type(na_rm, bool)\n skipna = na_rm # don't assign to skipna directly, to help with error reporting\n # Determine whether to return a frame or a list\n return_frame = get_config_value(\"general.allow_breaking_changes\", False)\n if \"return_frame\" in kwargs:\n return_frame = kwargs.pop(\"return_frame\")\n assert_is_type(return_frame, bool)\n if kwargs:\n raise H2OValueError(\"Unknown parameters %r\" % list(kwargs))\n\n if return_frame:\n return H2OFrame._expr(ExprNode(\"sumaxis\", self, skipna, axis))\n else:\n return ExprNode(\"sumNA\" if skipna else \"sum\", self)._eager_scalar()\n\n\n def mean(self, skipna=True, axis=0, **kwargs):\n \"\"\"\n Compute the frame's means by-column (or by-row).\n\n :param bool skipna: If True (default), then NAs are ignored during the computation. Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of mean computation. If 0 (default), then mean is computed columnwise, and the\n result is a frame with 1 row and number of columns as in the original frame. If 1, then mean is computed\n rowwise and the result is a frame with 1 column (called \"mean\"), and number of rows equal to the number\n of rows in the original frame.\n :returns: either a list of mean values per-column (old semantic); or an H2OFrame containing mean values\n per-column/per-row from the original frame (new semantic). 
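        A minimal sketch of the frame-returning form (assumes ``fr`` is an all-numeric
        H2OFrame):

        >>> col_means = fr.mean(return_frame=True)           # 1 row, one mean per column
        >>> row_means = fr.mean(axis=1, return_frame=True)   # 1 column, one mean per row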
The new semantic is triggered by either\n providing the ``return_frame=True`` parameter, or having the ``general.allow_breaking_changed`` config\n option turned on.\n \"\"\"\n assert_is_type(skipna, bool)\n assert_is_type(axis, 0, 1)\n # Deprecated since 2016-10-14,\n if \"na_rm\" in kwargs:\n warnings.warn(\"Parameter na_rm is deprecated; use skipna instead\", category=DeprecationWarning)\n na_rm = kwargs.pop(\"na_rm\")\n assert_is_type(na_rm, bool)\n skipna = na_rm # don't assign to skipna directly, to help with error reporting\n # Determine whether to return a frame or a list\n return_frame = get_config_value(\"general.allow_breaking_changes\", False)\n if \"return_frame\" in kwargs:\n return_frame = kwargs.pop(\"return_frame\")\n assert_is_type(return_frame, bool)\n if kwargs:\n raise H2OValueError(\"Unknown parameters %r\" % list(kwargs))\n\n new_frame = H2OFrame._expr(ExprNode(\"mean\", self, skipna, axis))\n if return_frame:\n return new_frame\n else:\n return new_frame.getrow()\n\n\n def skewness(self, na_rm=False):\n \"\"\"\n Compute the skewness of each column in the frame.\n\n :param bool na_rm: If True, then ignore NAs during the computation.\n :returns: A list containing the skewness for each column (NaN for non-numeric columns).\n \"\"\"\n return ExprNode(\"skewness\", self, na_rm)._eager_scalar()\n\n\n def kurtosis(self, na_rm=False):\n \"\"\"\n Compute the kurtosis of each column in the frame.\n\n We calculate the common kurtosis, such that kurtosis(normal distribution) is 3.\n\n :param bool na_rm: If True, then ignore NAs during the computation.\n :returns: A list containing the kurtosis for each column (NaN for non-numeric columns).\n \"\"\"\n return ExprNode(\"kurtosis\", self, na_rm)._eager_scalar()\n\n\n def nacnt(self):\n \"\"\"\n Count of NAs for each column in this H2OFrame.\n\n :returns: A list of the na counts (one entry per column).\n \"\"\"\n return ExprNode(\"naCnt\", self)._eager_scalar()\n\n\n def median(self, na_rm=False):\n \"\"\"\n Compute the median of each column in the frame.\n\n :param bool na_rm: If True, then ignore NAs during the computation.\n :returns: A list containing the median for each column (NaN for non-numeric columns).\n \"\"\"\n return ExprNode(\"median\", self, na_rm)._eager_scalar()\n\n\n def var(self, y=None, na_rm=False, use=None):\n \"\"\"\n Compute the variance-covariance matrix of one or two H2OFrames.\n\n :param H2OFrame y: If this parameter is given, then a covariance matrix between the columns of the target\n frame and the columns of ``y`` is computed. If this parameter is not provided then the covariance matrix\n of the target frame is returned. If target frame has just a single column, then return the scalar variance\n instead of the matrix. Single rows are treated as single columns.\n :param str use: A string indicating how to handle missing values. This could be one of the following:\n\n - ``\"everything\"``: outputs NaNs whenever one of its contributing observations is missing\n - ``\"all.obs\"``: presence of missing observations will throw an error\n - ``\"complete.obs\"``: discards missing values along with all observations in their rows so that only\n complete observations are used\n :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is\n ``\"everything\"``; and if False then default ``use`` is ``\"complete.obs\"``. 
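        A minimal sketch (assumes ``fr`` is an all-numeric H2OFrame and "x" is one of
        its columns):

        >>> cov_matrix = fr.var(use="complete.obs")   # covariance matrix as an H2OFrame
        >>> fr["x"].var()                             # single column, returns a scalar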
This parameter has no effect\n if ``use`` is given explicitly.\n\n :returns: An H2OFrame of the covariance matrix of the columns of this frame (if ``y`` is not given),\n or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows\n or single columns, then the variance is returned as a scalar.\n \"\"\"\n symmetric = False\n if y is None:\n y = self\n symmetric = True\n if use is None: use = \"complete.obs\" if na_rm else \"everything\"\n if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1):\n return ExprNode(\"var\", self, y, use, symmetric)._eager_scalar()\n return H2OFrame._expr(expr=ExprNode(\"var\", self, y, use, symmetric))._frame()\n\n\n def sd(self, na_rm=False):\n \"\"\"\n Compute the standard deviation for each column in the frame.\n\n :param bool na_rm: if True, then NAs will be removed from the computation.\n :returns: A list containing the standard deviation for each column (NaN for non-numeric columns).\n \"\"\"\n return ExprNode(\"sd\", self, na_rm)._eager_scalar()\n\n\n def cor(self, y=None, na_rm=False, use=None):\n \"\"\"\n Compute the correlation matrix of one or two H2OFrames.\n\n :param H2OFrame y: If this parameter is provided, then compute correlation between the columns of ``y``\n and the columns of the current frame. If this parameter is not given, then just compute the correlation\n matrix for the columns of the current frame.\n :param str use: A string indicating how to handle missing values. This could be one of the following:\n\n - ``\"everything\"``: outputs NaNs whenever one of its contributing observations is missing\n - ``\"all.obs\"``: presence of missing observations will throw an error\n - ``\"complete.obs\"``: discards missing values along with all observations in their rows so that only\n complete observations are used\n :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is\n ``\"everything\"``; and if False then default ``use`` is ``\"complete.obs\"``. This parameter has no effect\n if ``use`` is given explicitly.\n\n :returns: An H2OFrame of the correlation matrix of the columns of this frame (if ``y`` is not given),\n or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows\n or single columns, then the correlation is returned as a scalar.\n \"\"\"\n assert_is_type(y, H2OFrame, None)\n assert_is_type(na_rm, bool)\n assert_is_type(use, None, \"everything\", \"all.obs\", \"complete.obs\")\n if y is None:\n y = self\n if use is None: use = \"complete.obs\" if na_rm else \"everything\"\n if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1): return ExprNode(\"cor\", self, y, use)._eager_scalar()\n return H2OFrame._expr(expr=ExprNode(\"cor\", self, y, use))._frame()\n\n\n def distance(self, y, measure=None):\n \"\"\"\n Compute a pairwise distance measure between all rows of two numeric H2OFrames.\n\n :param H2OFrame y: Frame containing queries (small)\n :param str use: A string indicating what distance measure to use. 
Must be one of:\n\n - ``\"l1\"``: Absolute distance (L1-norm, >=0)\n - ``\"l2\"``: Euclidean distance (L2-norm, >=0)\n - ``\"cosine\"``: Cosine similarity (-1...1)\n - ``\"cosine_sq\"``: Squared Cosine similarity (0...1)\n\n :examples:\n >>>\n >>> iris_h2o = h2o.import_file(path=pyunit_utils.locate(\"smalldata/iris/iris.csv\"))\n >>> references = iris_h2o[10:150,0:4\n >>> queries = iris_h2o[0:10,0:4]\n >>> A = references.distance(queries, \"l1\")\n >>> B = references.distance(queries, \"l2\")\n >>> C = references.distance(queries, \"cosine\")\n >>> D = references.distance(queries, \"cosine_sq\")\n >>> E = queries.distance(references, \"l1\")\n >>> (E.transpose() == A).all()\n\n :returns: An H2OFrame of the matrix containing pairwise distance / similarity between the \n rows of this frame (N x p) and ``y`` (M x p), with dimensions (N x M).\n \"\"\"\n assert_is_type(y, H2OFrame)\n if measure is None: measure = \"l2\"\n return H2OFrame._expr(expr=ExprNode(\"distance\", self, y, measure))._frame()\n\n\n def strdistance(self, y, measure=None):\n \"\"\"\n Compute element-wise string distances between two H2OFrames. Both frames need to have the same\n shape and only contain string/factor columns.\n\n :param H2OFrame y: A comparison frame.\n :param str measure: A string identifier indicating what string distance measure to use. Must be one of:\n\n - ``\"lv\"``: Levenshtein distance\n - ``\"lcs\"``: Longest common substring distance\n - ``\"qgram\"``: q-gram distance\n - ``\"jaccard\"``: Jaccard distance between q-gram profiles\n - ``\"jw\"``: Jaro, or Jaro-Winker distance\n - ``\"soundex\"``: Distance based on soundex encoding\n\n :examples:\n >>>\n >>> x = h2o.H2OFrame.from_python(['Martha', 'Dwayne', 'Dixon'], column_types=['factor'])\n >>> y = h2o.H2OFrame.from_python(['Marhta', 'Duane', 'Dicksonx'], column_types=['string'])\n >>> x.strdistance(y, measure=\"jw\")\n\n :returns: An H2OFrame of the matrix containing element-wise distance between the\n strings of this frame and ``y``. 
The returned frame has the same shape as the input frames.\n \"\"\"\n assert_is_type(y, H2OFrame)\n assert_is_type(measure, Enum('lv', 'lcs', 'qgram', 'jaccard', 'jw', 'soundex'))\n return H2OFrame._expr(expr=ExprNode(\"strDistance\", self, y, measure))._frame()\n\n\n def asfactor(self):\n \"\"\"\n Convert columns in the current frame to categoricals.\n\n :returns: new H2OFrame with columns of the \"enum\" type.\n \"\"\"\n for colname in self.names:\n t = self.types[colname]\n if t not in {\"bool\", \"int\", \"string\", \"enum\"}:\n raise H2OValueError(\"Only 'int' or 'string' are allowed for \"\n \"asfactor(), got %s:%s \" % (colname, t))\n fr = H2OFrame._expr(expr=ExprNode(\"as.factor\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {name: \"enum\" for name in self.types}\n else:\n raise H2OTypeError(\"Types are not available in result\")\n \n return fr\n\n\n def isfactor(self):\n \"\"\"\n Test which columns in the current frame are categorical.\n\n :returns: a list of True/False indicating for each column in the frame whether it is categorical.\n \"\"\"\n return [bool(o) for o in ExprNode(\"is.factor\", self)._eager_scalar()]\n\n\n def anyfactor(self):\n \"\"\"Return True if there are any categorical columns in the frame.\"\"\"\n return bool(ExprNode(\"any.factor\", self)._eager_scalar())\n\n\n def categories(self):\n \"\"\"\n Return the list of levels for an enum (categorical) column.\n\n This function can only be applied to single-column categorical frame.\n \"\"\"\n if self.ncols != 1:\n raise H2OValueError(\"This operation only applies to a single factor column\")\n if self.types[self.names[0]] != \"enum\":\n raise H2OValueError(\"Input is not a factor. This operation only applies to a single factor column\")\n return self.levels()[0]\n\n\n def transpose(self):\n \"\"\"\n Transpose rows and columns of this frame.\n\n :returns: new H2OFrame where with rows/columns from the original frame transposed.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"t\", self))\n\n\n def strsplit(self, pattern):\n \"\"\"\n Split the strings in the target column on the given regular expression pattern.\n\n :param str pattern: The split pattern.\n :returns: H2OFrame containing columns of the split strings.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"strsplit\", self, pattern))\n fr._ex._cache.nrows = self.nrow\n return fr\n\n def tokenize(self, split):\n \"\"\"\n Tokenize String\n\n tokenize() is similar to strsplit(), the difference between them is that tokenize() will store the tokenized\n text into a single column making it easier for additional processing (filtering stop words, word2vec algo, ...).\n\n :param str split The regular expression to split on.\n @return An H2OFrame with a single column representing the tokenized Strings. Original rows of the input DF are separated by NA.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"tokenize\", self, split))\n return fr\n\n def countmatches(self, pattern):\n \"\"\"\n For each string in the frame, count the occurrences of the provided pattern.\n\n The pattern here is a plain string, not a regular expression. We will search for the occurrences of the\n pattern as a substring in element of the frame. This function is applicable to frames containing only\n string or categorical columns.\n\n :param str pattern: The pattern to count matches on in each string. 
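        A minimal sketch (the single string column below is made up):

        >>> words = h2o.H2OFrame({"txt": ["banana", "apple", "papaya"]})
        >>> words.countmatches("an")            # per-row substring counts, e.g. 2, 0, 0
        >>> words.countmatches(["an", "pa"])    # searches for every pattern in the list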
This can also be a list of strings,\n in which case all of them will be searched for.\n :returns: numeric H2OFrame with the same shape as the original, containing counts of matches of the\n pattern for each cell in the original frame.\n \"\"\"\n assert_is_type(pattern, str, [str])\n fr = H2OFrame._expr(expr=ExprNode(\"countmatches\", self, pattern))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncols = self.ncol\n return fr\n\n\n def trim(self):\n \"\"\"\n Trim white space on the left and right of strings in a single-column H2OFrame.\n\n :returns: H2OFrame with trimmed strings.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"trim\", self))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def substring(self, start_index, end_index=None):\n \"\"\"\n For each string, return a new string that is a substring of the original string.\n\n If end_index is not specified, then the substring extends to the end of the original string. If the start_index\n is longer than the length of the string, or is greater than or equal to the end_index, an empty string is\n returned. Negative start_index is coerced to 0.\n\n :param int start_index: The index of the original string at which to start the substring, inclusive.\n :param int end_index: The index of the original string at which to end the substring, exclusive.\n :returns: An H2OFrame containing the specified substrings.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"substring\", self, start_index, end_index))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def lstrip(self, set=\" \"):\n \"\"\"\n Return a copy of the column with leading characters removed.\n\n The set argument is a string specifying the set of characters to be removed.\n If omitted, the set argument defaults to removing whitespace.\n\n :param str set: The set of characters to lstrip from strings in column\n :returns: a new H2OFrame with the same shape as the original frame and having all its values\n trimmed from the left (equivalent of Python's ``str.lstrip()``).\n \"\"\"\n # work w/ None; parity with python lstrip\n if set is None: set = \" \"\n\n fr = H2OFrame._expr(expr=ExprNode(\"lstrip\", self, set))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def rstrip(self, set=\" \"):\n \"\"\"\n Return a copy of the column with trailing characters removed.\n\n The set argument is a string specifying the set of characters to be removed.\n If omitted, the set argument defaults to removing whitespace.\n\n :param str set: The set of characters to rstrip from strings in column\n :returns: a new H2OFrame with the same shape as the original frame and having all its values\n trimmed from the right (equivalent of Python's ``str.rstrip()``).\n \"\"\"\n # work w/ None; parity with python rstrip\n if set is None: set = \" \"\n\n fr = H2OFrame._expr(expr=ExprNode(\"rstrip\", self, set))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def entropy(self):\n \"\"\"\n For each string compute its Shannon entropy, if the string is empty the entropy is 0.\n\n :returns: an H2OFrame of Shannon entropies.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"entropy\", self))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def num_valid_substrings(self, path_to_words):\n \"\"\"\n For each string, find the count of all possible substrings with 2 characters or more that are contained in\n the line-separated text file whose path is 
given.\n\n :param str path_to_words: Path to file that contains a line-separated list of strings considered valid.\n :returns: An H2OFrame with the number of substrings that are contained in the given word list.\n \"\"\"\n assert_is_type(path_to_words, str)\n fr = H2OFrame._expr(expr=ExprNode(\"num_valid_substrings\", self, path_to_words))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def nchar(self):\n \"\"\"\n Count the length of each string in a single-column H2OFrame of string type.\n\n :returns: A single-column H2OFrame containing the per-row character count.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"strlen\", self))\n\n\n def table(self, data2=None, dense=True):\n \"\"\"\n Compute the counts of values appearing in a column, or co-occurence counts between two columns.\n\n :param H2OFrame data2: An optional single column to aggregate counts by.\n :param bool dense: If True (default) then use dense representation, which lists only non-zero counts,\n 1 combination per row. Set to False to expand counts across all combinations.\n\n :returns: H2OFrame of the counts at each combination of factor levels\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"table\", self, data2, dense)) if data2 is not None else H2OFrame._expr(\n expr=ExprNode(\"table\", self, dense))\n\n\n def hist(self, breaks=\"sturges\", plot=True, **kwargs):\n \"\"\"\n Compute a histogram over a numeric column.\n\n :param breaks: Can be one of ``\"sturges\"``, ``\"rice\"``, ``\"sqrt\"``, ``\"doane\"``, ``\"fd\"``, ``\"scott\"``;\n or a single number for the number of breaks; or a list containing the split points, e.g:\n ``[-50, 213.2123, 9324834]``. If breaks is \"fd\", the MAD is used over the IQR in computing bin width.\n :param bool plot: If True (default), then a plot will be generated using ``matplotlib``.\n\n :returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true,\n mids, and density; otherwise this method draws a plot and returns nothing.\n \"\"\"\n server = kwargs.pop(\"server\") if \"server\" in kwargs else False\n assert_is_type(breaks, int, [numeric], Enum(\"sturges\", \"rice\", \"sqrt\", \"doane\", \"fd\", \"scott\"))\n assert_is_type(plot, bool)\n assert_is_type(server, bool)\n if kwargs:\n raise H2OValueError(\"Unknown parameters to hist(): %r\" % kwargs)\n hist = H2OFrame._expr(expr=ExprNode(\"hist\", self, breaks))._frame()\n\n if plot:\n try:\n import matplotlib\n if server:\n matplotlib.use(\"Agg\", warn=False)\n import matplotlib.pyplot as plt\n except ImportError:\n print(\"ERROR: matplotlib is required to make the histogram plot. 
\"\n \"Set `plot` to False, if a plot is not desired.\")\n return\n\n hist[\"widths\"] = hist[\"breaks\"].difflag1()\n # [2:] because we're removing the title and the first row (which consists of NaNs)\n lefts = [float(c[0]) for c in h2o.as_list(hist[\"breaks\"], use_pandas=False)[2:]]\n widths = [float(c[0]) for c in h2o.as_list(hist[\"widths\"], use_pandas=False)[2:]]\n counts = [float(c[0]) for c in h2o.as_list(hist[\"counts\"], use_pandas=False)[2:]]\n\n plt.xlabel(self.names[0])\n plt.ylabel(\"Frequency\")\n plt.title(\"Histogram of %s\" % self.names[0])\n plt.bar(left=lefts, width=widths, height=counts, bottom=0)\n if not server:\n plt.show()\n else:\n hist[\"density\"] = hist[\"counts\"] / (hist[\"breaks\"].difflag1() * hist[\"counts\"].sum())\n return hist\n\n\n def isax(self, num_words, max_cardinality, optimize_card=False, **kwargs):\n \"\"\"\n Compute the iSAX index for DataFrame which is assumed to be numeric time series data.\n\n References:\n\n - http://www.cs.ucr.edu/~eamonn/SAX.pdf\n - http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf\n\n :param int num_words: Number of iSAX words for the timeseries, i.e. granularity along the time series\n :param int max_cardinality: Maximum cardinality of the iSAX word. Each word can have less than the max\n :param bool optimized_card: An optimization flag that will find the max cardinality regardless of what is\n passed in for ``max_cardinality``.\n\n :returns: An H2OFrame with the name of time series, string representation of iSAX word, followed by\n binary representation.\n \"\"\"\n if num_words <= 0: raise H2OValueError(\"num_words must be greater than 0\")\n if max_cardinality <= 0: raise H2OValueError(\"max_cardinality must be greater than 0\")\n return H2OFrame._expr(expr=ExprNode(\"isax\", self, num_words, max_cardinality, optimize_card))\n\n def pivot(self, index, column, value):\n \"\"\"\n Pivot the frame designated by the three columns: index, column, and value. Index and column should be\n of type enum, int, or time.\n For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame\n\n :param index: Index is a column that will be the row label\n :param column: The labels for the columns in the pivoted Frame\n :param value: The column of values for the given index and column label\n :returns:\n \"\"\"\n assert_is_type(index, str)\n assert_is_type(column, str)\n assert_is_type(value, str)\n col_names = self.names\n if index not in col_names:\n raise H2OValueError(\"Index not in H2OFrame\")\n if column not in col_names:\n raise H2OValueError(\"Column not in H2OFrame\")\n if value not in col_names:\n raise H2OValueError(\"Value column not in H2OFrame\")\n if self.type(column) not in [\"enum\",\"time\",\"int\"]:\n raise H2OValueError(\"'column' argument is not type enum, time or int\")\n if self.type(index) not in [\"enum\",\"time\",\"int\"]:\n raise H2OValueError(\"'index' argument is not type enum, time or int\")\n return H2OFrame._expr(expr=ExprNode(\"pivot\",self,index,column,value))\n\n def topNBottomN(self, column=0, nPercent=10, grabTopN=-1):\n \"\"\"\n Given a column name or one column index, a percent N, this function will return the top or bottom N% of the\n values of the column of a frame. 
The column must be a numerical column.\n \n :param column: a string for column name or an integer index\n :param nPercent: a top or bottom percentage of the column values to return\n :param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent\n :returns: a H2OFrame containing two columns. The first column contains the original row indices where\n the top/bottom values are extracted from. The second column contains the values.\n \"\"\"\n assert (nPercent >= 0) and (nPercent<=100.0), \"nPercent must be between 0.0 and 100.0\"\n assert round(nPercent*0.01*self.nrows)>0, \"Increase nPercent. Current value will result in top 0 row.\"\n\n if isinstance(column, int):\n if (column < 0) or (column>=self.ncols):\n raise H2OValueError(\"Invalid column index H2OFrame\")\n else:\n colIndex = column\n else: # column is a column name\n col_names = self.names\n if column not in col_names:\n raise H2OValueError(\"Column name not found H2OFrame\")\n else:\n colIndex = col_names.index(column)\n\n if not(self[colIndex].isnumeric()):\n raise H2OValueError(\"Wrong column type! Selected column must be numeric.\")\n\n return H2OFrame._expr(expr=ExprNode(\"topn\", self, colIndex, nPercent, grabTopN))\n\n def topN(self, column=0, nPercent=10):\n \"\"\"\n Given a column name or one column index, a percent N, this function will return the top N% of the values\n of the column of a frame. The column must be a numerical column.\n \n :param column: a string for column name or an integer index\n :param nPercent: a top percentage of the column values to return\n :returns: a H2OFrame containing two columns. The first column contains the original row indices where\n the top values are extracted from. The second column contains the top nPercent values.\n \"\"\"\n return self.topNBottomN(column, nPercent, 1)\n\n def bottomN(self, column=0, nPercent=10):\n \"\"\"\n Given a column name or one column index, a percent N, this function will return the bottom N% of the values\n of the column of a frame. The column must be a numerical column.\n \n :param column: a string for column name or an integer index\n :param nPercent: a bottom percentage of the column values to return\n :returns: a H2OFrame containing two columns. The first column contains the original row indices where\n the bottom values are extracted from. 
The second column contains the bottom nPercent values.\n \"\"\"\n return self.topNBottomN(column, nPercent, -1)\n\n def sub(self, pattern, replacement, ignore_case=False):\n \"\"\"\n Substitute the first occurrence of pattern in a string with replacement.\n\n :param str pattern: A regular expression.\n :param str replacement: A replacement string.\n :param bool ignore_case: If True then pattern will match case-insensitively.\n :returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"replacefirst\", self, pattern, replacement, ignore_case))\n\n\n def gsub(self, pattern, replacement, ignore_case=False):\n \"\"\"\n Globally substitute occurrences of pattern in a string with replacement.\n\n :param str pattern: A regular expression.\n :param str replacement: A replacement string.\n :param bool ignore_case: If True then pattern will match case-insensitively.\n :returns: an H2OFrame with all occurrences of ``pattern`` in all values replaced with ``replacement``.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"replaceall\", self, pattern, replacement, ignore_case))\n\n\n def interaction(self, factors, pairwise, max_factors, min_occurrence, destination_frame=None):\n \"\"\"\n Categorical Interaction Feature Creation in H2O.\n\n Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by\n the user.\n\n :param factors: list of factor columns (either indices or column names).\n :param bool pairwise: Whether to create pairwise interactions between factors (otherwise create one\n higher-order interaction). Only applicable if there are 3 or more factors.\n :param int max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra\n catch-all factor will be made).\n :param int min_occurrence: Min. occurrence threshold for factor levels in pair-wise interaction terms.\n :param str destination_frame: (internal) string indicating the key for the frame created.\n\n :returns: an H2OFrame\n \"\"\"\n return h2o.interaction(data=self, factors=factors, pairwise=pairwise, max_factors=max_factors,\n min_occurrence=min_occurrence, destination_frame=destination_frame)\n\n\n def toupper(self):\n \"\"\"\n Translate characters from lower to upper case for a particular column.\n\n :returns: new H2OFrame with all strings in the current frame converted to the uppercase.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"toupper\", self), cache=self._ex._cache)\n\n def grep(self,pattern, ignore_case = False, invert = False, output_logical = False):\n \"\"\"\n Searches for matches to argument `pattern` within each element\n of a string column.\n\n Default behavior is to return indices of the elements matching the pattern. 
Parameter\n `output_logical` can be used to return a logical vector indicating if the element matches\n the pattern (1) or not (0).\n\n :param str pattern: A character string containing a regular expression.\n :param bool ignore_case: If True, then case is ignored during matching.\n :param bool invert: If True, then identify elements that do not match the pattern.\n :param bool output_logical: If True, then return logical vector of indicators instead of list of matching positions\n :return: H2OFrame holding the matching positions or a logical list if `output_logical` is enabled.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"grep\", self, pattern, ignore_case, invert, output_logical))\n\n def tolower(self):\n \"\"\"\n Translate characters from upper to lower case for a particular column.\n\n :returns: new H2OFrame with all strings in the current frame converted to the lowercase.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"tolower\", self), cache=self._ex._cache)\n\n\n def rep_len(self, length_out):\n \"\"\"\n Create a new frame replicating the current frame.\n\n If the source frame has a single column, then the new frame will be replicating rows and its dimensions\n will be ``length_out x 1``. However if the source frame has more than 1 column, then then new frame\n will be replicating data in columnwise direction, and its dimensions will be ``nrows x length_out``,\n where ``nrows`` is the number of rows in the source frame. Also note that if ``length_out`` is smaller\n than the corresponding dimension of the source frame, then the new frame will actually be a truncated\n version of the original.\n\n :param int length_out: Number of columns (rows) of the resulting H2OFrame\n :returns: new H2OFrame with repeated data from the current frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"rep_len\", self, length_out))\n\n\n def scale(self, center=True, scale=True):\n \"\"\"\n Center and/or scale the columns of the current frame.\n\n :param center: If True, then demean the data. If False, no shifting is done. If ``center`` is a list of\n numbers then shift each column by the corresponding amount.\n :param scale: If True, then scale the data by each column's standard deviation. If False, no scaling\n is done. If ``scale`` is a list of numbers, then scale each column by the requested amount.\n :returns: an H2OFrame with scaled values from the current frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"scale\", self, center, scale), cache=self._ex._cache)\n\n\n def signif(self, digits=6):\n \"\"\"\n Round doubles/floats to the given number of significant digits.\n\n :param int digits: Number of significant digits to retain.\n :returns: new H2OFrame with rounded values from the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"signif\", self, digits), cache=self._ex._cache)\n\n\n def round(self, digits=0):\n \"\"\"\n Round doubles/floats to the given number of decimal places.\n\n :param int digits: The number of decimal places to retain. Rounding to a negative number of decimal places is\n not supported. 
For rounding we use the \"round half to even\" mode (IEC 60559 standard), so that\n ``round(2.5) = 2`` and ``round(3.5) = 4``.\n :returns: new H2OFrame with rounded values from the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"round\", self, digits), cache=self._ex._cache)\n\n\n def asnumeric(self):\n \"\"\"Return new frame with all columns converted to numeric.\"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"as.numeric\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"real\" for k in fr._ex._cache.types.keys()}\n return fr\n\n\n def ascharacter(self):\n \"\"\"\n Convert all columns in the frame into strings.\n\n :returns: new H2OFrame with columns of \"string\" type.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"as.character\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"string\" for k in fr._ex._cache.types.keys()}\n return fr\n\n\n def na_omit(self):\n \"\"\"\n Remove rows with NAs from the H2OFrame.\n\n :returns: new H2OFrame with all rows from the original frame containing any NAs removed.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"na.omit\", self), cache=self._ex._cache)\n fr._ex._cache.nrows = -1\n return fr\n\n\n def difflag1(self):\n \"\"\"\n Conduct a diff-1 transform on a numeric frame column.\n\n :returns: an H2OFrame where each element is equal to the corresponding element in the source\n frame minus the previous-row element in the same frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"difflag1\", self), cache=self._ex._cache)\n return fr\n\n\n def isna(self):\n \"\"\"\n For each element in an H2OFrame, determine if it is NA or not.\n\n :returns: an H2OFrame of 1s and 0s, where 1s mean the values were NAs.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"is.na\", self))\n fr._ex._cache.nrows = self._ex._cache.nrows\n fr._ex._cache.ncols = self._ex._cache.ncols\n if self._ex._cache.names:\n fr._ex._cache.names = [\"isNA(%s)\" % n for n in self._ex._cache.names]\n fr._ex._cache.types = {\"isNA(%s)\" % n: \"int\" for n in self._ex._cache.names}\n return fr\n\n\n def year(self):\n \"\"\"\n Extract the \"year\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"year\" part from the source frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"year\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def month(self):\n \"\"\"\n Extract the \"month\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"month\" part from the source frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"month\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def week(self):\n \"\"\"\n Extract the \"week\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"week\" part from the source frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"week\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def day(self):\n \"\"\"\n Extract the \"day\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"day\" part from the source frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"day\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n 
fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def dayOfWeek(self):\n \"\"\"\n Extract the \"day-of-week\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"day-of-week\" part from the source frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"dayOfWeek\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def hour(self):\n \"\"\"\n Extract the \"hour-of-day\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"hour-of-day\" part from the source frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"hour\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def minute(self):\n \"\"\"\n Extract the \"minute\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"minute\" part from the source frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"minute\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def second(self):\n \"\"\"\n Extract the \"second\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"second\" part from the source frame.\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"second\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def runif(self, seed=None):\n \"\"\"\n Generate a column of random numbers drawn from a uniform distribution [0,1) and\n having the same data layout as the source frame.\n\n :param int seed: seed for the random number generator.\n\n :returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1).\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"h2o.runif\", self, -1 if seed is None else seed))\n fr._ex._cache.ncols = 1\n fr._ex._cache.nrows = self.nrow\n return fr\n\n\n def stratified_split(self, test_frac=0.2, seed=-1):\n \"\"\"\n Construct a column that can be used to perform a random stratified split.\n\n :param float test_frac: The fraction of rows that will belong to the \"test\".\n :param int seed: The seed for the random number generator.\n\n :returns: an H2OFrame having single categorical column with two levels: ``\"train\"`` and ``\"test\"``.\n\n :examples:\n >>> stratsplit = df[\"y\"].stratified_split(test_frac=0.3, seed=12349453)\n >>> train = df[stratsplit==\"train\"]\n >>> test = df[stratsplit==\"test\"]\n >>>\n >>> # check that the distributions among the initial frame, and the\n >>> # train/test frames match\n >>> df[\"y\"].table()[\"Count\"] / df[\"y\"].table()[\"Count\"].sum()\n >>> train[\"y\"].table()[\"Count\"] / train[\"y\"].table()[\"Count\"].sum()\n >>> test[\"y\"].table()[\"Count\"] / test[\"y\"].table()[\"Count\"].sum()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode('h2o.random_stratified_split', self, test_frac, seed))\n\n\n def match(self, table, nomatch=0):\n \"\"\"\n Make a vector of the positions of (first) matches of its first argument in its second.\n\n Only applicable to single-column categorical/string frames.\n\n :param List table: the list of items to match against\n :param int nomatch: value that should be returned when there is no match.\n :returns: a new H2OFrame containing for each cell from the source frame the index 
where\n the pattern ``table`` first occurs within that cell.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"match\", self, table, nomatch, None))\n\n\n def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):\n \"\"\"\n Cut a numeric vector into categorical \"buckets\".\n\n This method is only applicable to a single-column numeric frame.\n\n :param List[float] breaks: The cut points in the numeric vector.\n :param List[str] labels: Labels for categorical levels produced. Defaults to set notation of\n intervals defined by the breaks.\n :param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter\n is True, then the interval becomes ``[lo, hi]``.\n :param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.\n :param int dig_lab: Number of digits following the decimal point to consider.\n\n :returns: Single-column H2OFrame of categorical data.\n \"\"\"\n assert_is_type(breaks, [numeric])\n if self.ncols != 1: raise H2OValueError(\"Single-column frame is expected\")\n if self.types[self.names[0]] not in {\"int\", \"real\"}: raise H2OValueError(\"A numeric column is expected\")\n fr = H2OFrame._expr(expr=ExprNode(\"cut\", self, breaks, labels, include_lowest, right, dig_lab),\n cache=self._ex._cache)\n fr._ex._cache.types = {k: \"enum\" for k in self.names}\n return fr\n\n\n def which(self):\n \"\"\"\n Compose the list of row indices for which the frame contains non-zero values.\n\n Only applicable to integer single-column frames.\n Equivalent to comprehension ``[index for index, value in enumerate(self) if value]``.\n\n :returns: a new single-column H2OFrame containing indices of those rows in the original frame\n that contained non-zero values.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"which\", self))\n\n def idxmax(self,skipna=True, axis=0):\n \"\"\"\n Get the index of the max value in a column or row\n\n :param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of finding the max index. If 0 (default), then the max index is searched columnwise, and the\n result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched\n rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.\n :returns: either a list of max index values per-column or an H2OFrame containing max index values\n per-row from the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"which.max\", self, skipna, axis))\n\n def idxmin(self,skipna=True, axis=0):\n \"\"\"\n Get the index of the min value in a column or row\n\n :param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of finding the min index. If 0 (default), then the min index is searched columnwise, and the\n result is a frame with 1 row and number of columns as in the original frame. 
If 1, then the min index is searched\n rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.\n :returns: either a list of min index values per-column or an H2OFrame containing min index values\n per-row from the original frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"which.min\", self, skipna, axis))\n\n\n def ifelse(self, yes, no):\n \"\"\"\n Equivalent to ``[y if t else n for t,y,n in zip(self,yes,no)]``.\n\n Based on the booleans in the test vector, the output has the values of the\n yes and no vectors interleaved (or merged together). All Frames must have\n the same row count. Single column frames are broadened to match wider\n Frames. Scalars are allowed, and are also broadened to match wider frames.\n\n :param yes: Frame to use if ``test`` is true; may be a scalar or single column\n :param no: Frame to use if ``test`` is false; may be a scalar or single column\n\n :returns: an H2OFrame of the merged yes/no frames/scalars according to the test input frame.\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"ifelse\", self, yes, no))\n\n\n def apply(self, fun=None, axis=0):\n \"\"\"\n Apply a lambda expression to an H2OFrame.\n\n :param fun: a lambda expression to be applied per row or per column.\n :param axis: 0 = apply to each column; 1 = apply to each row\n :returns: a new H2OFrame with the results of applying ``fun`` to the current frame.\n \"\"\"\n from .astfun import _bytecode_decompile_lambda\n assert_is_type(axis, 0, 1)\n assert_is_type(fun, FunctionType)\n assert_satisfies(fun, fun.__name__ == \"<lambda>\")\n res = _bytecode_decompile_lambda(fun.__code__)\n return H2OFrame._expr(expr=ExprNode(\"apply\", self, 1 + (axis == 0), *res))\n\n\n #-------------------------------------------------------------------------------------------------------------------\n # Synonyms + Deprecated\n #-------------------------------------------------------------------------------------------------------------------\n # Here we have all methods that are provided as alternative names to some other names defined above. This also\n # includes methods that we rename as part of the deprecation process (but keeping the old name for the sake of\n # backward compatibility). We gather them all down here to have a slightly cleaner code.\n\n @staticmethod\n def mktime(year=1970, month=0, day=0, hour=0, minute=0, second=0, msec=0):\n \"\"\"\n Deprecated, use :func:`moment` instead.\n\n This function was left for backward-compatibility purposes only. 
It is\n not very stable, and counterintuitively uses 0-based months and days,\n so \"January 4th, 2001\" should be entered as ``mktime(2001, 0, 3)``.\n \"\"\"\n return H2OFrame._expr(ExprNode(\"mktime\", year, month, day, hour, minute, second, msec))\n\n @property\n def columns(self):\n \"\"\"Same as ``self.names``.\"\"\"\n return self.names\n\n @columns.setter\n def columns(self, value):\n self.set_names(value)\n\n @property\n def col_names(self):\n \"\"\"Same as ``self.names``.\"\"\"\n return self.names\n\n @col_names.setter\n def col_names(self, value):\n self.set_names(value)\n\n def __len__(self):\n \"\"\"Number of rows in the dataframe, same as ``self.nrows``.\"\"\"\n return self.nrows\n\n @property\n def nrow(self):\n \"\"\"Same as ``self.nrows``.\"\"\"\n return self.nrows\n\n @property\n def ncol(self):\n \"\"\"Same as ``self.ncols``.\"\"\"\n return self.ncols\n\n @property\n def dim(self):\n \"\"\"Same as ``list(self.shape)``.\"\"\"\n return [self.nrow, self.ncol]\n\n #@property\n #def frame_id(self):\n # \"\"\"Same as ``frame.id``.\"\"\"\n # return self.id\n\n #@frame_id.setter\n #def frame_id(self, value):\n # self.id = value\n\n @staticmethod\n def from_python(python_obj, destination_frame=None, header=0, separator=\",\", column_names=None,\n column_types=None, na_strings=None):\n \"\"\"[DEPRECATED] Use constructor ``H2OFrame()`` instead.\"\"\"\n return H2OFrame(python_obj, destination_frame, header, separator, column_names, column_types,\n na_strings)\n\n\n def ischaracter(self):\n \"\"\"[DEPRECATED] Use ``frame.isstring()``.\"\"\"\n return self.isstring()\n\n\n\n#-----------------------------------------------------------------------------------------------------------------------\n# Helpers\n#-----------------------------------------------------------------------------------------------------------------------\n\ndef _getValidCols(by_idx, fr): # so user can input names of the columns as well is idx num\n tmp = []\n for i in by_idx:\n if type(i) == str:\n if i not in fr.names:\n raise H2OValueError(\"Column: \" + i + \" not in frame.\")\n tmp.append(fr.names.index(i))\n elif type(i) != int:\n raise H2OValueError(\"Join on column: \" + i + \" not of type int\")\n else:\n tmp.append(i)\n return list(set(tmp))\n\ndef _binop(lhs, op, rhs, rtype=None):\n assert_is_type(lhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame)\n assert_is_type(rhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame)\n if isinstance(lhs, H2OFrame) and isinstance(rhs, H2OFrame) and lhs._is_frame and rhs._is_frame:\n lrows, lcols = lhs.shape\n rrows, rcols = rhs.shape\n compatible = ((lcols == rcols and lrows == rrows) or\n (lcols == 1 and lrows == rrows) or\n (lcols == 1 and lrows == 1) or\n (rcols == 1 and lrows == rrows) or\n (rcols == 1 and rrows == 1) or\n (lrows == 1 and lcols == rcols) or\n (rrows == 1 and lcols == rcols)\n )\n if not compatible:\n raise H2OValueError(\"Attempting to operate on incompatible frames: (%d x %d) and (%d x %d)\"\n % (lrows, lcols, rrows, rcols))\n\n if is_type(lhs, pandas_timestamp, numpy_datetime, datetime.date):\n lhs = H2OFrame.moment(date=lhs)\n if is_type(rhs, pandas_timestamp, numpy_datetime, datetime.date):\n rhs = H2OFrame.moment(date=rhs)\n\n cache = lhs._ex._cache if isinstance(lhs, H2OFrame) else rhs._ex._cache\n res = H2OFrame._expr(expr=ExprNode(op, lhs, rhs), cache=cache)\n if rtype is not None and res._ex._cache._names is not None:\n res._ex._cache._types = {name: rtype for name in res._ex._cache._names}\n 
return res\n"
] |
[
[
"scipy.sparse.issparse",
"matplotlib.pyplot.title",
"scipy.sparse.find",
"matplotlib.use",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
alibaba/Retrieval-based-Pre-training-for-Machine-Reading-Comprehension
|
[
"9ccad31bd0bf2216004cf729d1d511fc3e0b77c9"
] |
[
"general_util/utils.py"
] |
[
"import collections\r\nimport json\r\nimport math\r\nimport random\r\nimport re\r\nimport string\r\nimport torch\r\nfrom collections import Counter\r\nfrom torch.nn.functional import softmax\r\nfrom typing import List, Callable, Tuple, Any, Optional\r\n\r\ntry:\r\n from pytorch_pretrained_bert.tokenization import BasicTokenizer\r\nexcept ImportError:\r\n from transformers import BasicTokenizer\r\n\r\n# Named Turple List\r\nDocSpan = collections.namedtuple(\"DocSpan\", [\"start\", \"length\"])\r\n\r\n\r\ndef add_sentence_separator(doc_tokens: List[str], sentence_span_list: List[Tuple[int, int]], separator: str = '[SEP]'):\r\n new_doc_tokens = []\r\n separator_positions = []\r\n new_sentence_span_list = []\r\n for sen_idx, (span_start, span_end) in enumerate(sentence_span_list):\r\n new_doc_tokens.extend(doc_tokens[span_start: span_end + 1])\r\n if sen_idx != 0:\r\n span_start = span_start - 1\r\n new_sentence_span_list.append((span_start, span_end))\r\n separator_positions.append(len(new_doc_tokens))\r\n new_doc_tokens.append(separator)\r\n return new_doc_tokens, separator_positions[:-1], new_sentence_span_list\r\n\r\n\r\n# def set_random_seed(seed: int = None):\r\n# random.seed(seed)\r\n\r\ndef remove_all_evidence(sentence_span_list, doc_tokens, evidences):\r\n evidences.sort(reverse=False)\r\n for index, evidence in enumerate(evidences):\r\n evi_token_s, evi_token_e = sentence_span_list[evidence]\r\n doc_tokens = doc_tokens[:evi_token_s] + doc_tokens[(evi_token_e + 1):]\r\n reduce_offset = evi_token_e - evi_token_s + 1\r\n sentence_span_list = sentence_span_list[:evidence] + [(s - reduce_offset, e - reduce_offset)\r\n for s, e in sentence_span_list[(evidence + 1):]]\r\n for pointer in range(index + 1, len(evidences)):\r\n evidences[pointer] -= 1\r\n return doc_tokens, sentence_span_list\r\n\r\n\r\ndef generate_random_seq(seq_len_a: int, seq_len_b: int):\r\n seq_a = [0] * seq_len_a\r\n seq_b = [1] * seq_len_b\r\n seq = seq_a + seq_b\r\n\r\n # _set_random_seed(seed)\r\n random.shuffle(seq)\r\n return seq\r\n\r\n\r\ndef random_sample(seq, sample_length: int):\r\n # _set_random_seed(seed)\r\n return random.sample(seq, sample_length)\r\n\r\n\r\ndef generate_seq_with_negative_sample(initial_seq: List[Any], negative_seq: List[Any], sample_ratio: float,\r\n target_index: int = -1):\r\n sampling_length = int(len(initial_seq) * sample_ratio)\r\n negative_samples = random_sample(negative_seq, sampling_length)\r\n random_new_seq_label = generate_random_seq(len(initial_seq), sampling_length)\r\n random_new_seq = []\r\n new_target_index = -1\r\n positive_pointer = 0\r\n negative_pointer = 0\r\n orig_token_map = []\r\n\r\n orig_total_tokens = 0\r\n new_total_tokens = 0\r\n for idx, num in enumerate(random_new_seq_label):\r\n if num == 0:\r\n for i in range(len(initial_seq[positive_pointer])):\r\n orig_token_map.append(new_total_tokens + i)\r\n orig_total_tokens += len(initial_seq[positive_pointer])\r\n new_total_tokens += len(initial_seq[positive_pointer])\r\n\r\n random_new_seq.append(initial_seq[positive_pointer])\r\n if new_target_index == -1 and positive_pointer == target_index:\r\n new_target_index = len(random_new_seq) - 1\r\n positive_pointer += 1\r\n\r\n else:\r\n new_total_tokens += len(negative_samples[negative_pointer])\r\n random_new_seq.append(negative_samples[negative_pointer])\r\n negative_pointer += 1\r\n\r\n random_new_tokens = []\r\n sentence_span_list = []\r\n for sentence in random_new_seq:\r\n start = len(random_new_tokens)\r\n end = start + len(sentence) - 1\r\n 
sentence_span_list.append((start, end))\r\n random_new_tokens.extend(sentence)\r\n\r\n assert len(sentence_span_list) == len(random_new_seq)\r\n assert len(sentence_span_list) == len(random_new_seq_label)\r\n\r\n return random_new_tokens, random_new_seq_label, new_target_index, sentence_span_list, orig_token_map\r\n\r\n\r\ndef normalize_answer(s):\r\n \"\"\"Lower text and remove punctuation, articles and extra whitespace.\"\"\"\r\n\r\n def remove_articles(text):\r\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\r\n\r\n def white_space_fix(text):\r\n return ' '.join(text.split())\r\n\r\n def remove_punc(text):\r\n exclude = set(string.punctuation)\r\n return ''.join(ch for ch in text if ch not in exclude)\r\n\r\n def lower(text):\r\n return text.lower()\r\n\r\n return white_space_fix(remove_articles(remove_punc(lower(s))))\r\n\r\n\r\ndef is_whitespace(c):\r\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\r\n return True\r\n return False\r\n\r\n\r\ndef is_punctuation(c):\r\n # Don't contains '-' compared with string.punctuation\r\n punc = ['!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/',\r\n ':', ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`', '{',\r\n '|', '}', '~']\r\n if c in punc:\r\n return True\r\n return False\r\n\r\n\r\ndef split_sentence(context, sen_tokenizer):\r\n sentences = sen_tokenizer.tokenize(context)\r\n sen_start_list = []\r\n sen_end_list = []\r\n for sen in sentences:\r\n s = context.find(sen)\r\n assert s != -1\r\n e = s + len(sen) - 1\r\n sen_start_list.append(s)\r\n sen_end_list.append(e)\r\n return sen_start_list, sen_end_list\r\n\r\n\r\ndef improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\r\n orig_answer_text):\r\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\r\n\r\n # The SQuAD annotations are character based. We first project them to\r\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\r\n # often find a \"better match\". For example:\r\n #\r\n # Question: What year was John Smith born?\r\n # Context: The leader was John Smith (1895-1943).\r\n # Answer: 1895\r\n #\r\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\r\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\r\n # the exact answer, 1895.\r\n #\r\n # However, this is not always possible. Consider the following:\r\n #\r\n # Question: What country is the top exporter of electornics?\r\n # Context: The Japanese electronics industry is the lagest in the world.\r\n # Answer: Japan\r\n #\r\n # In this case, the annotator chose \"Japan\" as a character sub-span of\r\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\r\n # \"Japanese\", we just use \"Japanese\" as the annotation. This is fairly rare\r\n # in SQuAD, but does happen.\r\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\r\n\r\n for new_start in range(input_start, input_end + 1):\r\n for new_end in range(input_end, new_start - 1, -1):\r\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\r\n if text_span == tok_answer_text:\r\n return new_start, new_end\r\n\r\n return input_start, input_end\r\n\r\n\r\ndef check_is_max_context(doc_spans, cur_span_index, position):\r\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\r\n\r\n # Because of the sliding window approach taken to scoring documents, a single\r\n # token can appear in multiple documents. 
E.g.\r\n # Doc: the man went to the store and bought a gallon of milk\r\n # Span A: the man went to the\r\n # Span B: to the store and bought\r\n # Span C: and bought a gallon of\r\n # ...\r\n #\r\n # Now the word 'bought' will have two scores from spans B and C. We only\r\n # want to consider the score with \"maximum context\", which we define as\r\n # the *minimum* of its left and right context (the *sum* of left and\r\n # right context will always be the same, of course).\r\n #\r\n # In the example the maximum context for 'bought' would be span C since\r\n # it has 1 left context and 3 right context, while span B has 4 left context\r\n # and 0 right context.\r\n best_score = None\r\n best_span_index = None\r\n for (span_index, doc_span) in enumerate(doc_spans):\r\n end = doc_span.start + doc_span.length - 1\r\n if position < doc_span.start:\r\n continue\r\n if position > end:\r\n continue\r\n num_left_context = position - doc_span.start\r\n num_right_context = end - position\r\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\r\n if best_score is None or score > best_score:\r\n best_score = score\r\n best_span_index = span_index\r\n\r\n return cur_span_index == best_span_index\r\n\r\n\r\ndef get_best_indexes(logits, n_best_size):\r\n \"\"\"Get the n-best logits from a list.\"\"\"\r\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\r\n\r\n best_indexes = []\r\n for i in range(len(index_and_score)):\r\n if i >= n_best_size:\r\n break\r\n best_indexes.append(index_and_score[i][0])\r\n return best_indexes\r\n\r\n\r\ndef get_final_text(pred_text, orig_text, do_lower_case, logger, verbose_logging=False):\r\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\r\n\r\n # When we created the data, we kept track of the alignment between original\r\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\r\n # now `orig_text` contains the span of our original text corresponding to the\r\n # span that we predicted.\r\n #\r\n # However, `orig_text` may contain extra characters that we don't want in\r\n # our prediction.\r\n #\r\n # For example, let's say:\r\n # pred_text = steve smith\r\n # orig_text = Steve Smith's\r\n #\r\n # We don't want to return `orig_text` because it contains the extra \"'s\".\r\n #\r\n # We don't want to return `pred_text` because it's already been normalized\r\n # (the SQuAD eval script also does punctuation stripping/lower casing but\r\n # our tokenizer does additional normalization like stripping accent\r\n # characters).\r\n #\r\n # What we really want to return is \"Steve Smith\".\r\n #\r\n # Therefore, we have to apply a semi-complicated alignment heruistic between\r\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. This\r\n # can fail in certain cases in which case we just return `orig_text`.\r\n\r\n def _strip_spaces(text):\r\n ns_chars = []\r\n ns_to_s_map = collections.OrderedDict()\r\n for (i, c) in enumerate(text):\r\n if c == \" \":\r\n continue\r\n ns_to_s_map[len(ns_chars)] = i\r\n ns_chars.append(c)\r\n ns_text = \"\".join(ns_chars)\r\n return ns_text, ns_to_s_map\r\n\r\n # We first tokenize `orig_text`, strip whitespace from the result\r\n # and `pred_text`, and check if they are the same length. If they are\r\n # NOT the same length, the heuristic has failed. 
If they are the same\r\n # length, we assume the characters are one-to-one aligned.\r\n tokenizer = BasicTokenizer(do_lower_case=do_lower_case)\r\n\r\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\r\n\r\n start_position = tok_text.find(pred_text)\r\n if start_position == -1:\r\n if verbose_logging:\r\n logger.info(\r\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\r\n return orig_text\r\n end_position = start_position + len(pred_text) - 1\r\n\r\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\r\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\r\n\r\n if len(orig_ns_text) != len(tok_ns_text):\r\n if verbose_logging:\r\n logger.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\r\n orig_ns_text, tok_ns_text)\r\n return orig_text\r\n\r\n # We then project the characters in `pred_text` back to `orig_text` using\r\n # the character-to-character alignment.\r\n tok_s_to_ns_map = {}\r\n for (i, tok_index) in tok_ns_to_s_map.items():\r\n tok_s_to_ns_map[tok_index] = i\r\n\r\n orig_start_position = None\r\n if start_position in tok_s_to_ns_map:\r\n ns_start_position = tok_s_to_ns_map[start_position]\r\n if ns_start_position in orig_ns_to_s_map:\r\n orig_start_position = orig_ns_to_s_map[ns_start_position]\r\n\r\n if orig_start_position is None:\r\n if verbose_logging:\r\n logger.info(\"Couldn't map start position\")\r\n return orig_text\r\n\r\n orig_end_position = None\r\n if end_position in tok_s_to_ns_map:\r\n ns_end_position = tok_s_to_ns_map[end_position]\r\n if ns_end_position in orig_ns_to_s_map:\r\n orig_end_position = orig_ns_to_s_map[ns_end_position]\r\n\r\n if orig_end_position is None:\r\n if verbose_logging:\r\n logger.info(\"Couldn't map end position\")\r\n return orig_text\r\n\r\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\r\n return output_text\r\n\r\n\r\ndef compute_softmax(scores):\r\n \"\"\"Compute softmax probability over raw logits.\"\"\"\r\n if not scores:\r\n return []\r\n\r\n max_score = None\r\n for score in scores:\r\n if max_score is None or score > max_score:\r\n max_score = score\r\n\r\n exp_scores = []\r\n total_sum = 0.0\r\n for score in scores:\r\n x = math.exp(score - max_score)\r\n exp_scores.append(x)\r\n total_sum += x\r\n\r\n probs = []\r\n for score in exp_scores:\r\n probs.append(score / total_sum)\r\n return probs\r\n\r\n\r\ndef find_evidence_sentence(sentence_span_list: List[Tuple], rationale_start_position: int, rationale_end_position: int):\r\n sentence_id = -1\r\n over_size = 0\r\n for sen_idx, (t_start, t_end) in enumerate(sentence_span_list):\r\n if t_end < rationale_start_position:\r\n continue\r\n if t_start > rationale_end_position:\r\n break\r\n if rationale_start_position <= t_end <= rationale_end_position:\r\n cur_size = t_end - max(rationale_start_position, t_start) + 1\r\n if cur_size > over_size:\r\n over_size = cur_size\r\n sentence_id = sen_idx\r\n elif rationale_start_position <= t_start <= rationale_end_position:\r\n cur_size = rationale_end_position - max(rationale_start_position, t_start) + 1\r\n if cur_size > over_size:\r\n over_size = cur_size\r\n sentence_id = sen_idx\r\n return sentence_id\r\n\r\n\r\ndef truncate_seq_pair(tokens_a, tokens_b, max_length):\r\n \"\"\"Truncates a sequence pair in place to the maximum length.\"\"\"\r\n\r\n # This is a simple heuristic which will always truncate the longer sequence\r\n # one token at a time. 
This makes more sense than truncating an equal percent\r\n # of tokens from each, since if one sequence is very short then each token\r\n # that's truncated likely contains more information than a longer sequence.\r\n while True:\r\n total_length = len(tokens_a) + len(tokens_b)\r\n if total_length <= max_length:\r\n break\r\n if len(tokens_a) > len(tokens_b):\r\n tokens_a.pop()\r\n else:\r\n tokens_b.pop()\r\n\r\n\r\nclass AverageMeter(object):\r\n \"\"\"Computes and stores the average and current value.\"\"\"\r\n\r\n def __init__(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def reset(self):\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n if isinstance(val, torch.Tensor):\r\n val = val.item()\r\n if isinstance(n, torch.Tensor):\r\n n = n.item()\r\n\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r\n\r\n def save(self):\r\n return {\r\n 'val': self.val,\r\n 'avg': self.avg,\r\n 'sum': self.sum,\r\n 'count': self.count\r\n }\r\n\r\n def load(self, value: dict):\r\n if value is None:\r\n self.reset()\r\n self.val = value['val'] if 'val' in value else 0\r\n self.avg = value['avg'] if 'avg' in value else 0\r\n self.sum = value['sum'] if 'sum' in value else 0\r\n self.count = value['count'] if 'count' in value else 0\r\n\r\n\r\nclass LogMetric(object):\r\n \"\"\"\r\n Record all metrics for logging.\r\n \"\"\"\r\n\r\n def __init__(self, *metric_names):\r\n \r\n self.metrics = {\r\n key: AverageMeter() for key in metric_names\r\n }\r\n\r\n def update(self, metric_name, val, n=1):\r\n \r\n self.metrics[metric_name].update(val, n)\r\n\r\n def reset(self, metric_name=None):\r\n if metric_name is None:\r\n for key in self.metrics.keys():\r\n self.metrics[key].reset()\r\n return\r\n \r\n self.metrics[metric_name].reset()\r\n\r\n def get_log(self):\r\n \r\n log = {\r\n key: self.metrics[key].avg for key in self.metrics\r\n }\r\n return log\r\n\r\n\r\nclass CategoricalAccuracy(object):\r\n def __init__(self, label_list: List[str]):\r\n self.predictions = Counter()\r\n self.label_list = [label.lower() for label in label_list]\r\n self.reset()\r\n\r\n def reset(self):\r\n self.predictions.clear()\r\n\r\n @staticmethod\r\n def _get_key(gold, pred) -> str:\r\n return '{} - {}'.format(str(gold).lower(), str(pred).lower())\r\n\r\n @staticmethod\r\n def _split_key(key: str) -> (str, str):\r\n strs = key.split(' - ')\r\n return strs[0], strs[1]\r\n\r\n def update(self, gold, pred):\r\n self.predictions[self._get_key(gold, pred)] += 1\r\n\r\n def __repr__(self):\r\n return json.dumps(self.predictions, indent=2)\r\n\r\n def f1_measure(self, positive_label, negative_label):\r\n true_positive = self.predictions[self._get_key(positive_label, positive_label)]\r\n false_positive = self.predictions[self._get_key(negative_label, positive_label)]\r\n true_negative = self.predictions[self._get_key(negative_label, negative_label)]\r\n false_negative = self.predictions[self._get_key(positive_label, negative_label)]\r\n\r\n precision = float(true_positive) / float(true_positive + false_positive + 1e-13)\r\n recall = float(true_positive) / float(true_positive + false_negative + 1e-13)\r\n f1_measure = 2. 
* ((precision * recall) / (precision + recall + 1e-13))\r\n accuracy = 1.0 * (true_positive + true_negative) / (\r\n true_positive + true_negative + false_positive + false_negative)\r\n result = {'precision': precision, 'recall': recall, 'f1': f1_measure, 'accuracy': accuracy}\r\n return result\r\n\r\n def read_predictions(self, ground_truths, predictions):\r\n \"\"\"\r\n :param ground_truths: ground_truths[(story_id, qid)]=List[List[answer_text]]\r\n :param predictions: official format predictions\r\n :return:\r\n \"\"\"\r\n for pred in predictions:\r\n story_id = pred['id']\r\n turn_id = pred['turn_id']\r\n\r\n pred_text = CoQAEvaluator.normalize_answer(pred['answer'])\r\n gold_text = CoQAEvaluator.normalize_answer(ground_truths[(story_id, turn_id)][0])\r\n\r\n label_list = self.label_list\r\n if pred_text not in label_list:\r\n pred_label = 'not'\r\n else:\r\n pred_label = pred_text\r\n if gold_text not in label_list:\r\n gold_label = 'not'\r\n else:\r\n gold_label = gold_text\r\n\r\n self.update(gold_label, pred_label)\r\n\r\n\r\nclass AttentionWeightWriter(object):\r\n def __init__(self, log_file):\r\n self.log_file = open(log_file, 'w')\r\n\r\n def write_weights(self, attention_matrix: torch.Tensor, col_ids: torch.Tensor = None, row_ids: torch.Tensor = None,\r\n col_mask: torch.Tensor = None, row_mask: torch.Tensor = None, id_to_str: Callable = None,\r\n do_softmax: bool = False):\r\n\r\n attn_matrix = attention_matrix.detach().cpu()\r\n if do_softmax:\r\n attn_matrix = softmax(attn_matrix, dim=-1)\r\n else:\r\n attn_matrix.exp_()\r\n batch, len1, len2 = attn_matrix.size()\r\n if col_ids is not None:\r\n col_ids = col_ids.detach().cpu()\r\n if row_ids is not None:\r\n row_ids = row_ids.detach().cpu()\r\n if col_mask is None:\r\n col_mask = torch.zeros(batch, len1)\r\n else:\r\n col_mask = col_mask.detach().cpu()\r\n if row_mask is None:\r\n row_mask = torch.zeros(batch, len2)\r\n else:\r\n row_mask = row_mask.detach().cpu()\r\n\r\n for batch_id in range(batch):\r\n print('batch_id = {}\\t'.format(batch_id), file=self.log_file)\r\n row_is_null = []\r\n for j in range(len2):\r\n t_str = self.index_to_token(index=(batch_id, j), ids=row_ids, mask=row_mask, id_to_str=id_to_str)\r\n if t_str is None:\r\n row_is_null.append(True)\r\n continue\r\n else:\r\n row_is_null.append(False)\r\n print(t_str, end='\\t', file=self.log_file)\r\n print(file=self.log_file)\r\n for i in range(len1):\r\n col_t_str = self.index_to_token(index=(batch_id, i), ids=col_ids, mask=col_mask, id_to_str=id_to_str)\r\n if col_t_str is None:\r\n continue\r\n else:\r\n print(col_t_str, end='\\t', file=self.log_file)\r\n for j in range(len2):\r\n if row_is_null[j]:\r\n continue\r\n else:\r\n print(attn_matrix[batch_id, i, j].item(), end='\\t', file=self.log_file)\r\n print(file=self.log_file)\r\n print('======================', file=self.log_file)\r\n\r\n @staticmethod\r\n def index_to_token(index, ids: torch.Tensor, mask: torch.Tensor, id_to_str: Callable = None):\r\n if mask[index] == 1:\r\n return None\r\n else:\r\n if ids is None:\r\n token_id = index[-1]\r\n return token_id\r\n\r\n token_id = ids[index].item()\r\n if id_to_str is not None:\r\n return id_to_str(token_id)\r\n else:\r\n return token_id\r\n\r\n\r\nclass CategoricalAccuracyAllen(object):\r\n \"\"\"\r\n Categorical Top-K accuracy. 
Assumes integer labels, with\r\n each item to be classified having a single correct class.\r\n Tie break enables equal distribution of scores among the\r\n classes with same maximum predicted scores.\r\n \"\"\"\r\n\r\n def __init__(self, top_k: int = 1, tie_break: bool = False) -> None:\r\n if top_k > 1 and tie_break:\r\n raise RuntimeError(\"Tie break in Categorical Accuracy \"\r\n \"can be done only for maximum (top_k = 1)\")\r\n if top_k <= 0:\r\n raise RuntimeError(\"top_k passed to Categorical Accuracy must be > 0\")\r\n self._top_k = top_k\r\n self._tie_break = tie_break\r\n self.correct_count = 0.\r\n self.total_count = 0.\r\n\r\n def __call__(self,\r\n predictions: torch.Tensor,\r\n gold_labels: torch.Tensor,\r\n mask: Optional[torch.Tensor] = None):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n predictions : ``torch.Tensor``, required.\r\n A tensor of predictions of shape (batch_size, ..., num_classes).\r\n gold_labels : ``torch.Tensor``, required.\r\n A tensor of integer class label of shape (batch_size, ...). It must be the same\r\n shape as the ``predictions`` tensor without the ``num_classes`` dimension.\r\n mask: ``torch.Tensor``, optional (default = None).\r\n A masking tensor the same size as ``gold_labels``.\r\n \"\"\"\r\n predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)\r\n\r\n # Some sanity checks.\r\n num_classes = predictions.size(-1)\r\n if gold_labels.dim() != predictions.dim() - 1:\r\n raise RuntimeError(\"gold_labels must have dimension == predictions.size() - 1 but \"\r\n \"found tensor of shape: {}\".format(predictions.size()))\r\n if (gold_labels >= num_classes).any():\r\n raise RuntimeError(\"A gold label passed to Categorical Accuracy contains an id >= {}, \"\r\n \"the number of classes.\".format(num_classes))\r\n\r\n predictions = predictions.view((-1, num_classes))\r\n gold_labels = gold_labels.view(-1).long()\r\n if not self._tie_break:\r\n # Top K indexes of the predictions (or fewer, if there aren't K of them).\r\n # Special case topk == 1, because it's common and .max() is much faster than .topk().\r\n if self._top_k == 1:\r\n top_k = predictions.max(-1)[1].unsqueeze(-1)\r\n else:\r\n top_k = predictions.topk(min(self._top_k, predictions.shape[-1]), -1)[1]\r\n\r\n # This is of shape (batch_size, ..., top_k).\r\n correct = top_k.eq(gold_labels.unsqueeze(-1)).float()\r\n else:\r\n # prediction is correct if gold label falls on any of the max scores. 
distribute score by tie_counts\r\n max_predictions = predictions.max(-1)[0]\r\n max_predictions_mask = predictions.eq(max_predictions.unsqueeze(-1))\r\n # max_predictions_mask is (rows X num_classes) and gold_labels is (batch_size)\r\n # ith entry in gold_labels points to index (0-num_classes) for ith row in max_predictions\r\n # For each row check if index pointed by gold_label is was 1 or not (among max scored classes)\r\n correct = max_predictions_mask[torch.arange(gold_labels.numel()).long(), gold_labels].float()\r\n tie_counts = max_predictions_mask.sum(-1)\r\n correct /= tie_counts.float()\r\n correct.unsqueeze_(-1)\r\n\r\n if mask is not None:\r\n correct *= mask.view(-1, 1).float()\r\n self.total_count += mask.sum()\r\n else:\r\n self.total_count += gold_labels.numel()\r\n self.correct_count += correct.sum()\r\n\r\n def get_metric(self, reset: bool = False):\r\n \"\"\"\r\n Returns\r\n -------\r\n The accumulated accuracy.\r\n \"\"\"\r\n if self.total_count > 1e-12:\r\n accuracy = float(self.correct_count) / float(self.total_count)\r\n else:\r\n accuracy = 0.0\r\n if reset:\r\n self.reset()\r\n return accuracy\r\n\r\n def reset(self):\r\n self.correct_count = 0.0\r\n self.total_count = 0.0\r\n\r\n @staticmethod\r\n def unwrap_to_tensors(*tensors: torch.Tensor):\r\n \"\"\"\r\n If you actually passed gradient-tracking Tensors to a Metric, there will be\r\n a huge memory leak, because it will prevent garbage collection for the computation\r\n graph. This method ensures that you're using tensors directly and that they are on\r\n the CPU.\r\n \"\"\"\r\n return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)\r\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.zeros"
]
] |
dmitryrubtsov/Predictions-of-calls-in-Moscow-Megafon
|
[
"260bb49e859694d6a7c0dfb8cb13cd39d05ed597"
] |
[
"runner_predict_proba.py"
] |
[
"import os\nimport pickle\nimport time\n\nimport pandas as pd\n\nfrom sklearn.base import BaseEstimator, TransformerMixin\n\n\nclass ColumnSelector(BaseEstimator, TransformerMixin):\n def __init__(self, columns):\n self.columns = columns\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n assert isinstance(X, pd.DataFrame)\n\n try:\n return X[self.columns]\n except KeyError:\n cols_error = list(set(self.columns) - set(X.columns))\n raise KeyError(\n f'DataFrame does not contain the following columns: {cols_error}')\n\n\nclass AddFeatures(BaseEstimator, TransformerMixin):\n def __init__(self, features, silent=True):\n self.features = features\n self.silent = silent\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n if not self.silent:\n start_t = time.time()\n print('Start adding features'.center(100, '*'))\n assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'\n\n X_features = self.features.loc[self.features.index.isin(\n X.index.unique())]\n\n X_features = X_features.sort_values('buy_time') \\\n .groupby('id').last()\n\n X_merge = X.reset_index() \\\n .merge(X_features.reset_index(), on=X.index.name, how='left', suffixes=('_train', '_features')) \\\n .set_index(X.index.name)\n\n assert X_merge.shape[0] == X.shape[\n 0], f'Shapes of dataframe don\\'t match: {X_merge.shape[0]} and {X.shape[0]}'\n assert (X_merge.index == X.index).all(), 'Index Sort Error'\n if not self.silent:\n print(\n f'End adding features, run time: {time_format(time.time()-start_t)}'.center(100, '*'))\n print()\n\n return X_merge\n\n\nclass MemUseOptimizing(BaseEstimator, TransformerMixin):\n def __init__(self, silent=True):\n self.silent = silent\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n start_t = time.time()\n\n assert isinstance(X, pd.DataFrame), 'This is not a pandas dataframe'\n\n if not self.silent:\n print('Start of dataframe memory use optimizing'.center(100, '*'))\n start_memory_usage = X.memory_usage(deep=True).sum() / 1024**2\n\n X_dtype = pd.DataFrame(\n X.dtypes, columns=['dtype'], index=X.columns)\n\n X_dtype['min'] = X.select_dtypes(['int', 'float']).min()\n X_dtype['max'] = X.select_dtypes(['int', 'float']).max()\n X_dtype['is_int'] = ~(X.select_dtypes(['int', 'float']).astype(\n int).sum() - X.select_dtypes(['int', 'float']).sum()).astype('bool_')\n\n X_dtype.loc[(X_dtype['is_int'] == True), 'dtype'] = 'int64'\n X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(\n 'int32').min) & (X_dtype['max'] <= np.iinfo('int32').max), 'dtype'] = 'int32'\n X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(\n 'int16').min) & (X_dtype['max'] <= np.iinfo('int16').max), 'dtype'] = 'int16'\n X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(\n 'int8').min) & (X_dtype['max'] <= np.iinfo('int8').max), 'dtype'] = 'int8'\n\n X_dtype.loc[(X_dtype['is_int'] == True) & (\n X_dtype['min'] >= np.iinfo('uint64').min), 'dtype'] = 'uint64'\n X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(\n 'uint32').min) & (X_dtype['max'] <= np.iinfo('uint32').max), 'dtype'] = 'uint32'\n X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(\n 'uint16').min) & (X_dtype['max'] <= np.iinfo('uint16').max), 'dtype'] = 'uint16'\n X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] >= np.iinfo(\n 'uint8').min) & (X_dtype['max'] <= np.iinfo('uint8').max), 'dtype'] = 'uint8'\n\n X_dtype.loc[(X_dtype['is_int'] == True) & (X_dtype['min'] == 0) & (\n X_dtype['max'] 
== 1), 'dtype'] = 'bool_'\n\n X_dtype.loc[(X_dtype['is_int'] == False), 'dtype'] = 'float64'\n X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(\n 'float32').min) & (X_dtype['max'] <= np.finfo('float32').max), 'dtype'] = 'float32'\n X_dtype.loc[(X_dtype['is_int'] == False) & (X_dtype['min'] >= np.finfo(\n 'float16').min) & (X_dtype['max'] <= np.finfo('float16').max), 'dtype'] = 'float16'\n\n for col in X.select_dtypes('object').columns:\n num_unique_values = len(X[col].unique())\n num_total_values = len(X[col])\n if num_unique_values / num_total_values < 0.5:\n X_dtype.loc[col, 'dtype'] = 'category'\n\n dtype = X_dtype['dtype'].to_dict()\n\n X = X.astype(dtype)\n\n if not self.silent:\n memory_usage = X.memory_usage(deep=True).sum() / 1024**2\n print('Memory use optimizing'.center(100, '*'))\n print(\n f'Memory usage of properties dataframe before optimizing: {start_memory_usage:.02f} MB')\n print(\n f'Memory usage of properties dataframe after optimizing: {memory_usage:.02f} MB')\n print(\n f'This is {100*memory_usage/start_memory_usage:.02f} % of the initial size')\n print(\n f'End of dataframe memory use optimizing, run time: {time_format(time.time()-start_t)}'.center(64, '*'))\n print()\n\n return X\n\n\nclass GetDate(BaseEstimator, TransformerMixin):\n def __init__(self, silent=True):\n self.silent = silent\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X):\n if not self.silent:\n start_t = time.time()\n print('Start geting date from timestamp'.center(100, '*'))\n if isinstance(X, pd.Series):\n X = pd.DataFrame(X)\n\n assert isinstance(\n X, pd.DataFrame), 'This is not a pandas dataframe or series'\n\n df = pd.DataFrame()\n\n for col in X.columns:\n df[f'{col}_day'] = pd.to_datetime(X[col], unit='s').dt.day\n df[f'{col}_month'] = pd.to_datetime(X[col], unit='s').dt.month\n df[f'{col}_week'] = pd.to_datetime(X[col], unit='s').dt.week\n\n if not self.silent:\n print(\n f'End geting date from timestamp, run time: {time_format(time.time()-start_t)}'.center(100, '*'))\n print()\n return df\n\n\nTARGET = 'target'\n\ndf = pd.read_csv('data_test.csv', index_col=[1]) \\\n .drop('Unnamed: 0', axis=1)\n\nwith open('model.pkl', 'rb') as f:\n model = pickle.load(f)\n\ndf[TARGET] = model.predict_proba(df)[:, 1]\n\ndf.to_csv('answers_test.csv')\n"
] |
[
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
prijatelj/bayesian_eval_ground_truth-free
|
[
"c0e569c78d63beb79f5e1e727c322293c3584323"
] |
[
"psych_metric/datasets/crowd_layer/convert_txt_to_csv_ner.py"
] |
[
"\"\"\"Converts given data txt file into a more parsable formatted csv.\n\"\"\"\nimport argparse\n\nimport numpy as np\nimport pandas as pd\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Convert the given ner-mturk txt file into an easier to parse csv file.')\n\n parser.add_argument('input_file', help='Enter the file path to the csv of author names')\n\n parser.add_argument('output_file', help='Enter the file path to the desired output directory')\n\n return parser.parse_args()\n\nif __name__ == '__main__':\n args = parse_args()\n\n txt = pd.read_csv(args.input_file, header=None, sep=' ', na_values='?', dtype=str, skip_blank_lines=False)\n # rename columns\n txt.columns = ['token'] + list(range(len(txt.columns) - 1))\n\n # add sequence column\n count = 0\n seq = np.empty(len(txt))\n for i in range(len(txt)):\n if txt.iloc[i].isna().all():\n seq[i] = np.nan\n count += 1\n else:\n seq[i] = count\n txt.insert(0, 'sequence', seq)\n\n # Remove all rows with only nas\n txt.dropna('index', 'all', inplace=True)\n\n # make sequence of dtype int\n txt['sequence'] = txt['sequence'].astype(int)\n\n # revert nas in token column to '?'\n txt['token'] = txt['token'].fillna('?')\n\n print(args.output_file)\n txt.to_csv(args.output_file, sep=' ', index=False, na_rep='NA')\n"
] |
[
[
"pandas.read_csv"
]
] |
tsudalab/rxngenerator
|
[
"6f459828c03485926adb390e5bfbd4a6d91de30b",
"6f459828c03485926adb390e5bfbd4a6d91de30b"
] |
[
"sample.py",
"bo/gauss.py"
] |
[
"import sys\nsys.path.append('./rxnft_vae')\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torch.utils.data import DataLoader\nfrom torch.autograd import Variable\n\nimport math, random, sys\nfrom optparse import OptionParser\nfrom collections import deque\n\nfrom reaction_utils import read_multistep_rxns\nfrom reaction import ReactionTree, extract_starting_reactants, StartingReactants, Templates, extract_templates,stats\nfrom fragment import FragmentVocab, FragmentTree, FragmentNode, can_be_decomposed\nfrom vae import FTRXNVAE, set_batch_nodeID\nfrom mpn import MPN,PP,Discriminator\nfrom evaluate import Evaluator\nimport random\n\n\nparser = OptionParser()\nparser.add_option(\"-w\", \"--hidden\", dest=\"hidden_size\", default=200)\nparser.add_option(\"-l\", \"--latent\", dest=\"latent_size\", default=50)\nparser.add_option(\"-d\", \"--depth\", dest=\"depth\", default=2)\nparser.add_option(\"-b\", \"--batch\", dest=\"batch_size\", default = 32)\nparser.add_option(\"-s\", \"--save_dir\", dest=\"save_path\")\nparser.add_option(\"-t\", \"--data_path\", dest=\"data_path\")\nparser.add_option(\"-v\", \"--vocab_path\", dest=\"vocab_path\")\nparser.add_option(\"-o\", \"--output_file\", dest=\"output_file\", default = \"Results/sampled_rxns.txt\")\n\n\nopts, _ = parser.parse_args()\n\n# get parameters\nbatch_size = int(opts.batch_size)\nhidden_size = int(opts.hidden_size)\nlatent_size = int(opts.latent_size)\ndepth = int(opts.depth)\nvocab_path = opts.vocab_path\ndata_filename = opts.data_path\nw_save_path = opts.save_path\noutput_file = opts.output_file\n\nif torch.cuda.is_available():\n\t#device = torch.device(\"cuda:1\")\n\tdevice = torch.device(\"cuda\")\n\ttorch.cuda.set_device(1)\nelse:\n\tdevice = torch.device(\"cpu\")\n\n\nprint(\"hidden size:\", hidden_size, \"latent_size:\", latent_size, \"depth:\", depth)\nprint(\"loading data.....\")\ndata_filename = opts.data_path\nroutes, scores = read_multistep_rxns(data_filename)\nrxn_trees = [ReactionTree(route) for route in routes]\nmolecules = [rxn_tree.molecule_nodes[0].smiles for rxn_tree in rxn_trees]\nreactants = extract_starting_reactants(rxn_trees)\ntemplates, n_reacts = extract_templates(rxn_trees)\nreactantDic = StartingReactants(reactants)\ntemplateDic = Templates(templates, n_reacts)\n\nprint(\"size of reactant dic:\", reactantDic.size())\nprint(\"size of template dic:\", templateDic.size())\n#print(templateDic.template_list)\n\nn_pairs = len(routes)\nind_list = [i for i in range(n_pairs)]\nfgm_trees = [FragmentTree(rxn_trees[i].molecule_nodes[0].smiles) for i in ind_list]\nrxn_trees = [rxn_trees[i] for i in ind_list]\ndata_pairs=[]\nfor fgm_tree, rxn_tree in zip(fgm_trees, rxn_trees):\n\tdata_pairs.append((fgm_tree, rxn_tree))\ncset=set()\nfor fgm_tree in fgm_trees:\n\tfor node in fgm_tree.nodes:\n\t\tcset.add(node.smiles)\ncset = list(cset)\nif vocab_path is None:\n\tfragmentDic = FragmentVocab(cset)\nelse:\n\tfragmentDic = FragmentVocab(cset, filename =vocab_path)\n\nprint(\"size of fragment dic:\", fragmentDic.size())\n\n\n\n# loading model\n\nmpn = MPN(hidden_size, depth)\nmodel = FTRXNVAE(fragmentDic, reactantDic, templateDic, hidden_size, latent_size, depth, fragment_embedding=None, reactant_embedding=None, template_embedding=None)\ncheckpoint = torch.load(w_save_path, map_location=device)\nmodel.load_state_dict(checkpoint)\nprint(\"loaded model....\")\nevaluator = Evaluator(latent_size, model)\nevaluator.validate_and_save(rxn_trees, 
output_file=output_file)\n#evaluator.novelty_and_uniqueness([output_file], rxn_trees)\n\n\n\n",
"import theano\nimport theano.tensor as T\n\nimport numpy as np\nfrom scipy.spatial.distance import cdist\n\ndef casting(x):\n return np.array(x).astype(theano.config.floatX)\n\ndef compute_kernel(lls, lsf, x, z):\n\n ls = T.exp(lls)\n sf = T.exp(lsf)\n\n if x.ndim == 1:\n x = x[ None, : ]\n\n if z.ndim == 1:\n z = z[ None, : ]\n\n lsre = T.outer(T.ones_like(x[ :, 0 ]), ls)\n\n r2 = T.outer(T.sum(x * x / lsre, 1), T.ones_like(z[ : , 0 : 1 ])) - np.float32(2) * \\\n T.dot(x / lsre, T.transpose(z)) + T.dot(np.float32(1.0) / lsre, T.transpose(z)**2)\n\n k = sf * T.exp(-np.float32(0.5) * r2)\n\n return k\n\ndef compute_kernel_numpy(lls, lsf, x, z):\n\n ls = np.exp(lls)\n sf = np.exp(lsf)\n\n if x.ndim == 1:\n x= x[ None, : ]\n\n if z.ndim == 1:\n z= z[ None, : ]\n\n lsre = np.outer(np.ones(x.shape[ 0 ]), ls)\n\n r2 = np.outer(np.sum(x * x / lsre, 1), np.ones(z.shape[ 0 ])) - 2 * np.dot(x / lsre, z.T) + np.dot(1.0 / lsre, z.T **2)\n\n k = sf * np.exp(-0.5*r2)\n\n return k\n\n##\n# xmean and xvar can be vectors of input points\n#\n# This is the expected value of the kernel\n#\n\ndef compute_psi1(lls, lsf, xmean, xvar, z):\n\n if xmean.ndim == 1:\n xmean = xmean[ None, : ]\n\n ls = T.exp(lls)\n sf = T.exp(lsf)\n lspxvar = ls + xvar\n constterm1 = ls / lspxvar\n constterm2 = T.prod(T.sqrt(constterm1), 1)\n r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1), T.ones_like(z[ : , 0 : 1 ])) \\\n - np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) + \\\n T.dot(np.float32(1.0) / lspxvar, T.transpose(z)**2)\n psi1 = sf * T.outer(constterm2, T.ones_like(z[ : , 0 : 1 ])) * T.exp(-np.float32(0.5) * r2_psi1)\n\n return psi1\n\ndef compute_psi1_numpy(lls, lsf, xmean, xvar, z):\n\n if xmean.ndim == 1:\n xmean = xmean[ None, : ]\n\n ls = np.exp(lls)\n sf = np.exp(lsf)\n lspxvar = ls + xvar\n constterm1 = ls / lspxvar\n constterm2 = np.prod(np.sqrt(constterm1), 1)\n r2_psi1 = np.outer(np.sum(xmean * xmean / lspxvar, 1), \\\n np.ones(z.shape[ 0 ])) - 2 * np.dot(xmean / lspxvar, z.T) + \\\n np.dot(1.0 / lspxvar, z.T **2)\n psi1 = sf * np.outer(constterm2, np.ones(z.shape[ 0 ])) * np.exp(-0.5 * r2_psi1)\n return psi1\n\ndef compute_psi2(lls, lsf, z, input_means, input_vars):\n\n ls = T.exp(lls)\n sf = T.exp(lsf)\n b = ls / casting(2.0)\n term_1 = T.prod(T.sqrt(b / (b + input_vars)), 1)\n\n scale = T.sqrt(4 * (2 * b[ None, : ] + 0 * input_vars))\n scaled_z = z[ None, : , : ] / scale[ : , None , : ]\n scaled_z_minus_m = scaled_z\n r2b = T.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + T.sum(scaled_z_minus_m**2, 2)[ :, : , None ] - \\\n 2 * T.batched_dot(scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))\n term_2 = T.exp(-r2b)\n\n scale = T.sqrt(4 * (2 * b[ None, : ] + 2 * input_vars))\n scaled_z = z[ None, : , : ] / scale[ : , None , : ]\n scaled_m = input_means / scale\n scaled_m = T.tile(scaled_m[ : , None, : ], [ 1, z.shape[ 0 ], 1])\n scaled_z_minus_m = scaled_z - scaled_m\n r2b = T.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + T.sum(scaled_z_minus_m**2, 2)[ :, : , None ] + \\\n 2 * T.batched_dot(scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))\n term_3 = T.exp(-r2b)\n \n psi2_computed = sf**casting(2.0) * term_1[ :, None, None ] * term_2 * term_3\n\n return T.transpose(psi2_computed, [ 1, 2, 0 ])\n\ndef compute_psi2_numpy(lls, lsf, z, input_means, input_vars):\n\n ls = np.exp(lls)\n sf = np.exp(lsf)\n b = ls / casting(2.0)\n term_1 = np.prod(np.sqrt(b / (b + input_vars)), 1)\n\n scale = np.sqrt(4 * (2 * b[ None, : ] + 0 * input_vars))\n scaled_z = z[ None, : , : ] / scale[ : , None , : 
]\n scaled_z_minus_m = scaled_z\n r2b = np.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + np.sum(scaled_z_minus_m**2, 2)[ :, : , None ] - \\\n 2 * np.einsum('ijk,ikl->ijl', scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))\n term_2 = np.exp(-r2b)\n\n scale = np.sqrt(4 * (2 * b[ None, : ] + 2 * input_vars))\n scaled_z = z[ None, : , : ] / scale[ : , None , : ]\n scaled_m = input_means / scale\n scaled_m = np.tile(scaled_m[ : , None, : ], [ 1, z.shape[ 0 ], 1])\n scaled_z_minus_m = scaled_z - scaled_m\n r2b = np.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + np.sum(scaled_z_minus_m**2, 2)[ :, : , None ] + \\\n 2 * np.einsum('ijk,ikl->ijl', scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))\n term_3 = np.exp(-r2b)\n \n psi2_computed = sf**casting(2.0) * term_1[ :, None, None ] * term_2 * term_3\n psi2_computed = np.transpose(psi2_computed, [ 1, 2, 0 ])\n\n return psi2_computed"
] |
[
[
"torch.device",
"torch.cuda.set_device",
"torch.cuda.is_available",
"torch.load"
],
[
"numpy.dot",
"numpy.sqrt",
"numpy.tile",
"numpy.ones",
"numpy.float32",
"numpy.transpose",
"numpy.array",
"numpy.exp",
"numpy.sum"
]
] |
stevencdang/AutoML-DS-Components
|
[
"b0490262d3db5307c37f82c92e25cd938dd3a242"
] |
[
"lib/ls_dataset/d3m_dataset.py"
] |
[
"\n# Author: Steven C. Dang\n\n# Class encapsulating operations on a remote d3m dataset\n\nimport logging\nimport os.path as path\nimport os\nfrom io import IOBase\nimport json\nimport csv\nimport pandas as pd\n\nfrom ls_dataset.ls_dataset import LSDataset\nfrom ls_dataset.dsr_table import DSRTable\nfrom ls_dataset.dsr_factory import DatasetResourceFactory\n\nlogger = logging.getLogger(__name__)\n\nclass D3MDataset(LSDataset):\n \"\"\"\n Class representing a remote dataset with prediction results\n\n \"\"\"\n\n\n def __init__(self, dspath, dsdata):\n \"\"\"\n inputs: \n dspath - the path to the dataset root\n dsdata - a dictionary containing the dataset metadata\n \n \"\"\"\n LSDataset.__init__(self, dspath)\n logger.debug(\"Initializing D3M dataset\")\n\n # Parse dataset metadata\n self.about = dsdata['about']\n self.id = dsdata['about']['datasetID']\n self.name = dsdata['about']['datasetName']\n\n # Parse data resources in the dataset\n self.dataResources = [DatasetResourceFactory.get_resource(dsr) for dsr in dsdata['dataResources']]\n\n # Store qualities field (currently noto used)A\n if 'qualities' in dsdata:\n self.qualities = dsdata['qualities']\n else:\n self.qualities = None\n \n @staticmethod\n def from_json(d):\n \"\"\"\n A static constructor of this class given a jsonified file\n\n \"\"\"\n if isinstance(d, str):\n logger.debug(\"Loading json string\")\n ds_json = json.loads(d)\n else:\n logger.debug(\"Handling input with type: %s\" % type(d))\n ds_json = d\n \n # logger.debug(\"got dataset json: %s\" % str(ds_json))\n # logger.debug(\"json about: %s\" % ds_json['about'])\n # logger.debug(\"json data resources: %s\" % ds_json['dataResources'])\n # json_doc = {'about': ds_json['about'],\n # 'dataResources': ds_json['dataResources']\n # }\n # return D3MDataset(ds_json['dataset_info']['root_path'], \n # json_doc)\n return D3MDataset(ds_json['dataset_info']['root_path'],\n ds_json)\n\n\n @staticmethod\n def from_dataset_json(fpath):\n \"\"\"\n A static constructor of this class given a dataset json\n\n \"\"\"\n if isinstance(fpath, str):\n if path.exists(fpath):\n #Get dataset path from json path\n dpath = path.dirname(fpath)\n # dpath = path.split(path.split(fpath)[0])[0] # Assumses root\n try:\n with open(fpath, 'r') as f:\n ds_json = json.load(f)\n return D3MDataset(dpath,\n ds_json)\n except:\n logger.error(\"Error while decoding dataset json: %s\" % fpath)\n\n else:\n logger.error(\"Found no dataset json at path: %s\" % str(fpath))\n raise Exception(\"Found no dataset json at path: %s\" % str(fpath))\n elif isinstance(fpath, IOBase):\n logger.debug(\"Loading dataset json from open file\")\n logger.debug(\"dataset path: %s\" % str(fpath))\n dpath = path.dirname(fpath)\n # dpath = path.split(path.split(fpath)[0])[0]\n # ds_json = json.load(fpath)\n ds_json = json.load(fpath, encoding='utf-16')\n return D3MDataset(dpath,\n ds_json)\n else:\n logger.error(\"Found no dataset json at path: %s\" % str(fpath))\n raise Exception(\"Found no dataset json at path: %s\" % str(fpath))\n\n @staticmethod\n def get_schema_path(dpath):\n name = path.split(dpath)[-1]\n fpath = path.join(dpath, name + '_dataset', LSDataset.__default_schema__)\n if path.exists(fpath):\n return fpath\n else:\n raise Exception(\"No schema doc found in dataset directory: %s\" % dpath)\n\n def to_component_out_file(self, fpath):\n \"\"\"\n Write the dataset to file for passing between components. 
\n Writes the first row of a tab separated file as the list of column names.\n The first cell of the second row is simply the \n\n \"\"\"\n for resource in self.dataResources:\n if resource.resType == 'table':\n logger.debug(\"Resource type: %s\\t %s\" % (str(type(resource.columns)), str(resource.columns)))\n for col in resource.columns:\n logger.debug(\"Type: %s\\t col: %s\" % (str(type(col)), str(col)))\n names = [col.colName for col in resource.columns]\n js = self.to_json()\n\n with open(fpath, 'w') as out_file:\n logger.debug(\"Writing dataset json to component out file: %s\" % fpath)\n writer = csv.writer(out_file, delimiter='\\t')\n writer.writerow(names)\n writer.writerow([js])\n\n @staticmethod\n def from_component_out_file(fpath):\n \"\"\"\n Load the dataset from an out file written to pass between workflow components\n\n \"\"\"\n if isinstance(fpath, str):\n in_file = open(fpath, 'r')\n reader = csv.reader(in_file, delimiter='\\t')\n rows = [row for row in reader]\n in_file.close()\n elif isinstance(fpath, IOBase):\n reader = csv.reader(fpath, delimiter='\\t')\n rows = [row for row in reader]\n fpath.close()\n col_names = rows[0]\n logger.debug(\"Got columns names: %s\" % str(col_names))\n # logger.debug(\"Got dataset row with type %s:\\t %s\" % (str(type(rows[1][0])), str(rows[1][0])))\n # logger.debug(len(rows[1]))\n # logger.debug(rows[1][0])\n # logger.debug(type(rows[1][0]))\n return D3MDataset.from_json(rows[1][0])\n\n\n def to_json(self, fpath=None):\n \"\"\"\n Write the dataset to info to file and return a string with the json. If no path is given,\n then just returns a string with the json representation of the dataset json\n\n \"\"\"\n # logger.debug(\"D3MDataset to json\")\n\n out = json.loads(super().to_json())\n out['about'] = self.about\n out['dataResources'] = [json.loads(rc.to_json()) for rc in self.dataResources]\n out['qualities'] = self.qualities\n \n if fpath is not None:\n logger.debug(\"Writing dataset json to: %s\" % fpath)\n out_file = open(fpath, 'w')\n json.dump(out, out_file)\n out_file.close()\n\n return json.dumps(out)\n\n\n def __str__(self):\n return self.to_json()\n\n def load_dataset(self):\n \"\"\"\n Load the dataset table\n\n \"\"\"\n data = None\n for dr in [dr for dr in self.dataResources if type(dr) is DSRTable]:\n logger.debug(\"Found data resource table with ID: %s\\tpath: %s\" % (dr.resID, dr.resPath))\n if data is None:\n dpath = path.join(self.dpath, dr.resPath)\n data = pd.read_csv(dpath, ',')\n return data\n\n\n\n def get_data_columns(self):\n for dr in [dr for dr in self.dataResources if type(dr) is DSRTable]:\n logger.debug(\"Found data resource table with ID: %s\\tpath: %s\" % (dr.resID, dr.resPath))\n return [col for col in dr.columns if col.colName != 'd3mIndex']\n\n\n\n\n"
] |
[
[
"pandas.read_csv"
]
] |
coruscating/qiskit-experiments
|
[
"dac1febf13be870d3bac16af22aa341a088e0766"
] |
[
"test/data_processing/test_nodes.py"
] |
[
"# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2021.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Data processor tests.\"\"\"\n\n# pylint: disable=unbalanced-tuple-unpacking\n\nimport numpy as np\n\nfrom qiskit.test import QiskitTestCase\nfrom qiskit_experiments.data_processing.nodes import (\n SVD,\n AverageData,\n MinMaxNormalize,\n Probability,\n)\nfrom qiskit_experiments.data_processing.data_processor import DataProcessor\n\nfrom . import BaseDataProcessorTest\n\n\nclass TestAveraging(BaseDataProcessorTest):\n \"\"\"Test the averaging nodes.\"\"\"\n\n def test_simple(self):\n \"\"\"Simple test of averaging.\"\"\"\n\n datum = np.array([[1, 2], [3, 4], [5, 6]])\n\n node = AverageData(axis=1)\n self.assertTrue(np.allclose(node(datum)[0], np.array([1.5, 3.5, 5.5])))\n self.assertTrue(np.allclose(node(datum)[1], np.array([0.5, 0.5, 0.5]) / np.sqrt(2)))\n\n node = AverageData(axis=0)\n self.assertTrue(np.allclose(node(datum)[0], np.array([3.0, 4.0])))\n std = np.std([1, 3, 5])\n self.assertTrue(np.allclose(node(datum)[1], np.array([std, std]) / np.sqrt(3)))\n\n def test_iq_averaging(self):\n \"\"\"Test averaging of IQ-data.\"\"\"\n\n iq_data = [\n [[-6.20601501e14, -1.33257051e15], [-1.70921324e15, -4.05881657e15]],\n [[-5.80546502e14, -1.33492509e15], [-1.65094637e15, -4.05926942e15]],\n [[-4.04649069e14, -1.33191056e15], [-1.29680377e15, -4.03604815e15]],\n [[-2.22203874e14, -1.30291309e15], [-8.57663429e14, -3.97784973e15]],\n [[-2.92074029e13, -1.28578530e15], [-9.78824053e13, -3.92071056e15]],\n [[1.98056981e14, -1.26883024e15], [3.77157017e14, -3.87460328e15]],\n [[4.29955888e14, -1.25022995e15], [1.02340118e15, -3.79508679e15]],\n [[6.38981344e14, -1.25084614e15], [1.68918514e15, -3.78961044e15]],\n [[7.09988897e14, -1.21906634e15], [1.91914171e15, -3.73670664e15]],\n [[7.63169115e14, -1.20797552e15], [2.03772603e15, -3.74653863e15]],\n ]\n\n self.create_experiment(iq_data, single_shot=True)\n\n avg_iq = AverageData(axis=0)\n\n avg_datum, error = avg_iq(self.iq_experiment.data(0)[\"memory\"])\n\n expected_avg = np.array([[8.82943876e13, -1.27850527e15], [1.43410186e14, -3.89952402e15]])\n\n expected_std = np.array(\n [[5.07650185e14, 4.44664719e13], [1.40522641e15, 1.22326831e14]]\n ) / np.sqrt(10)\n\n self.assertTrue(np.allclose(avg_datum, expected_avg))\n self.assertTrue(np.allclose(error, expected_std))\n\n\nclass TestNormalize(QiskitTestCase):\n \"\"\"Test the normalization node.\"\"\"\n\n def test_simple(self):\n \"\"\"Simple test of normalization node.\"\"\"\n\n data = np.array([1.0, 2.0, 3.0, 3.0])\n error = np.array([0.1, 0.2, 0.3, 0.3])\n\n expected_data = np.array([0.0, 0.5, 1.0, 1.0])\n expected_error = np.array([0.05, 0.1, 0.15, 0.15])\n\n node = MinMaxNormalize()\n\n self.assertTrue(np.allclose(node(data)[0], expected_data))\n self.assertTrue(np.allclose(node(data, error)[0], expected_data))\n self.assertTrue(np.allclose(node(data, error)[1], expected_error))\n\n\nclass TestSVD(BaseDataProcessorTest):\n \"\"\"Test the SVD nodes.\"\"\"\n\n def test_simple_data(self):\n \"\"\"\n A simple setting where the IQ data of qubit 0 is oriented along (1,1) and\n the IQ data of qubit 1 is 
oriented along (1,-1).\n \"\"\"\n\n iq_data = [[[0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [-1.0, 1.0]], [[-1.0, -1.0], [1.0, -1.0]]]\n\n self.create_experiment(iq_data)\n\n iq_svd = SVD()\n iq_svd.train([datum[\"memory\"] for datum in self.iq_experiment.data()])\n\n # qubit 0 IQ data is oriented along (1,1)\n self.assertTrue(np.allclose(iq_svd._main_axes[0], np.array([-1, -1]) / np.sqrt(2)))\n\n # qubit 1 IQ data is oriented along (1, -1)\n self.assertTrue(np.allclose(iq_svd._main_axes[1], np.array([-1, 1]) / np.sqrt(2)))\n\n # Note: input data shape [n_circs, n_slots, n_iq] for avg mode simulation\n\n processed, _ = iq_svd(np.array([[[1, 1], [1, -1]]]))\n expected = np.array([-1, -1]) / np.sqrt(2)\n self.assertTrue(np.allclose(processed, expected))\n\n processed, _ = iq_svd(np.array([[[2, 2], [2, -2]]]))\n self.assertTrue(np.allclose(processed, expected * 2))\n\n # Check that orthogonal data gives 0.\n processed, _ = iq_svd(np.array([[[1, -1], [1, 1]]]))\n expected = np.array([0, 0])\n self.assertTrue(np.allclose(processed, expected))\n\n def test_svd(self):\n \"\"\"Use IQ data gathered from the hardware.\"\"\"\n\n # This data is primarily oriented along the real axis with a slight tilt.\n # There is a large offset in the imaginary dimension when comparing qubits\n # 0 and 1.\n iq_data = [\n [[-6.20601501e14, -1.33257051e15], [-1.70921324e15, -4.05881657e15]],\n [[-5.80546502e14, -1.33492509e15], [-1.65094637e15, -4.05926942e15]],\n [[-4.04649069e14, -1.33191056e15], [-1.29680377e15, -4.03604815e15]],\n [[-2.22203874e14, -1.30291309e15], [-8.57663429e14, -3.97784973e15]],\n [[-2.92074029e13, -1.28578530e15], [-9.78824053e13, -3.92071056e15]],\n [[1.98056981e14, -1.26883024e15], [3.77157017e14, -3.87460328e15]],\n [[4.29955888e14, -1.25022995e15], [1.02340118e15, -3.79508679e15]],\n [[6.38981344e14, -1.25084614e15], [1.68918514e15, -3.78961044e15]],\n [[7.09988897e14, -1.21906634e15], [1.91914171e15, -3.73670664e15]],\n [[7.63169115e14, -1.20797552e15], [2.03772603e15, -3.74653863e15]],\n ]\n\n self.create_experiment(iq_data)\n\n iq_svd = SVD()\n iq_svd.train([datum[\"memory\"] for datum in self.iq_experiment.data()])\n\n self.assertTrue(np.allclose(iq_svd._main_axes[0], np.array([-0.99633018, -0.08559302])))\n self.assertTrue(np.allclose(iq_svd._main_axes[1], np.array([-0.99627747, -0.0862044])))\n\n def test_svd_error(self):\n \"\"\"Test the error formula of the SVD.\"\"\"\n\n iq_svd = SVD()\n iq_svd._main_axes = np.array([[1.0, 0.0]])\n iq_svd._scales = [1.0]\n iq_svd._means = [[0.0, 0.0]]\n\n # Since the axis is along the real part the imaginary error is irrelevant.\n processed, error = iq_svd([[[1.0, 0.2]]], [[[0.2, 0.1]]])\n self.assertEqual(processed, np.array([1.0]))\n self.assertEqual(error, np.array([0.2]))\n\n # Since the axis is along the real part the imaginary error is irrelevant.\n processed, error = iq_svd([[[1.0, 0.2]]], [[[0.2, 0.3]]])\n self.assertEqual(processed, np.array([1.0]))\n self.assertEqual(error, np.array([0.2]))\n\n # Tilt the axis to an angle of 36.9... 
degrees\n iq_svd._main_axes = np.array([[0.8, 0.6]])\n processed, error = iq_svd([[[1.0, 0.0]]], [[[0.2, 0.3]]])\n cos_ = np.cos(np.arctan(0.6 / 0.8))\n sin_ = np.sin(np.arctan(0.6 / 0.8))\n self.assertEqual(processed, np.array([cos_]))\n expected_error = np.sqrt((0.2 * cos_) ** 2 + (0.3 * sin_) ** 2)\n self.assertEqual(error, np.array([expected_error]))\n\n def test_train_svd_processor(self):\n \"\"\"Test that we can train a DataProcessor with an SVD.\"\"\"\n\n processor = DataProcessor(\"memory\", [SVD()])\n\n self.assertFalse(processor.is_trained)\n\n iq_data = [[[0.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [-1.0, 1.0]], [[-1.0, -1.0], [1.0, -1.0]]]\n self.create_experiment(iq_data)\n\n processor.train(self.iq_experiment.data())\n\n self.assertTrue(processor.is_trained)\n\n # Check that we can use the SVD\n iq_data = [[[2, 2], [2, -2]]]\n self.create_experiment(iq_data)\n\n processed, _ = processor(self.iq_experiment.data(0))\n expected = np.array([-2, -2]) / np.sqrt(2)\n self.assertTrue(np.allclose(processed, expected))\n\n\nclass TestProbability(QiskitTestCase):\n \"\"\"Test probability computation.\"\"\"\n\n def test_variance_not_zero(self):\n \"\"\"Test if finite variance is computed at max or min probability.\"\"\"\n node = Probability(outcome=\"1\")\n\n data = {\"1\": 1024, \"0\": 0}\n mode, stderr = node(data)\n self.assertGreater(stderr, 0.0)\n self.assertLessEqual(mode, 1.0)\n\n data = {\"1\": 0, \"0\": 1024}\n mode, stderr = node(data)\n self.assertGreater(stderr, 0.0)\n self.assertGreaterEqual(mode, 0.0)\n\n def test_probability_balanced(self):\n \"\"\"Test if p=0.5 is returned when counts are balanced and prior is flat.\"\"\"\n node = Probability(outcome=\"1\")\n\n # balanced counts with a flat prior will yield p = 0.5\n data = {\"1\": 512, \"0\": 512}\n mode, _ = node(data)\n self.assertAlmostEqual(mode, 0.5)\n"
] |
[
[
"numpy.sqrt",
"numpy.arctan",
"numpy.allclose",
"numpy.std",
"numpy.array"
]
] |
bdsinger/PsyNeuLink
|
[
"71d8a0bb1691ff85061d4ad3de866d9930a69a73",
"71d8a0bb1691ff85061d4ad3de866d9930a69a73",
"71d8a0bb1691ff85061d4ad3de866d9930a69a73"
] |
[
"Scripts/rum.py",
"tests/states/test_parameter_states.py",
"tests/functions/test_selection.py"
] |
[
"\n# coding: utf-8\n\n# In[14]:\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n# get_ipython().run_line_magic('matplotlib', 'inline')\nimport psyneulink as pnl\n\n\n# In[15]:\nimport psyneulink.core.components.functions.transferfunctions\n\nnouns=['oak','pine','rose','daisy','canary','robin','salmon','sunfish']\nrelations=['is','has','can']\nis_list=['living','living thing','plant','animal','tree','flower','bird','fish','big','green','red','yellow']\nhas_list=['roots','leaves','bark','branches','skin','feathers','wings','gills','scales']\ncan_list=['grow','move','swim','fly','breathe','breathe underwater','breathe air','walk','photosynthesize']\ndescriptors=[nouns,is_list,has_list,can_list]\n\ntruth_nouns=np.identity(len(nouns))\n\ntruth_is=np.zeros((len(nouns),len(is_list)))\n\ntruth_is[0,:]=[1,1,1,0,1,0,0,0,1,0,0,0]\ntruth_is[1,:]=[1,1,1,0,1,0,0,0,1,0,0,0]\ntruth_is[2,:]=[1,1,1,0,0,1,0,0,0,0,0,0]\ntruth_is[3,:]=[1,1,1,0,0,1,0,0,0,0,0,0]\ntruth_is[4,:]=[1,1,0,1,0,0,1,0,0,0,0,1]\ntruth_is[5,:]=[1,1,0,1,0,0,1,0,0,0,0,1]\ntruth_is[6,:]= [1,1,0,1,0,0,0,1,1,0,1,0]\ntruth_is[7,:]= [1,1,0,1,0,0,0,1,1,0,0,0]\n\ntruth_has=np.zeros((len(nouns),len(has_list)))\n\ntruth_has[0,:]= [1,1,1,1,0,0,0,0,0]\ntruth_has[1,:]= [1,1,1,1,0,0,0,0,0]\ntruth_has[2,:]= [1,1,0,0,0,0,0,0,0]\ntruth_has[3,:]= [1,1,0,0,0,0,0,0,0]\ntruth_has[4,:]= [0,0,0,0,1,1,1,0,0]\ntruth_has[5,:]= [0,0,0,0,1,1,1,0,0]\ntruth_has[6,:]= [0,0,0,0,0,0,0,1,1]\ntruth_has[7,:]= [0,0,0,0,0,0,0,1,1]\n\ntruth_can=np.zeros((len(nouns),len(can_list)))\n\ntruth_can[0,:]= [1,0,0,0,0,0,0,0,1]\ntruth_can[1,:]= [1,0,0,0,0,0,0,0,1]\ntruth_can[2,:]= [1,0,0,0,0,0,0,0,1]\ntruth_can[3,:]= [1,0,0,0,0,0,0,0,1]\ntruth_can[4,:]= [1,1,0,1,1,0,1,1,0]\ntruth_can[5,:]= [1,1,0,1,1,0,1,1,0]\ntruth_can[6,:]= [1,1,1,0,1,1,0,0,0]\ntruth_can[7,:]= [1,1,1,0,1,1,0,0,0]\n\ntruths=[[truth_nouns],[truth_is],[truth_has],[truth_can]]\n\n#dict_is={'oak':truth_is[0,:],'pine':truth_is[1,:],'rose':truth_is[2,:],'daisy':truth_is[3,:],'canary':truth_is[4,:],'robin':truth_is[5,:],'salmon':truth_is[6,:],'sunfish':truth_is[7,:]}\n\n\n\n# In[16]:\n\n\ndef gen_input_vals (nouns,relations):\n X_1=np.identity(len(nouns))\n X_2=np.identity(len(relations))\n return(X_1,X_2)\n\n\n# In[17]:\n\n\nnouns_onehot,rels_onehot=gen_input_vals(nouns,relations)\n\nr_1=np.shape(nouns_onehot)[0]\nc_1=np.shape(nouns_onehot)[1]\nr_2=np.shape(rels_onehot)[0]\nc_2=np.shape(rels_onehot)[1]\n\n\n# In[18]:\n\n\n#gotta figure out how to make this PNL friendly (breathe deep, my dude. One thing at a time.)\n#later, we want to be able to change our bias, but for now, we're going to stick with a hard-coded one.\ndef step(variable,params,context):\n if np.sum(variable)<.5:\n out=0\n else:\n out=1\n return(out)\n\n\n# In[19]:\n\n\nStep=pnl.UserDefinedFunction(custom_function=step,\n default_variable=np.zeros(4))\n\n\n# In[20]:\n\n\n#we're on the part where we generalize this and apply it as the function for all the bins...\n#we'd like to generalize this for size so we can consistently just call the one UDF, but specifying size, to remove\n#redundancies and general clutter. 
lol\n\nstep_mech=pnl.ProcessingMechanism(function=pnl.UserDefinedFunction(custom_function=step, default_variable=np.zeros(4)),\n size=4,\n name='step_mech')\n\n\n# In[21]:\n\n\nnouns_in = pnl.TransferMechanism(name=\"nouns_input\",default_variable=np.zeros(r_1))\n\nrels_in = pnl.TransferMechanism(name=\"rels_input\",default_variable=np.zeros(r_2))\n\nh1 = pnl.TransferMechanism(name=\"hidden_nouns\",\n size=8,\n function=psyneulink.core.components.functions.transferfunctions.Logistic)\n\nh2 = pnl.TransferMechanism(name=\"hidden_mixed\",\n size=15,\n function=psyneulink.core.components.functions.transferfunctions.Logistic)\n\nout_sig_I = pnl.TransferMechanism(name=\"sig_outs_I\",\n size=len(nouns),\n function=psyneulink.core.components.functions.transferfunctions.Logistic)\n\nout_sig_is = pnl.TransferMechanism(name=\"sig_outs_is\",\n size=len(is_list),\n function=psyneulink.core.components.functions.transferfunctions.Logistic)\n\n\nout_sig_has = pnl.TransferMechanism(name=\"sig_outs_has\",\n size=len(has_list),\n function=psyneulink.core.components.functions.transferfunctions.Logistic)\n\n\nout_sig_can = pnl.TransferMechanism(name=\"sig_outs_can\",\n size=len(can_list),\n function=psyneulink.core.components.functions.transferfunctions.Logistic)\n\n#biases\nbh1 = pnl.TransferMechanism(name=\"bias_hidden_nouns\",\n default_variable=np.zeros(8))\n\nbh2 = pnl.TransferMechanism(name=\"bias_hidden_mixed\",\n default_variable=np.zeros(15))\n\nbosI = pnl.TransferMechanism(name=\"bias_osI\",\n default_variable=np.zeros(len(nouns)))\n\nbosi = pnl.TransferMechanism(name=\"bias_osi\",\n default_variable=np.zeros(len(is_list)))\n\nbosh = pnl.TransferMechanism(name=\"bias_osh\",\n default_variable=np.zeros(len(has_list)))\n\nbosc = pnl.TransferMechanism(name=\"bias_osc\",\n default_variable=np.zeros(len(can_list)))\n\n#later, we'll change the out_bin_x functions to a UDF that does a step function.\n# out_bin_I = pnl.TransferMechanism(name=\"binary_outs_I\",\n# size=len(nouns),\n# function=pnl.Linear)\n#\n# out_bin_is = pnl.TransferMechanism(name=\"binary_outs_is\",\n# size=len(is_list),\n# function=pnl.Linear)\n#\n# out_bin_has = pnl.TransferMechanism(name=\"binary_outs_has\",\n# size=len(has_list),\n# function=pnl.Linear)\n#\n# out_bin_can = pnl.TransferMechanism(name=\"binary_outs_can\",\n# size=len(can_list),\n# function=pnl.Linear)\n\n#we'll need to add in biases too. 
That will come later.\n\n\n# In[22]:\n\n\n#I want to put in a mapping projection that just ensures all our weight matrices between sigs and bins is I.\n\nmapII=pnl.MappingProjection(matrix=np.eye(len(nouns)),\n name=\"mapII\"\n )\n\nmapIi=pnl.MappingProjection(matrix=np.eye(len(is_list)),\n name=\"mapIi\"\n )\n\nmapIh=pnl.MappingProjection(matrix=np.eye(len(has_list)),\n name=\"mapIh\"\n )\n\nmapIc=pnl.MappingProjection(matrix=np.eye(len(can_list)),\n name=\"mapIc\"\n )\n\n\n# In[23]:\n\n\n#This is where we build the processes.\np11=pnl.Process(pathway=[nouns_in,h1,h2],\n learning=pnl.LEARNING)\n\np12=pnl.Process(pathway=[rels_in,h2],\n learning=pnl.LEARNING)\n\np21=pnl.Process(pathway=[h2,out_sig_I],\n learning=pnl.LEARNING)\n\np22=pnl.Process(pathway=[h2,out_sig_is],\n learning=pnl.LEARNING)\n\np23=pnl.Process(pathway=[h2,out_sig_has],\n learning=pnl.LEARNING)\n\np24=pnl.Process(pathway=[h2,out_sig_can],\n learning=pnl.LEARNING)\n\n\n# In[24]:\n\n\n#These are the processes that transform sigs to bins\n#\n# p31=pnl.Process(pathway=[out_sig_I,\n# mapII,\n# out_bin_I],\n# learning=pnl.LEARNING\n# )\n#\n# p32=pnl.Process(pathway=[out_sig_is,\n# mapIi,\n# out_bin_is],\n# learning=pnl.LEARNING\n# )\n#\n# p33=pnl.Process(pathway=[out_sig_has,\n# mapIh,\n# out_bin_has],\n# learning=pnl.LEARNING\n# )\n#\n# p34=pnl.Process(pathway=[out_sig_can,\n# mapIc,\n# out_bin_can],\n# learning=pnl.LEARNING\n# )\n\n\n# In[25]:\n\n\n#Bias processes go here\n\nbp1=pnl.Process(pathway=[bh1,h1],\n learning=pnl.LEARNING\n )\n\nbp2=pnl.Process(pathway=[bh2,h2],\n learning=pnl.LEARNING\n )\n\nbposI=pnl.Process(pathway=[bosI,out_sig_I],\n learning=pnl.LEARNING\n )\n\nbposi=pnl.Process(pathway=[bosi,out_sig_is],\n learning=pnl.LEARNING\n )\n\nbposh=pnl.Process(pathway=[bosh,out_sig_has],\n learning=pnl.LEARNING\n )\n\nbposc=pnl.Process(pathway=[bosc,out_sig_can],\n learning=pnl.LEARNING\n )\n\n\n# In[117]:\n\n\n#This is where we put them all into a system\n\nrumel_sys=pnl.System(processes=[p11,\n bp1,\n p12,\n bp2,\n p21,\n bposI,\n p22,\n bposi,\n p23,\n bposh,\n p24,\n bposc,\n # p31,\n # p32,\n # p33,\n # p34\n ])\n\nrumel_sys.show_graph(show_learning=True)\n\n# In[26]:\n\n\n#This is where we build multiple systems that separate the learning components from the non-learning components.\n#This only compiles when the one above it does not.\n#This might be a good bug to report...\n\n#Additionally, for some reason this system is a clone of the one above, regardless of whether or not we include\n#the one below. If the p3x processes are defined at all, they are automatically included in the system.\n#What is going on here?\n# rumel_sys2a=pnl.System(processes=[p11,\n# bp1,\n# p12,\n# bp2,\n# p21,\n# bposI,\n# p22,\n# bposi,\n# p23,\n# bposh,\n# p24,\n# bposc])\n\n\n# # In[147]:\n#\n#\n# rumel_sys2b=pnl.System(processes=[\n# p31,\n# p32,\n# p33,\n# p34])\n#\n#\n# # In[27]:\n#\n#\n# rumel_sys2a.show_graph(output_fmt='jupyter')\n#\n#\n# # In[97]:\n#\n#\n# #so far, so hoopy. What we want is to not enable learning on our binaries. 
Just on our sigs.\n#\n#\n# # In[100]:\n#\n#\n# for noun in range(len(nouns)):\n# for rel_out in range (3):\n# rumel_sys.run(inputs={nouns_in: nouns_onehot[noun],\n# rels_in: rels_onehot[rel_out],\n# bh1: np.zeros(8),\n# bh2: np.zeros(15),\n# bosI: np.zeros(len(nouns)),\n# bosi: np.zeros(len(is_list)),\n# bosh: np.zeros(len(has_list)),\n# bosc: np.zeros(len(can_list)),\n# },\n# targets={out_bin_I: nouns_onehot[noun],\n# out_bin_is: truth_is[noun],\n# out_bin_has: truth_has[noun],\n# out_bin_can: truth_can[noun]\n# }\n# )\n# #What we can do here, is build our inputs into a nested for loop\n#\n#\n# # In[103]:\n#\n#\nfor noun in range(len(nouns)):\n for rel_out in range (3):\n rumel_sys.run(inputs={nouns_in: nouns_onehot [noun],\n rels_in: rels_onehot [rel_out],\n bh1: np.zeros(8),\n bh2: np.zeros(15),\n bosI: np.zeros(len(nouns)),\n bosi: np.zeros(len(is_list)),\n bosh: np.zeros(len(has_list)),\n bosc: np.zeros(len(can_list)),\n },\n targets={out_sig_I: nouns_onehot [noun],\n out_sig_is: truth_is [noun],\n out_sig_has: truth_has [noun],\n out_sig_can: truth_can [noun]\n }\n )\n# #What we can do here, is build our inputs into a nested for loop\n#\n#\n# # So far, what I have left to do includes:\n# #\n# # getting a threshold function up and running.\n# # See \"step\", defined below. All we need to do is make it PNL friendly. :D\n# #\n# # # This is done\n# #\n# # also want to make sure that the weight matrices from sigs to bins is I and cannot learn\n# #\n# # # Setting them to I is done. But, if we turn off learning on them, we can't run the system at all, because nothing that can learn projects to a target mechanism. It doesn't matter where we set the targets. If we set targets at sigs, it says, sigs don't project to target mechanism (target mechs are getting attached to bins). If we set targets for bins, it says targets don't project to target mechanims (target mechs are attached to bins, but bins can't learn).\n# #\n# # # I think I know a work-around on this that doesn't require we make a whole new system. We use the same setup that we used for the duck-rabbit model, where we map the sig outputs to labels, which are 1 or 0, using the previously defined step function, and get the MSE out of that.\n# #\n# # # I think it's okay for us to still try to set up multiple systems with overlapping mechanisms...\n# # # Information on our capacity to do this should be available in \"compositions\" but github pages is down right now. :/\n# #\n# # figure out how to turn on learning for some mechs and not for others without losing previously learned weights, either by avoiding reinitializing the system or by saving previously learned weights. :)\n# # this might be something to talk to Jon about...\n# #\n# # In order to do this, we will definitely* need to figure out how to put them into different systems and run the whole thing together?\n# #\n# # Actually seeing how it performs?\n# #\n# # Is that it?\n# #\n# # I need to get my github shit working, too, so I can work in devel and the other branches. :| Still, good progress for today, I think. :)\n",
"import numpy as np\nimport pytest\n\nfrom psyneulink.core.components.component import ComponentError\nfrom psyneulink.core.components.functions.transferfunctions import Linear\nfrom psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism\n\nclass TestParameterStates:\n def test_inspect_function_params_slope_noise(self):\n A = TransferMechanism()\n B = TransferMechanism()\n assert A.function.slope == 1.0\n assert B.function.slope == 1.0\n assert A.mod_slope == [1.0]\n assert B.mod_slope == [1.0]\n\n assert A.noise == 0.0\n assert B.noise == 0.0\n assert A.mod_noise == 0.0\n assert B.mod_noise == 0.0\n\n A.function.slope = 0.2\n\n assert A.function.slope == 0.2\n assert B.function.slope == 1.0\n assert A.mod_slope == [1.0]\n assert B.mod_slope == [1.0]\n\n A.noise = 0.5\n\n assert A.noise == 0.5\n assert B.noise == 0.0\n assert A.mod_noise == 0.0\n assert B.mod_noise == 0.0\n\n B.function.slope = 0.7\n\n assert A.function.slope == 0.2\n assert B.function.slope == 0.7\n assert A.mod_slope == [1.0]\n assert B.mod_slope == [1.0]\n\n B.noise = 0.6\n\n assert A.noise == 0.5\n assert B.noise == 0.6\n assert A.mod_noise == 0.0\n assert B.mod_noise == 0.0\n\n A.execute(1.0)\n assert A.mod_slope == [0.2]\n\n B.execute(1.0)\n\n assert A.function.slope == 0.2\n assert B.function.slope == 0.7\n assert A.mod_slope == [0.2]\n assert B.mod_slope == [0.7]\n\n assert A.noise == 0.5\n assert B.noise == 0.6\n assert A.mod_noise == 0.5\n assert B.mod_noise == 0.6\n\nclass TestConfigurableParameters:\n def test_configurable_params(self):\n old_value = 0.2\n new_value = 0.7\n T = TransferMechanism(function=Linear(slope=old_value,\n intercept=old_value),\n noise=old_value,\n integration_rate=old_value)\n\n # SLOPE - - - - - - - -\n\n assert np.allclose(T.user_params[\"function_params\"][\"slope\"], old_value)\n assert np.allclose(T.function.slope, old_value)\n assert np.allclose(T.mod_slope, old_value)\n\n T.function.slope = new_value\n\n # KAM changed 3/2/18 --\n # function_params looks at parameter state value, so this will not update until next execution\n assert np.allclose(T.user_params[\"function_params\"][\"slope\"], old_value)\n assert np.allclose(T.function.slope, new_value)\n assert np.allclose(T.mod_slope, old_value)\n\n # INTERCEPT - - - - - - - -\n\n assert np.allclose(T.user_params[\"function_params\"][\"intercept\"], old_value)\n assert np.allclose(T.function.intercept, old_value)\n assert np.allclose(T.mod_intercept, old_value)\n\n T.function.intercept = new_value\n\n # KAM changed 3/2/18 --\n # function_params looks at parameter state value, so this will not update until next execution\n assert np.allclose(T.user_params[\"function_params\"][\"intercept\"], old_value)\n assert np.allclose(T.function.intercept, new_value)\n assert np.allclose(T.mod_intercept, old_value)\n\n # SMOOTHING FACTOR - - - - - - - -\n\n assert np.allclose(T.user_params[\"integration_rate\"], old_value)\n assert np.allclose(T.integration_rate, old_value)\n assert np.allclose(T.mod_integration_rate, old_value)\n\n T.integration_rate = new_value\n\n # KAM changed 3/2/18 --\n # function_params looks at parameter state value, so this will not update until next execution\n assert np.allclose(T.user_params[\"integration_rate\"], old_value)\n assert np.allclose(T.integration_rate, new_value)\n assert np.allclose(T.mod_integration_rate, old_value)\n\n # NOISE - - - - - - - -\n\n assert np.allclose(T.user_params[\"noise\"], old_value)\n assert np.allclose(T.noise, old_value)\n assert 
np.allclose(T.mod_noise, old_value)\n\n T.noise = new_value\n\n # KAM changed 3/2/18 --\n # function_params looks at parameter state value, so this will not update until next execution\n assert np.allclose(T.user_params[\"noise\"], old_value)\n assert np.allclose(T.noise, new_value)\n assert np.allclose(T.mod_noise, old_value)\n\n T.execute(1.0)\n\n assert np.allclose(T.user_params[\"function_params\"][\"slope\"], new_value)\n assert np.allclose(T.function.slope, new_value)\n assert np.allclose(T.mod_slope, new_value)\n\n assert np.allclose(T.user_params[\"function_params\"][\"intercept\"], new_value)\n assert np.allclose(T.function.intercept, new_value)\n assert np.allclose(T.mod_intercept, new_value)\n\n assert np.allclose(T.user_params[\"integration_rate\"], new_value)\n assert np.allclose(T.integration_rate, new_value)\n assert np.allclose(T.mod_integration_rate, new_value)\n\n assert np.allclose(T.user_params[\"noise\"], new_value)\n assert np.allclose(T.noise, new_value)\n assert np.allclose(T.mod_noise, new_value)\n\nclass TestModParams:\n def test_mod_param_error(self):\n T = TransferMechanism()\n with pytest.raises(ComponentError) as error_text:\n T.mod_slope = 20.0\n assert \"directly because it is computed by the ParameterState\" in str(error_text.value)\n",
"import numpy as np\nimport pytest\n\nimport psyneulink.core.components.functions.selectionfunctions as Functions\nimport psyneulink.core.globals.keywords as kw\nimport psyneulink.core.llvm as pnlvm\n\nnp.random.seed(0)\nSIZE=10\ntest_var = np.random.rand(SIZE) * 2.0 - 1.0\ntest_prob = np.random.rand(SIZE)\n\ntest_data = [\n (Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.]),\n (Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.]),\n (Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]),\n (Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.]),\n (Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, [0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696]),\n (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, [0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.]),\n (Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]),\n (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, [0., 0., 0., 1.,0., 0., 0., 0., 0., 0.]),\n (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, [0.09762701, 0., 0., 0., 0., 0., 0., 0., 0., 0.]),\n (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.]),\n]\n\n# use list, naming function produces ugly names\nnames = [\n \"OneHot MAX_VAL\",\n \"OneHot MAX_ABS_VAL\",\n \"OneHot MAX_INDICATOR\",\n \"OneHot MAX_ABS_INDICATOR\",\n \"OneHot MIN_VAL\",\n \"OneHot MIN_ABS_VAL\",\n \"OneHot MIN_INDICATOR\",\n \"OneHot MIN_ABS_INDICATOR\",\n \"OneHot PROB\",\n \"OneHot PROB_INDICATOR\",\n]\n\nGROUP_PREFIX=\"SelectionFunction \"\n\[email protected]\[email protected]_function\[email protected](\"func, variable, params, expected\", test_data, ids=names)\[email protected]\ndef test_basic(func, variable, params, expected, benchmark):\n f = func(default_variable=variable, **params)\n benchmark.group = GROUP_PREFIX + func.componentName + params['mode'];\n f(variable)\n res = f(variable)\n assert np.allclose(res, expected)\n benchmark(f, variable)\n\n\[email protected]\[email protected]\[email protected]_function\[email protected](\"func, variable, params, expected\", test_data, ids=names)\[email protected]\ndef test_llvm(func, variable, params, expected, benchmark):\n benchmark.group = GROUP_PREFIX + func.componentName + params['mode'];\n f = func(default_variable=variable, **params)\n m = pnlvm.execution.FuncExecution(f)\n m.execute(variable)\n res = m.execute(variable)\n assert np.allclose(res, expected)\n benchmark(m.execute, variable)\n\n\[email protected]\[email protected]\[email protected]\[email protected]_function\[email protected](\"func, variable, params, expected\", test_data, ids=names)\[email protected]\ndef test_ptx_cuda(func, variable, params, expected, benchmark):\n benchmark.group = GROUP_PREFIX + func.componentName + params['mode'];\n f = func(default_variable=variable, **params)\n m = pnlvm.execution.FuncExecution(f)\n m.cuda_execute(variable)\n res = m.cuda_execute(variable)\n assert np.allclose(res, expected)\n benchmark(m.cuda_execute, variable)\n"
] |
[
[
"numpy.shape",
"numpy.sum",
"numpy.zeros"
],
[
"numpy.allclose"
],
[
"numpy.random.rand",
"numpy.random.seed",
"numpy.allclose"
]
] |
DamianoGiani/EGVSRprova
|
[
"2cae74436f2bf864f061d63eadae079a328ed9ed"
] |
[
"codes/data/__init__.py"
] |
[
"import torch\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\n\nfrom .paired_lmdb_dataset import PairedLMDBDataset\nfrom .unpaired_lmdb_dataset import UnpairedLMDBDataset\nfrom .paired_folder_dataset import PairedFolderDataset\nfrom .mypaired_folder_dataset import MyPairedFolderDataset\n\ndef create_dataloader(opt, dataset_idx='train'):\n # setup params\n data_opt = opt['dataset'].get(dataset_idx)\n degradation_type = opt['dataset']['degradation']['type']\n\n # -------------- loader for training -------------- #\n if dataset_idx == 'train':\n # check dataset\n assert data_opt['name'] in ('VimeoTecoGAN', 'VimeoTecoGAN-sub'), \\\n 'Unknown Dataset: {}'.format(data_opt['name'])\n\n if degradation_type == 'BI':\n # create dataset\n dataset = PairedLMDBDataset(\n data_opt,\n scale=opt['scale'],\n tempo_extent=opt['train']['tempo_extent'],\n moving_first_frame=opt['train'].get('moving_first_frame', False),\n moving_factor=opt['train'].get('moving_factor', 1.0))\n\n elif degradation_type == 'BD':\n # enlarge crop size to incorporate border size\n sigma = opt['dataset']['degradation']['sigma']\n enlarged_crop_size = data_opt['crop_size'] + 2 * int(sigma * 3.0)\n\n # create dataset\n dataset = UnpairedLMDBDataset(\n data_opt,\n crop_size=enlarged_crop_size, # override\n tempo_extent=opt['train']['tempo_extent'],\n moving_first_frame=opt['train'].get('moving_first_frame', False),\n moving_factor=opt['train'].get('moving_factor', 1.0))\n\n else:\n raise ValueError('Unrecognized degradation type: {}'.format(\n degradation_type))\n\n # create data loader\n loader = DataLoader(\n dataset=dataset,\n batch_size=data_opt['batch_size'],\n shuffle=True,\n num_workers=data_opt['num_workers'],\n pin_memory=data_opt['pin_memory'])\n\n # -------------- loader for testing -------------- #\n elif dataset_idx.startswith('test'):\n # create data loader\n dataset = PairedFolderDataset(data_opt, scale=opt['scale'])\n loader = DataLoader(\n dataset=dataset,\n batch_size=1,\n shuffle=False,\n num_workers=data_opt['num_workers'],\n pin_memory=data_opt['pin_memory'])\n\n else:\n raise ValueError('Unrecognized dataset index: {}'.format(dataset_idx))\n\n return loader\n\ndef mycreate_dataloader(opt, dataset_idx='train'):\n # setup params\n data_opt = opt['dataset'].get(dataset_idx)\n degradation_type = opt['dataset']['degradation']['type']\n\n # -------------- loader for training -------------- #\n if dataset_idx == 'train':\n # check dataset\n assert data_opt['name'] in ('VimeoTecoGAN', 'VimeoTecoGAN-sub'), \\\n 'Unknown Dataset: {}'.format(data_opt['name'])\n\n if degradation_type == 'BI':\n # create dataset\n dataset = PairedLMDBDataset(\n data_opt,\n scale=opt['scale'],\n tempo_extent=opt['train']['tempo_extent'],\n moving_first_frame=opt['train'].get('moving_first_frame', False),\n moving_factor=opt['train'].get('moving_factor', 1.0))\n\n elif degradation_type == 'BD':\n # enlarge crop size to incorporate border size\n sigma = opt['dataset']['degradation']['sigma']\n enlarged_crop_size = data_opt['crop_size'] + 2 * int(sigma * 3.0)\n\n # create dataset\n dataset = UnpairedLMDBDataset(\n data_opt,\n crop_size=enlarged_crop_size, # override\n tempo_extent=opt['train']['tempo_extent'],\n moving_first_frame=opt['train'].get('moving_first_frame', False),\n moving_factor=opt['train'].get('moving_factor', 1.0))\n\n else:\n raise ValueError('Unrecognized degradation type: {}'.format(\n degradation_type))\n\n # create data loader\n loader = DataLoader(\n dataset=dataset,\n 
batch_size=data_opt['batch_size'],\n shuffle=True,\n num_workers=data_opt['num_workers'],\n pin_memory=data_opt['pin_memory'])\n\n # -------------- loader for testing -------------- #\n elif dataset_idx.startswith('test'):\n # create data loader\n dataset = MyPairedFolderDataset(data_opt, scale=opt['scale'])\n loader = DataLoader(\n dataset=dataset,\n batch_size=1,\n shuffle=False,\n num_workers=data_opt['num_workers'],\n pin_memory=data_opt['pin_memory'])\n\n else:\n raise ValueError('Unrecognized dataset index: {}'.format(dataset_idx))\n\n return loader\n\n\n\ndef prepare_data(opt, data, kernel):\n \"\"\" prepare gt, lr data for training\n\n for BD degradation, generate lr data and remove border of gt data\n for BI degradation, return data directly\n\n \"\"\"\n\n device = torch.device(opt['device'])\n degradation_type = opt['dataset']['degradation']['type']\n\n if degradation_type == 'BI':\n gt_data, lr_data = data['gt'].to(device), data['lr'].to(device)\n\n elif degradation_type == 'BD':\n # setup params\n scale = opt['scale']\n sigma = opt['dataset']['degradation'].get('sigma', 1.5)\n border_size = int(sigma * 3.0)\n\n gt_with_border = data['gt'].to(device)\n n, t, c, gt_h, gt_w = gt_with_border.size()\n lr_h = (gt_h - 2 * border_size) // scale\n lr_w = (gt_w - 2 * border_size) // scale\n\n # generate lr data\n gt_with_border = gt_with_border.view(n * t, c, gt_h, gt_w)\n lr_data = F.conv2d(\n gt_with_border, kernel, stride=scale, bias=None, padding=0)\n lr_data = lr_data.view(n, t, c, lr_h, lr_w)\n\n # remove gt border\n gt_data = gt_with_border[\n ...,\n border_size: border_size + scale * lr_h,\n border_size: border_size + scale * lr_w\n ]\n gt_data = gt_data.view(n, t, c, scale * lr_h, scale * lr_w)\n\n else:\n raise ValueError('Unrecognized degradation type: {}'.format(\n degradation_type))\n\n return { 'gt': gt_data, 'lr': lr_data }\n"
] |
[
[
"torch.device",
"torch.nn.functional.conv2d",
"torch.utils.data.DataLoader"
]
] |
sebastian-sz/tf-autoaugment
|
[
"6807f5095df1b842a8a17265dc2361165f5d1658"
] |
[
"tf_autoaugment/transforms/color.py"
] |
[
"\"\"\"Code for Color transform.\"\"\"\nimport tensorflow as tf\n\nfrom tf_autoaugment.image_utils import blend_batch\nfrom tf_autoaugment.transforms.base_transform import BaseTransform\n\n\nclass Color(BaseTransform):\n \"\"\"Implements Color Transform.\"\"\"\n\n def __call__(self, images: tf.Tensor, level: tf.Tensor) -> tf.Tensor:\n \"\"\"Parse level and run color function on image batch.\"\"\"\n factor = self._parse_level(level)\n return self.color(images=images, factor=factor)\n\n @staticmethod\n def color(images: tf.Tensor, factor: tf.Tensor) -> tf.Tensor:\n \"\"\"Adjust the color balance of the image.\"\"\"\n degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(images))\n return blend_batch(degenerate, images, factor)\n\n def _parse_level(self, level: tf.Tensor) -> tf.Tensor:\n result = (level / self._MAX_LEVEL) * 1.8 + 0.1\n return tf.cast(result, tf.float32)\n"
] |
[
[
"tensorflow.image.rgb_to_grayscale",
"tensorflow.cast"
]
] |
dvapan/simplex_method
|
[
"dcc930b092dffa2e55162ea035f43d85572c8568"
] |
[
"simplex_method.py"
] |
[
"# coding=utf-8\n__author__ = 'dvapan'\n\nimport scipy as sc\nimport scipy.linalg as lin\nimport pprint\n#\n# c = sc.matrix([2.0, 3.0]).transpose()\n# A = sc.matrix([[-10.0, 5.0], [6.0, 20.0], [8.0, 15.0]])\n# b = sc.matrix([600.0, 600.0, 600.0]).transpose()\n\n\n\n# I = [2, 3, 4]\ndef transform_to_classic(A,b,c):\n count_vars = A.shape[1]\n addition_vars = A.shape[0]\n count_all_vars = count_vars + addition_vars\n _A = sc.resize(A, (A.shape[0], count_all_vars))\n _A[:, :count_vars] = A\n _A[:, count_vars:] = sc.eye(addition_vars)\n _c = sc.resize(c, (count_all_vars, 1))\n _c[count_vars:, :] = sc.zeros((addition_vars, 1))\n I = range(count_vars, count_vars+addition_vars)\n return _A, b, _c, I\n\n\n# A = sc.matrix([[1, 1, -1, 1],\n# [1, 14, 10, -10]])\n# b = sc.matrix([2, 24]).transpose()\n# c = sc.matrix([1, 2, 3, -4]).transpose()\n\ndef get_point_from_basis(A, b, I):\n B_sigma = A[:, I]\n x_sigma = lin.solve(B_sigma, b)\n x = sc.zeros(A.shape[1])\n #print x_sigma\n x[I] = x_sigma\n return x\n\n\ndef simplex_method(A, b, c, I, eps):\n count_all_vars = A.shape[1]\n q = 50\n while q > 0:\n B_sigma = A[:, I]\n c_sigma = c[I, :]\n\n x_sigma = lin.solve(B_sigma, b)\n y = lin.solve(B_sigma.transpose(), c_sigma)\n\n D = sc.matrix(A).transpose()*y - c\n non_base_I = [e for e in range(count_all_vars) if e not in I]\n\n q-=1\n finish = reduce(lambda x, y: x and y, map(lambda x: x > -eps, D[non_base_I]), True)\n\n # print I\n # print D.transpose().tolist()[0], get_point_from_basis(A, b, I)\n\n\n if finish:\n x = get_point_from_basis(A, b, I)\n return x, I, (sc.matrix(x)*sc.matrix(c))[0, 0]\n\n k = min([i for i in non_base_I if D[i] < 0])\n\n lmd_k = lin.solve(B_sigma, A[:, k])\n finish = reduce(lambda x, y: x and y, map(lambda x: x < 0, lmd_k),True)\n if finish:\n return None, None, sc.nan\n\n\n tmp = sc.array(x_sigma.transpose())[0].tolist()\n min_i = 0\n while lmd_k[min_i] <= 0:\n min_i += 1\n for i in xrange(len(lmd_k)):\n if lmd_k[i] > 0 and tmp[i]/lmd_k[i] < tmp[min_i]/lmd_k[min_i]:\n min_i = i\n s = min_i\n I[s] = k\n return None,None,None\n\n\ndef artificial_basis_method(A, b, c, eps):\n count_vars = A.shape[1]\n addition_vars = A.shape[0]\n count_all_vars = count_vars + addition_vars\n _A = sc.resize(A, (A.shape[0], count_all_vars))\n _A[:, :count_vars] = A\n _A[:, count_vars:] = sc.eye(addition_vars)\n _c = sc.resize(c, (count_all_vars, 1))\n _c[:count_vars, :] = sc.zeros((count_vars, 1))\n _c[count_vars:, :] = sc.full((addition_vars, 1), -1)\n # if I is None:\n I = range(count_vars, count_vars+addition_vars)\n # pprint.pprint((_A, b, _c ,I))\n Res = simplex_method(_A, b, _c, I, eps)\n if Res[2] < -eps:\n return None, None, None\n Real_I = [i for i in range(count_vars) if i not in Res[1]]\n\n for i in range(len(Res[1])):\n if Res[1][i] >= count_vars:\n Res[1][i] = Real_I.pop(0)\n\n return Res\n\n\ndef double_phase_simplex_method(A, b, c, eps):\n Res = artificial_basis_method(A, b, c, eps)\n # while Res[1] is not None and len(filter(lambda x: x >= A.shape[1], Res[1])) > 0:\n # print \"NEED NEXT ITER OF FIRST PHASE\"\n # Res = artificial_basis_method(A, b, c, eps, Res[1])\n if Res[1] is not None:\n return simplex_method(A, b, c, Res[1], eps)\n else:\n return None, None, None\n"
] |
[
[
"scipy.zeros",
"scipy.matrix",
"scipy.full",
"scipy.resize",
"scipy.eye",
"scipy.linalg.solve"
]
] |
tdjames1/TMA-data-extraction
|
[
"03af0ef3b61df5486f6f061e4e3b62de2e238476"
] |
[
"python/process_content.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"tma_process_content\n.. module:: TMA-data-extraction\n :synopis: Scripts and functions for extracting weather alert data\n from Tanzanian Meteorological Authority \"Five days Severe weather\n impact-based forecasts\" PDFs.\n.. moduleauthor: Tamora D. James <[email protected]>, CEMAC (UoL)\n.. description: This module was developed by CEMAC as part of the GCRF\n African Swift Project. This script processes page contents and\n metadata extracted from Tanzanian Meteorological Authority \"Five days\n Severe weather impact-based forecasts\" PDFs and produces a netCDF4\n file containing gridded weather alert data.\n :copyright: © 2020 University of Leeds.\n :license: BSD 3-clause (see LICENSE)\nExample:\n To use::\n ./tma_process_content <path/to/page2_content.txt> <path/to/metadata.csv>\n <path/to/page2_content.txt> - Path to content extracted from page 2 of TMA weather forecast PDF\n <path/to/metadata.csv> - Path to CSV containing text metadata extracted from page 2 of TMA weather forecast PDF\n.. CEMAC_cemac_generic:\n https://github.com/cemac/cemac_generic\n\"\"\"\n\nimport sys\nimport argparse\nimport os\n\nimport numpy as np\nimport numpy.linalg as LA\nimport bezier\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nfrom matplotlib.path import Path\n\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nimport cartopy.io.shapereader as shpreader\n\nimport skimage.draw\nimport xarray as xr\nimport pandas as pd\n\nPDF_GS_OPS = {\n 'g': 'setgray (nonstroke)',\n 'G': 'setgray (stroke)',\n 'gs': 'setgraphicsstate',\n 'j': 'setlinejoin',\n 'M': 'setmiterlimit',\n 'rg': 'setrgbcolor (nonstroke)',\n 'RG': 'setrgbcolor (stroke)',\n 'q': 'gsave',\n 'Q': 'grestore',\n 'w': 'setlinewidth',\n 'W': 'clip',\n 'W*': 'eoclip',\n}\n\nMAP_IMG = \"../resources/TZA_map.png\"\n\n# Extent of original map image when matched to PlateCarree projection\nextent_MAP_IMG = [28.405, 41.475, -12., -0.745]\n\ndef readFile(fp):\n with open(fp) as f:\n lines = [line.rstrip() for line in f]\n return lines\n\ndef extractGraphics(lines):\n\n path_ops = {'m', 'c', 'l'}\n term_ops = {'f*', 'S', 'n'}\n col_ops = {'rg', 'RG', 'g', 'G'}\n block = []\n graphics = []\n images = []\n col = None\n # Iterate over the lines\n for line in lines:\n if line.endswith(tuple(path_ops)):\n #print(\"got path operator\")\n block.append(line)\n elif line.endswith(tuple(term_ops)):\n #print(\"got terminating path operator\")\n block.append(line)\n path = processBlock(list(block))\n if len(path['contour']):\n graphics.append({'path': path, 'colour': col})\n del block[:]\n elif line.endswith(tuple(col_ops)):\n block.append(line)\n col = processColour(line)\n elif \"Do\" in line:\n #print(\"got image operator\")\n block.append(line)\n image = processImage(list(block))\n if len(image):\n images.append(image)\n del block[:]\n else:\n block.append(line)\n\n # print(len(graphics))\n # print(len(graphics[0]['path']))\n # print(len(images))\n\n return [images, graphics]\n\ndef appendCurve(start, controls):\n nodes = np.concatenate((start, controls))\n nodes = nodes.reshape(len(nodes)//2,2).transpose()\n #print(nodes)\n curve = bezier.Curve.from_nodes(nodes)\n return curve\n\ndef getCentroid(vertices):\n #print(vertices)\n if len(vertices):\n v = np.array(vertices)\n return np.mean(v, axis = 0)\n\ndef processBlock(lines):\n #print(list(line.rstrip() for line in lines))\n path_is_open = False\n start_xy = []\n current = []\n next_xy = []\n controls = []\n vertices = []\n 
line_collection = []\n draw_filled_area = True\n for line in lines:\n s = line.split()\n if not len(s):\n continue\n #print(s[-1])\n op = s[-1]\n if op == \"m\":\n path_is_open = True\n if (len(s) > 3):\n s = s[len(s)-3:]\n start_xy = current = np.array(s[:-1], dtype = float)\n vertices.append(current)\n print(\"[PATH] start point:\", start_xy)\n elif op == \"c\":\n if path_is_open:\n #print(\"append bezier curve\")\n controls = np.array(s[:-1], dtype = float)\n print(\"[PATH] bezier curve, control points:\", controls)\n curve = appendCurve(current, controls)\n line_collection.append(curve)\n current = controls[-2:]\n vertices.append(current)\n else:\n print(\"[PATH] current path is not open to append bezier curve\")\n elif op == \"l\":\n if path_is_open:\n print(\"[PATH] append line segment\")\n next_xy = np.array(s[:-1], dtype = float)\n curve = appendCurve(current, next_xy)\n line_collection.append(curve)\n current = next_xy\n vertices.append(current)\n else:\n print(\"[PATH] current path is not open to append line segment\")\n elif op == \"f*\":\n print(\"[PATH] fill region\")\n path_is_open = False\n if not draw_filled_area:\n del line_collection[:]\n break\n elif op == \"S\":\n print(\"[PATH] stroke region\")\n path_is_open = False\n break\n elif op == \"n\":\n print(\"[PATH] end path without fill or stroke\")\n path_is_open = False\n del line_collection[:]\n break\n elif op == \"h\":\n print(\"[PATH] close subpath\")\n if path_is_open:\n if (current - start_xy).any():\n print(\"[PATH] append line segment to close subpath\")\n line = appendCurve(current, start_xy)\n line_collection.append(line)\n current = start_xy\n vertices.append(current)\n path_is_open = False\n else:\n print(\"[PATH] current path is not open to close path\")\n else:\n if op in PDF_GS_OPS.keys():\n print(\"[PATH] got operator: \" + op + \" = \" + PDF_GS_OPS[op])\n else:\n print(\"[PATH] unknown operator: \" + op)\n centroid = getCentroid(vertices)\n return {'contour': line_collection, 'centroid': centroid}\n\ndef processColour(line):\n col = None\n s = line.split()\n if not len(s):\n return\n op = s[-1]\n if op.lower() == \"rg\":\n print(\"[COLOUR] got set RGB colour operator\", op)\n print(s)\n if (len(s) > 4):\n s = s[len(s)-4:]\n otype = 'stroke' if op == \"RG\" else 'fill'\n col = {'type': otype,\n 'col_spec': 'rgb',\n 'val': np.array(s[:-1], dtype = float) }\n\n elif op.lower() == \"g\":\n print(\"[PATH] got set gray operator\", op)\n print(s)\n if (len(s) > 2):\n s = s[len(s)-2:]\n otype = 'stroke' if op == \"G\" else 'fill'\n col = { 'type': otype,\n 'col_spec': 'gs',\n 'val': np.array(s[:-1], dtype = float) }\n\n return col\n\ndef processImage(lines):\n #print(list(line.rstrip() for line in lines))\n img_collection = []\n rect = []\n ctm = []\n name = \"\"\n for line in lines:\n s = line.split()\n if not len(s):\n continue\n #print(s[-1])\n op = s[-1]\n if op == \"q\":\n print(\"[IMG] start image\")\n elif op == \"re\":\n rect = np.array(s[:-1], dtype = float)\n elif op == \"cm\":\n try:\n ctm = np.array(s[-7:-1], dtype = float)\n print(\"[IMG] ctm:\", ctm)\n except ValueError as e:\n print(\"Error setting CTM from\", s, \": \", e)\n elif op == \"Q\":\n if s[-2] == \"Do\":\n name = s[-3]\n img_collection.append({'name': name, 'clip': rect, 'ctm': ctm})\n elif op == \"n\":\n print(\"[IMG] end path\")\n else:\n if op in PDF_GS_OPS.keys():\n print(\"[IMG] got operator: \" + op + \" = \" + PDF_GS_OPS[op])\n else:\n print(\"[IMG] unknown operator: \" + op)\n return img_collection\n\ndef createPlot(images, 
contours):\n fig, ax = plt.subplots() # Create a figure containing a single axis\n n_col = len(plt.rcParams['axes.prop_cycle'])\n for i in range(len(contours)):\n for curve in contours[i]['contour']:\n _ = curve.plot(num_pts = 256, color = \"C\" + str(i%n_col), ax = ax)\n # plot centroid i\n cx, cy = contours[i]['centroid']\n plt.plot(cx, cy, \"o\")\n\n for i in range(len(images)):\n for img in images[i]:\n print(\"Image:\", img['name'])\n\n ## x y w h re\n # xy = img[1][:2]\n # wh = img[1][2:]\n # w, h = img[1][2:]\n # print(xy)\n # print(wh)\n\n ## w 0 0 h x y cm\n ctm = img['ctm'].reshape(2,3)\n scale = [img['ctm'][0], img['ctm'][3]]\n position = img['ctm'][4:]\n w, h = scale\n xy = position\n print(\"Position:\", xy)\n print(\"Size:\", w, \"x\", h)\n\n pos_check = xy[1] + h < 450\n size_check = w > 120\n\n if pos_check & size_check:\n rect = mpatches.Rectangle(tuple(xy), w, h,\n fc=\"none\", ec=\"green\")\n ax.add_patch(rect)\n arr_img = plt.imread(MAP_IMG, format='png')\n ax.imshow(arr_img, interpolation='none',\n origin='lower',\n extent=[xy[0], xy[0]+w, xy[1]+h, xy[1]],\n clip_on=True)\n\n _ = ax.set_xlim(0, 842)\n _ = ax.set_ylim(0, 595)\n #_ = ax.set_xlim(150, 650)\n #_ = ax.set_ylim(200, 400)\n _ = ax.set_aspect(1)\n plt.show()\n\ndef getMapGroups(images, graphics):\n\n map_groups = []\n for i in range(len(images)):\n for img in images[i]:\n #print(\"Image:\", img['name'])\n\n # w 0 0 h x y cm\n ctm = img['ctm'].reshape(2,3)\n w, h = [img['ctm'][0], img['ctm'][3]]\n x, y = img['ctm'][4:]\n #print(\"Position: \", x, \",\", y)\n #print(\"Size: \", w, \"x\", h)\n\n # Identify map images by location/size\n pos_check = y + h < 450\n size_check = w > 120\n\n if pos_check & size_check:\n # Get graphics within map boundary\n graphics_dict = {}\n for gfx in graphics:\n ix, iy = gfx['path']['centroid']\n #print(\"Centroid: \", ix, \",\", iy)\n x_check = (x < ix) & (ix < x + w)\n y_check = (y < iy) & (iy < y + h)\n if x_check & y_check:\n print(\"Centroid: \", ix, \",\", iy)\n if (ix, iy) not in graphics_dict.keys():\n graphics_dict[(ix, iy)] = [ gfx ]\n else:\n # Check whether colour and contour are the\n # same as previously stored graphics\n found_match = False\n nodes = np.hstack([np.hstack(c.nodes) for c in gfx['path']['contour']])\n for g in graphics_dict[(ix, iy)]:\n n = np.hstack([np.hstack(c.nodes) for c in g['path']['contour']])\n if (nodes == n).all():\n # nodes match, what about colours?\n col = gfx['colour']\n c = g['colour']\n if col['col_spec'] == c['col_spec'] and np.array_equal(getColourValue(col), getColourValue(c)):\n found_match = True\n break\n if not found_match:\n graphics_dict[(ix, iy)].append(gfx)\n print(\"Graphics with distinct centroids:\", len(graphics_dict))\n map_groups.append((img, graphics_dict))\n\n def getXPos(mg):\n return mg[0]['ctm'][4]\n\n map_groups.sort(key = getXPos)\n return map_groups\n\ndef getColourValue(col):\n if col is not None:\n return tuple(col['val'])\n\ndef transformMapGroup(map_group):\n img, graphics_dict = map_group\n\n print(\"Image:\", img['name'])\n\n # Construct current transformation matrix for image\n # a b 0\n # c d 0\n # e f 1\n m1 = np.hstack((img['ctm'].reshape(3,2), np.array([[0],[0],[1]])))\n try:\n m1_inv = LA.inv(m1)\n except LinAlgError:\n sys.exit(\"Could not invert transformation matrix\")\n\n # Create transformation matrix to map from canonical image coords\n # to extent of original map image matched to PlateCarree projection\n lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG\n tm = np.array([lon_max - lon_min, 0, 
0, lat_max - lat_min, lon_min, lat_min])\n m2 = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))\n\n # Pre-multiply transformation matrices\n m = np.matmul(m1_inv, m2)\n\n graphics_list = []\n print(\"Processing graphics:\", len(graphics_dict))\n for z, graphics in graphics_dict.items():\n print(\"Got\", len(graphics), \"graphics objects with centroid:\", z)\n\n stroke_col = None\n fill_col = None\n #breakpoint()\n for i in range(len(graphics)):\n\n col = graphics[i]['colour']\n if col is not None:\n print(\"got colour state:\", col)\n # Get stroke colour specification\n if col['type'] == \"stroke\":\n stroke_col = getColourValue(col)\n # Get fill colour specification\n if col['type'] == \"fill\":\n fill_col = getColourValue(col)\n\n contour = []\n for curve in graphics[i]['path']['contour']:\n ## Relocate curve according to new coordinate system\n nodes = curve.nodes\n nodes_new = []\n for j in range(len(nodes.T)):\n # Multiply node by combined transformation matrix m to\n # get coordinates with respect to image space and\n # transform from canonical image coords to PlateCarree\n # map projection\n v = np.matmul(np.append(nodes.T[j], 1), m)\n nodes_new.append(v[:-1])\n nodes = np.array(nodes_new).T\n curve_new = bezier.Curve.from_nodes(nodes)\n contour.append(curve_new)\n\n # Relocate centroid i\n centroid = np.matmul(np.append(graphics[i]['path']['centroid'], 1), m)[:-1]\n path = { 'colour': { 'stroke': stroke_col,\n 'fill': fill_col },\n 'contour': contour,\n 'centroid': centroid }\n graphics_list.append({ 'path': path})\n\n return (img, graphics_list)\n ## end of transformMapGroup()\n\ndef plotMapGroup(map_group, ax):\n _, graphics = map_group\n n_col = len(plt.rcParams['axes.prop_cycle'])\n print(\"Processing graphics:\", len(graphics))\n for i in range(len(graphics)):\n col = \"C\" + str(i%n_col)\n if graphics[i]['path']['colour'] is not None:\n col = graphics[i]['path']['colour']\n for curve in graphics[i]['path']['contour']:\n _ = curve.plot(num_pts = 256, color = col, ax = ax)\n # plot centroid i\n cx, cy = graphics[i]['path']['centroid']\n ax.plot(cx, cy, \"o\")\n\n ## end of plotMapGroup()\n\ndef getAlertMasks(map_group):\n _, graphics = map_group\n\n # mask will have shape defined by the image map extent divided\n # into 0.1 degree grid\n res = 0.1\n # lon_min, lon_max, lat_min, lat_max = [round(x, 1) for x in extent_MAP_IMG]\n # nx, ny = np.array([lon_max - lon_min, lat_max - lat_min])/res\n # img_shape = (round(nx), round(ny), 3)\n lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG\n x = np.arange(lon_min, lon_max, res) # [round(x,1) for x in x]\n y = np.arange(lat_min, lat_max, res) # [round(y,1) for y in y]\n xx, yy = np.meshgrid(x, y)\n xy = np.vstack((xx.ravel(), yy.ravel())).T\n\n # Create transformation matrix\n tm = np.array([res, 0, 0, res, lon_min, lat_min])\n m = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))\n try:\n m_inv = LA.inv(m)\n except LinAlgError:\n sys.exit(\"Could not invert transformation matrix\")\n\n mask_list = []\n for i in range(len(graphics)):\n col = graphics[i]['path']['colour']\n print(col)\n if col['stroke'] is not None and col['stroke'].count(col['stroke'][0]) != 3:\n # got a contour with RGB colour\n alert_val = 0\n r, g, b = col\n if col == (0.0, 0.0, 0.0):\n print(\"colour: black\")\n elif col == (1.0, 1.0, 0.0):\n print(\"colour: yellow\")\n alert_val = 1\n elif g > 0.33 and g < 0.66:\n # (0.89, 0.424, 0.0392)\n # (0.969, 0.588, 0.275)\n print(\"colour: orange\")\n alert_val = 2\n elif g < 0.33:\n print(\"colour: red\")\n 
alert_val = 3\n else:\n print(\"colour: other\")\n\n #img = np.zeros(img_shape, dtype = np.double)\n img2 = np.array([alert_val]*xx.size).reshape(xx.shape)\n img = np.zeros(xx.shape, dtype = np.double)\n\n # nodes_new = []\n # for curve in graphics[i]['path']['contour']:\n # # transform curve to grid coords\n # nodes = curve.nodes\n # for i in range(len(nodes.T)):\n # # Multiply node by transformation matrix m to\n # # get grid coordinates\n # v = np.matmul(np.append(nodes.T[i], 1), m_inv)\n # nodes_new.append(v[:-1])\n # nodes = np.array([node.round() for node in nodes_new])\n # mask = skimage.draw.polygon2mask(img_shape[:-1], nodes)\n # img[mask] = col\n #mask_list.append(img)\n\n ## alternative approach\n vv = np.vstack([curve.nodes.T for curve in graphics[i]['path']['contour']])\n # construct a Path from the vertices\n pth = Path(vv, closed=False)\n\n # test which pixels fall within the path\n mask = pth.contains_points(xy)\n\n # reshape to the same size as the grid\n mask = mask.reshape(xx.shape)\n\n # create a masked array\n masked = np.ma.masked_array(img2, ~mask)\n\n # or simply set values for masked pixels\n img[mask] = alert_val\n\n # combine with coords...\n am = np.stack((xx, yy, img))\n\n mask_list.append(am)\n return mask_list\n ## end\n\ndef createGriddedData(map_groups, alert_data, file_path=None):\n # container for gridded data layers\n vars = {}\n\n # data will have shape defined by the image map extent divided\n # into 0.1 degree grid\n res = 0.1\n lon_min, lon_max, lat_min, lat_max = extent_MAP_IMG\n x = np.arange(lon_min, lon_max, res) # [round(x,1) for x in x]\n y = np.arange(lat_min, lat_max, res) # [round(y,1) for y in y]\n xx, yy = np.meshgrid(x, y)\n xy = np.vstack((xx.ravel(), yy.ravel())).T\n\n # Create transformation matrix\n tm = np.array([res, 0, 0, res, lon_min, lat_min])\n m = np.hstack((tm.reshape(3,2), np.array([[0],[0],[1]])))\n try:\n m_inv = LA.inv(m)\n except LinAlgError:\n sys.exit(\"Could not invert transformation matrix\")\n\n for i, mg in enumerate(map_groups):\n _, graphics = mg\n print(i)\n\n # count arrays added for this group\n n = 0\n\n for j, gfx in enumerate(graphics):\n colour = gfx['path']['colour']\n print(colour)\n col = None\n if colour['stroke'] is not None and len(colour['stroke']) == 3:\n col = colour['stroke']\n elif colour['fill'] is not None and len(colour['fill']) == 3:\n col = colour['fill']\n if col is not None:\n # got a contour with associated RGB colour\n print(col)\n alert_val = 0\n r, g, b = col\n if col == (0.0, 0.0, 0.0):\n print(\"colour: black\")\n elif col == (1.0, 1.0, 0.0):\n print(\"colour: yellow\")\n alert_val = 1\n elif col == (1.0, 0.0, 0.0):\n print(\"colour: red\")\n alert_val = 3\n elif g > 0.25 and g < 0.66:\n # (0.89, 0.424, 0.0392)\n # (0.969, 0.588, 0.275)\n # (0.596, 0.282, 0.0275)\n print(\"colour: orange\")\n alert_val = 2\n elif r > 0.9 and g < 0.25:\n print(\"colour: red\")\n alert_val = 3\n else:\n print(\"colour: other\")\n\n img = np.zeros(xx.shape, dtype = np.double)\n\n # get nodes for the alert area\n vv = np.vstack([curve.nodes.T for curve in gfx['path']['contour']])\n\n # construct a Path from the vertices\n pth = Path(vv, closed=False)\n\n # test which pixels fall within the path\n mask = pth.contains_points(xy)\n\n # reshape to the same size as the grid\n mask = mask.reshape(xx.shape)\n\n # set values for masked pixels\n img[mask] = alert_val\n\n da = xr.DataArray(data=img, dims=[\"lat\", \"lon\"], coords=[y, x])\n\n da.attrs = {\n 'issue_date': alert_data.loc[i,'issue_date'],\n 
'alert_date': alert_data.loc[i,'date'],\n 'alert_day': alert_data.loc[i,'day'],\n 'alert_weekday': alert_data.loc[i,'weekday'],\n 'alert_id': n+1,\n 'alert_type': '',\n 'alert_text': alert_data.loc[i,'alert_text'],\n }\n\n var_name = '_'.join(['alert', 'day'+str(da.attrs['alert_day']),\n str(da.attrs['alert_id'])])\n vars[var_name] = da\n n += 1\n\n # combine data arrays into data set\n issue_date = alert_data.loc[0, 'issue_date']\n ds = xr.Dataset(data_vars=vars,\n attrs={\n 'title': 'TMA weather warnings for ' + issue_date,\n 'issue_date': issue_date,\n })\n if file_path is None:\n file_path = 'TMA_weather_warning_'+issue_date+'.nc'\n ds.to_netcdf(file_path)\n ## end\n\n# Main\ndef main():\n\n parser = argparse.ArgumentParser(description='Process TMA PDF contents')\n parser.add_argument('filepath', nargs=1, type=str)\n parser.add_argument('metadata', nargs=1, type=str)\n args = parser.parse_args()\n\n try:\n # read lines from input file\n lines = readFile(args.filepath[0])\n except:\n # IOError\n print(\"Input file not found:\", args.filepath[0])\n sys.exit(4)\n\n images, graphics = extractGraphics(lines)\n\n mgroups = getMapGroups(images, graphics)\n mgroups = [transformMapGroup(mg) for mg in mgroups]\n\n try:\n # Get associated data - one row per forecast date\n alert_data = pd.read_csv(args.metadata[0])\n except FileNotFoundError:\n print(\"Couldn't read metadata file:\", args.metadata[0])\n else:\n file_name = os.path.basename(args.metadata[0]).split(\".\")[0] + \".nc\"\n createGriddedData(mgroups, alert_data, file_name)\n\n ## end of main()\n\n# Run main\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"matplotlib.pyplot.imread",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.hstack",
"pandas.read_csv",
"numpy.arange",
"numpy.matmul",
"numpy.stack",
"numpy.zeros",
"numpy.linalg.inv",
"matplotlib.path.Path",
"numpy.append",
"numpy.meshgrid",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"numpy.ma.masked_array",
"numpy.vstack"
]
] |
happyfuntimegroup/machinelearning
|
[
"105273b2cf5907b23a2ee2b4c076d89f215c38ff"
] |
[
"CODE/models/.ipynb_checkpoints/regression-checkpoint.py"
] |
[
"def simple_linear(X_train, y_train, X_val, y_val):\n from sklearn.linear_model import LinearRegression\n from sklearn.metrics import r2_score, mean_absolute_error\n\n model = LinearRegression()\n reg = model.fit(X = X_train, y = y_train)\n y_pred_val = model.predict(X_val)\n print(r2_score(y_val, y_pred_val))\n print(mean_absolute_error(y_val, y_pred_val))\n print()\n \n #return r2, mae\n \n\ndef log_reg(X_train, y_train, X_val, y_val):\n import numpy as np\n from sklearn.linear_model import LogisticRegression\n from sklearn.preprocessing import StandardScaler\n from sklearn.metrics import r2_score, mean_absolute_error\n\n scaler = StandardScaler()\n X_train_s = scaler.fit_transform(X_train)\n X_val_s = scaler.transform(X_val)\n\n y_ravel = np.ravel(y_train)\n\n model = LogisticRegression(random_state = 123, max_iter = 2000)\n reg = model.fit(X = X_train_s, y = y_ravel)\n y_pred_val = model.predict(X_val_s)\n\n print('r2:', r2_score(y_val, y_pred_val)) # 0.006551953988217396\n print(\"MAE:\", mean_absolute_error(y_val, y_pred_val)) # 34.07342328208346\n print()"
] |
[
[
"sklearn.metrics.r2_score",
"sklearn.linear_model.LogisticRegression",
"sklearn.metrics.mean_absolute_error",
"sklearn.linear_model.LinearRegression",
"numpy.ravel",
"sklearn.preprocessing.StandardScaler"
]
] |
manodeep/yymao-helpers
|
[
"4ceffd639f4a10d259146f3f94e0b2415e835f32"
] |
[
"helpers/shuffleMockCatalog.py"
] |
[
"__all__ = ['shuffleMockCatalog', 'generate_upid']\nimport warnings\nfrom itertools import izip\nimport numpy as np\nfrom numpy.lib.recfunctions import rename_fields\n\ndef _iter_plateau_in_sorted_array(a):\n if len(a):\n k = np.where(a[1:] != a[:-1])[0]\n k += 1\n i = 0\n for j in k:\n yield i, j\n i = j\n yield i, len(a)\n\ndef _iter_indices_in_bins(bins, a):\n if len(a) and len(bins):\n s = a.argsort()\n k = np.searchsorted(a, bins, 'right', sorter=s)\n i = 0\n for j in k:\n yield s[i:j]\n i = j\n yield s[i:]\n\ndef _apply_rotation(pos, box_size):\n half_box_size = box_size * 0.5\n pos[pos > half_box_size] -= box_size\n pos[pos < -half_box_size] += box_size\n return np.dot(pos, np.linalg.qr(np.random.randn(3,3))[0])\n\n_axes = list('xyz')\n\ndef _get_xyz(a, ax_type=float):\n return np.fromiter((a[ax] for ax in _axes), ax_type, 3)\n\n\ndef generate_upid(pid, id, recursive=True):\n \"\"\"\n To generate (or to fix) the upid of a halo catalog.\n\n Parameters\n ----------\n pid : array_like\n An ndarray of integer that contains the parent IDs of each halo.\n id : array_like\n An ndarray of integer that contains the halo IDs.\n recursive : bool, optional\n Whether or not to run this function recursively. Default is True.\n\n Returns\n -------\n upid : array_like\n The ultimate parent IDs. \n\n Examples\n --------\n >>> halos['upid'] = generate_upid(halos['pid'], halos['id'])\n \n \"\"\"\n pid = np.ravel(pid)\n id = np.ravel(id)\n if len(id) != len(pid):\n raise ValueError('`pid` and `id` must have the same length.')\n if not len(pid):\n raise ValueError('`pid` and `id` must not be empty.')\n s = pid.argsort()\n idx = np.fromiter(_iter_plateau_in_sorted_array(pid[s]), \\\n np.dtype([('start', int), ('stop', int)]))\n unique_pid = pid[s[idx['start']]]\n if unique_pid[0] == -1:\n unique_pid = unique_pid[1:]\n idx = idx[1:]\n host_flag = (pid == -1)\n not_found = np.where(np.in1d(unique_pid, id[host_flag], True, True))[0]\n if not len(not_found):\n return pid\n sub_flag = np.where(~host_flag)[0]\n found = sub_flag[np.in1d(id[sub_flag], unique_pid[not_found], True)]\n found = found[id[found].argsort()]\n assert (id[found] == unique_pid[not_found]).all()\n del host_flag, sub_flag, unique_pid\n pid_old = pid.copy()\n for i, j in izip(found, not_found):\n pid[s[slice(*idx[j])]] = pid_old[i]\n del pid_old, idx, s, found, not_found\n return generate_upid(pid, id, True) if recursive else pid\n\n\ndef shuffleMockCatalog(mock_ids, halo_catalog, bin_width=None, bins=None,\n proxy='mvir', box_size=None, apply_rsd=False,\n shuffle_centrals=True, shuffle_satellites=True, rotate_satellites=False,\n return_structured_array=False):\n \"\"\"\n Shuffle a mock catalog according to Zentner et al. (2014) [arXiv:1311.1818]\n\n Parameters\n ----------\n mock_ids : array_like\n Should be a 1-d array of int which contains the corresponding halo IDs\n for the galaxies in the mock catalog to be shuffled.\n halo_catalog : array_like\n Should be a 1-d structrued array which has the following fields:\n id, upid, x, y, z, vz (if `apply_rsd` it True), and the proxy.\n bin_width : float or None, optional\n The width of the bin, in dex.\n bins : int, array_like, or None, optional\n If an integer is provided, it is interpreted as the number of bins.\n If an array is provided, it is interpreted as the edges of the bins.\n The parameter _overwrites_ `bin_width`.\n proxy : string, optional\n The proxy to bin on. Must be present in the fields of `halo_catalog`.\n box_size : float or None, optional\n The side length of the box. 
Should be in the same unit as x, y, z.\n apply_rsd : bool, optional\n Whether or not to apply redshift space distortions on the z-axis.\n (Default is False)\n shuffle_centrals : bool, optional\n Whether or not to shuffle central galaxies (Default is True)\n shuffle_satellites : bool, optional\n Whether or not to shuffle satellite galaxies (Default is True)\n rotate_satellites : bool, optional\n Whether or not to apply a random rotation to satellite galaxies \n (Default is False)\n return_structured_array : bool, optional\n Whether to return a structured array that contains x, y, z\n or just a n-by-3 float array.\n\n Returns\n -------\n pos : array_like\n A ndarray that contains x, y, z of the shuffled positions.\n \"\"\"\n\n # check necessary fields in halo_catalog\n fields = ['id', 'upid', proxy] + _axes\n if apply_rsd:\n fields.append('vz')\n if not all((f in halo_catalog.dtype.names for f in fields)):\n raise ValueError('`halo_catalog` should have the following fields: '+ \\\n ', '.join(fields))\n\n # check dtype\n ax_type = halo_catalog['x'].dtype.type\n if any((halo_catalog[ax].dtype.type != ax_type for ax in 'yz')):\n raise ValueError('The types of fields x, y, z in `halo_catalog` ' \\\n 'must all be the same.')\n\n # check all mock_ids are in halo_catalog\n s = halo_catalog['id'].argsort()\n idx = np.searchsorted(halo_catalog['id'], mock_ids, sorter=s)\n try:\n idx = s[idx]\n except IndexError:\n raise ValueError('`mock_ids` must all present in `halo_catalog`')\n if not (halo_catalog['id'][idx] == mock_ids).all():\n raise ValueError('`mock_ids` must all present in `halo_catalog`')\n mock_idx = np.ones(len(halo_catalog), dtype=int)\n mock_idx *= -1\n mock_idx[idx] = np.arange(len(mock_ids))\n del idx\n \n # separate hosts and subs\n host_flag = (halo_catalog['upid'] == -1)\n subs = rename_fields(halo_catalog[~host_flag], {'id':'mock_idx'})\n subs['mock_idx'] = mock_idx[~host_flag]\n subs = subs[subs['mock_idx'] > -1] # only need subs that are mocks \n host_flag = s[host_flag[s]] # this sorts `hosts` by `id`\n hosts = rename_fields(halo_catalog[host_flag], {'upid':'mock_idx'})\n hosts['mock_idx'] = mock_idx[host_flag]\n del host_flag, mock_idx, s\n\n # group subhalos\n subs.sort(order='upid')\n idx = np.fromiter(_iter_plateau_in_sorted_array(subs['upid']), \\\n np.dtype([('start', int), ('stop', int)]))\n host_ids = subs['upid'][idx['start']]\n if not np.in1d(host_ids, hosts['id'], True).all():\n raise ValueError('Some subhalos associdated with the mock galaxies ' \\\n 'have no parent halos in `halo_catalog`. 
Consider using ' \\\n '`generate_upid` to fix this.')\n # for the following to work, `hosts` need to be sorted by `id`\n subs_idx = np.zeros(len(hosts), dtype=idx.dtype)\n subs_idx[np.in1d(hosts['id'], host_ids, True)] = idx\n del idx, host_ids\n\n # check bins\n try:\n bin_width = float(bin_width)\n except (ValueError, TypeError):\n bin_width = None\n else:\n if bin_width <= 0:\n bin_width = None\n if bin_width is None:\n bin_width = 0.1\n \n mi = np.log10(hosts[proxy].min()*0.99999)\n ma = np.log10(hosts[proxy].max())\n\n if bins is None:\n bins = int(np.ceil((ma-mi)/bin_width))\n mi = ma - bin_width*bins\n try:\n bins = int(bins)\n except (ValueError, TypeError):\n bins = np.asarray(bins)\n if len(bins) < 2 or (bins[1:]<bins[:-1]).any():\n raise ValueError('Please specify a valid `bin` parameter.')\n else:\n bins = np.logspace(mi, ma, bins+1)\n\n # create the array for storing results\n pos = np.empty((len(mock_ids), 3), ax_type)\n pos.fill(np.nan)\n\n # loop of bins of proxy (e.g. mvir)\n for i, indices in enumerate(_iter_indices_in_bins(bins, hosts[proxy])):\n if not len(indices):\n continue\n\n if i==0 or i==len(bins):\n if (hosts['mock_idx'][indices] > -1).any() or \\\n any((subs_idx['start'][j] < subs_idx['stop'][j] \\\n for j in indices)):\n warnings.warn('Some halos associdated with the mock catalog ' \\\n 'are outside the bin range.', RuntimeWarning)\n continue\n\n # shuffle satellites\n if shuffle_satellites:\n choices = indices.tolist()\n for j in indices:\n subs_this = subs[slice(*subs_idx[j])]\n if not len(subs_this):\n continue\n mock_idx_this = subs_this['mock_idx']\n pos[mock_idx_this] = subs_this[_axes].view((ax_type,3))\n if shuffle_satellites:\n k = choices.pop(np.random.randint(len(choices)))\n pos[mock_idx_this] -= _get_xyz(hosts[j], ax_type)\n if rotate_satellites:\n pos[mock_idx_this] = \\\n _apply_rotation(pos[mock_idx_this], box_size)\n pos[mock_idx_this] += _get_xyz(hosts[k], ax_type)\n if apply_rsd:\n pos[mock_idx_this,2] += (subs_this['vz'] \\\n + hosts['vz'][k] - hosts['vz'][j])/100.0\n else:\n if rotate_satellites:\n host_pos = _get_xyz(hosts[j], ax_type)\n pos[mock_idx_this] -= host_pos\n pos[mock_idx_this] = \\\n _apply_rotation(pos[mock_idx_this], box_size)\n pos[mock_idx_this] += host_pos\n if apply_rsd:\n pos[mock_idx_this,2] += subs_this['vz']/100.0\n \n # shuffle hosts\n has_mock = indices[hosts['mock_idx'][indices] > -1]\n if not len(has_mock):\n continue\n mock_idx_this = hosts['mock_idx'][has_mock]\n if shuffle_centrals:\n has_mock = np.random.choice(indices, len(has_mock), False)\n pos[mock_idx_this] = hosts[_axes][has_mock].view((ax_type,3))\n if apply_rsd:\n pos[mock_idx_this,2] += hosts['vz'][has_mock]/100.0\n\n # sanity check\n if np.isnan(pos).any():\n warnings.warn('Some galaxies in the mock catalog have not been ' \\\n 'assigned a new position. Maybe the corresponding halo is ' \\\n 'outside the bin range.', RuntimeWarning)\n\n # wrap box\n if box_size is not None:\n pos = np.remainder(pos, box_size, pos)\n\n if return_structured_array:\n pos = pos.view(np.dtype(zip(_axes, [ax_type]*3)))\n\n return pos\n\n"
] |
[
[
"numpy.logspace",
"numpy.lib.recfunctions.rename_fields",
"numpy.in1d",
"numpy.asarray",
"numpy.isnan",
"numpy.dtype",
"numpy.ceil",
"numpy.random.randn",
"numpy.remainder",
"numpy.searchsorted",
"numpy.ravel",
"numpy.fromiter",
"numpy.where"
]
] |
geneing/TensorFlowTTS
|
[
"0035ba00fec1b2b1184c8df32646d6a88b01ee5b"
] |
[
"examples/mfa_extraction/fix_mismatch.py"
] |
[
"# -*- coding: utf-8 -*-\n# Copyright 2020 TensorFlowTTS Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fix mismatch between sum durations and mel lengths.\"\"\"\n\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nimport click\nimport logging\nimport sys\n\n\nlogging.basicConfig(\n level=logging.DEBUG,\n stream=sys.stdout,\n format=\"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\",\n)\n\n\[email protected]()\[email protected](\"--base_path\", default=\"dump\")\[email protected](\"--trimmed_dur_path\", default=\"dataset/trimmed-durations\")\[email protected](\"--dur_path\", default=\"dataset/durations\")\[email protected](\"--use_norm\", default=\"f\")\ndef fix(base_path: str, dur_path: str, trimmed_dur_path: str, use_norm: str):\n for t in [\"train\", \"valid\"]:\n mfa_longer = []\n mfa_shorter = []\n big_diff = []\n not_fixed = []\n pre_path = os.path.join(base_path, t)\n os.makedirs(os.path.join(pre_path, \"fix_dur\"), exist_ok=True)\n os.makedirs(os.path.join(pre_path, \"phids\"), exist_ok=True)\n \n logging.info(f\"FIXING {t} set ...\\n\")\n base = lambda s: s.replace('-ids.npy','')\n for i in tqdm(os.listdir(os.path.join(pre_path, \"ids\"))):\n if use_norm == \"t\":\n mel = np.load(\n os.path.join(\n pre_path, \"norm-feats\", f\"{base(i)}-norm-feats.npy\"\n )\n )\n else:\n mel = np.load(\n os.path.join(\n pre_path, \"raw-feats\", f\"{base(i)}-raw-feats.npy\"\n )\n )\n\n try:\n dur = np.load(\n os.path.join(trimmed_dur_path, f\"{base(i)}-durations.npy\")\n )\n except:\n dur = np.load(\n os.path.join(dur_path, f\"{base(i)}-durations.npy\")\n )\n\n ph_ids = np.load(os.path.join(dur_path, f\"{base(i)}-phids.npy\"))\n\n l_mel = len(mel)\n dur_s = np.sum(dur)\n cloned = np.array(dur, copy=True)\n diff = abs(l_mel - dur_s)\n\n if abs(l_mel - dur_s) > 30: # more then 300 ms\n big_diff.append([i, abs(l_mel - dur_s)])\n\n if dur_s > l_mel:\n for j in range(1, len(dur) - 1):\n if diff == 0:\n break\n dur_val = cloned[-j]\n\n if dur_val >= diff:\n cloned[-j] -= diff\n diff -= dur_val\n break\n else:\n cloned[-j] = 0\n diff -= dur_val\n\n if j == len(dur) - 2:\n not_fixed.append(i)\n\n mfa_longer.append(abs(l_mel - dur_s))\n elif dur_s < l_mel:\n cloned[-1] += diff\n mfa_shorter.append(abs(l_mel - dur_s))\n\n np.save(\n os.path.join(pre_path, \"fix_dur\", f\"{base(i)}-durations.npy\"),\n cloned.astype(np.int32),\n allow_pickle=False,\n )\n\n np.save(\n os.path.join(pre_path, \"phids\", f\"{base(i)}-phids.npy\"),\n ph_ids,\n allow_pickle=False,\n )\n\n logging.info(\n f\"{t} stats: number of mfa with longer duration: {len(mfa_longer)}, total diff: {sum(mfa_longer)}\"\n f\", mean diff: {sum(mfa_longer)/len(mfa_longer) if len(mfa_longer) > 0 else 0}\"\n )\n logging.info(\n f\"{t} stats: number of mfa with shorter duration: {len(mfa_shorter)}, total diff: {sum(mfa_shorter)}\"\n f\", mean diff: {sum(mfa_shorter)/len(mfa_shorter) if len(mfa_shorter) > 0 else 0}\"\n )\n logging.info(\n f\"{t} stats: number of files with a ''big'' duration diff: {len(big_diff)} if 
number > 1, you should check it\"\n        )\n        logging.info(f\"{t} stats: not fixed len: {len(not_fixed)}\\n\")\n\n\nif __name__ == \"__main__\":\n    fix()\n"
] |
[
[
"numpy.array",
"numpy.sum"
]
] |
MahdiDavari/pymatgen
|
[
"eb6cd95230c11ac761a96ebf82b98f71177bb71f"
] |
[
"pymatgen/core/tests/test_structure.py"
] |
[
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\nfrom __future__ import division, unicode_literals, print_function\n\nfrom pymatgen.util.testing import PymatgenTest\nfrom pymatgen.core.periodic_table import Element, Specie\nfrom pymatgen.core.composition import Composition\nfrom pymatgen.core.operations import SymmOp\nfrom pymatgen.core.structure import IStructure, Structure, IMolecule, \\\n StructureError, Molecule\nfrom pymatgen.core.lattice import Lattice\nimport random\nimport os\nimport numpy as np\n\n\nclass IStructureTest(PymatgenTest):\n\n def setUp(self):\n coords = [[0, 0, 0], [0.75, 0.5, 0.75]]\n self.lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.struct = IStructure(self.lattice, [\"Si\"] * 2, coords)\n self.assertEqual(len(self.struct), 2,\n \"Wrong number of sites in structure!\")\n self.assertTrue(self.struct.is_ordered)\n self.assertTrue(self.struct.ntypesp == 1)\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0., 0, 0.0000001])\n self.assertRaises(StructureError, IStructure, self.lattice,\n [\"Si\"] * 2, coords, True)\n self.propertied_structure = IStructure(\n self.lattice, [\"Si\"] * 2, coords,\n site_properties={'magmom': [5, -5]})\n\n def test_matches(self):\n ss = self.struct * 2\n self.assertTrue(ss.matches(self.struct))\n\n def test_bad_structure(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n coords.append([0.75, 0.5, 0.75])\n self.assertRaises(StructureError, IStructure, self.lattice,\n [\"Si\"] * 3, coords, validate_proximity=True)\n #these shouldn't raise an error\n IStructure(self.lattice, [\"Si\"] * 2, coords[:2], True)\n IStructure(self.lattice, [\"Si\"], coords[:1], True)\n\n\n def test_volume_and_density(self):\n self.assertAlmostEqual(self.struct.volume, 40.04, 2, \"Volume wrong!\")\n self.assertAlmostEqual(self.struct.density, 2.33, 2,\n \"Incorrect density\")\n\n def test_specie_init(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n s = IStructure(self.lattice, [{Specie('O', -2): 1.0},\n {Specie('Mg', 2): 0.8}], coords)\n self.assertEqual(s.composition.formula, 'Mg0.8 O1')\n\n def test_get_sorted_structure(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n s = IStructure(self.lattice, [\"O\", \"Li\"], coords,\n site_properties={'charge': [-2, 1]})\n sorted_s = s.get_sorted_structure()\n self.assertEqual(sorted_s[0].species_and_occu, Composition(\"Li\"))\n self.assertEqual(sorted_s[1].species_and_occu, Composition(\"O\"))\n self.assertEqual(sorted_s[0].charge, 1)\n self.assertEqual(sorted_s[1].charge, -2)\n s = IStructure(self.lattice, [\"Se\", \"C\", \"Se\", \"C\"],\n [[0] * 3, [0.5] * 3, [0.25] * 3, [0.75] * 3])\n self.assertEqual([site.specie.symbol\n for site in s.get_sorted_structure()],\n [\"C\", \"C\", \"Se\", \"Se\"])\n\n def test_get_space_group_data(self):\n self.assertEqual(self.struct.get_space_group_info(), ('Fd-3m', 227))\n\n def test_fractional_occupations(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n s = IStructure(self.lattice, [{'O': 1.0}, {'Mg': 0.8}],\n coords)\n self.assertEqual(s.composition.formula, 'Mg0.8 O1')\n self.assertFalse(s.is_ordered)\n\n def test_get_distance(self):\n self.assertAlmostEqual(self.struct.get_distance(0, 1), 2.35, 2,\n \"Distance calculated wrongly!\")\n pt = [0.9, 0.9, 0.8]\n 
self.assertAlmostEqual(self.struct[0].distance_from_point(pt),\n 1.50332963784, 2,\n \"Distance calculated wrongly!\")\n\n def test_as_dict(self):\n si = Specie(\"Si\", 4)\n mn = Element(\"Mn\")\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n struct = IStructure(self.lattice, [{si: 0.5, mn: 0.5}, {si: 0.5}],\n coords)\n self.assertIn(\"lattice\", struct.as_dict())\n self.assertIn(\"sites\", struct.as_dict())\n d = self.propertied_structure.as_dict()\n self.assertEqual(d['sites'][0]['properties']['magmom'], 5)\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n s = IStructure(self.lattice, [{Specie('O', -2,\n properties={\"spin\": 3}): 1.0},\n {Specie('Mg', 2,\n properties={\"spin\": 2}): 0.8}],\n coords, site_properties={'magmom': [5, -5]})\n d = s.as_dict()\n self.assertEqual(d['sites'][0]['properties']['magmom'], 5)\n self.assertEqual(d['sites'][0]['species'][0]['properties']['spin'], 3)\n\n d = s.as_dict(0)\n self.assertNotIn(\"volume\", d['lattice'])\n self.assertNotIn(\"xyz\", d['sites'][0])\n\n def test_from_dict(self):\n\n d = self.propertied_structure.as_dict()\n s = IStructure.from_dict(d)\n self.assertEqual(s[0].magmom, 5)\n d = self.propertied_structure.as_dict(0)\n s2 = IStructure.from_dict(d)\n self.assertEqual(s, s2)\n\n d = {'lattice': {'a': 3.8401979337, 'volume': 40.044794644251596,\n 'c': 3.8401979337177736, 'b': 3.840198994344244,\n 'matrix': [[3.8401979337, 0.0, 0.0],\n [1.9200989668, 3.3257101909, 0.0],\n [0.0, -2.2171384943, 3.1355090603]],\n 'alpha': 119.9999908639842, 'beta': 90.0,\n 'gamma': 60.000009137322195},\n 'sites': [{'properties': {'magmom': 5}, 'abc': [0.0, 0.0, 0.0],\n 'occu': 1.0, 'species': [{'occu': 1.0,\n 'oxidation_state': -2,\n 'properties': {'spin': 3},\n 'element': 'O'}],\n 'label': 'O2-', 'xyz': [0.0, 0.0, 0.0]},\n {'properties': {'magmom': -5},\n 'abc': [0.75, 0.5, 0.75],\n 'occu': 0.8, 'species': [{'occu': 0.8,\n 'oxidation_state': 2,\n 'properties': {'spin': 2},\n 'element': 'Mg'}],\n 'label': 'Mg2+:0.800',\n 'xyz': [3.8401979336749994, 1.2247250003039056e-06,\n 2.351631795225]}]}\n s = IStructure.from_dict(d)\n self.assertEqual(s[0].magmom, 5)\n self.assertEqual(s[0].specie.spin, 3)\n self.assertEqual(type(s), IStructure)\n\n def test_site_properties(self):\n site_props = self.propertied_structure.site_properties\n self.assertEqual(site_props['magmom'], [5, -5])\n self.assertEqual(self.propertied_structure[0].magmom, 5)\n self.assertEqual(self.propertied_structure[1].magmom, -5)\n\n def test_copy(self):\n new_struct = self.propertied_structure.copy(site_properties={'charge':\n [2, 3]})\n self.assertEqual(new_struct[0].magmom, 5)\n self.assertEqual(new_struct[1].magmom, -5)\n self.assertEqual(new_struct[0].charge, 2)\n self.assertEqual(new_struct[1].charge, 3)\n\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0., 0, 0.0000001])\n\n structure = IStructure(self.lattice, [\"O\", \"Si\"], coords,\n site_properties={'magmom': [5, -5]})\n\n new_struct = structure.copy(site_properties={'charge': [2, 3]},\n sanitize=True)\n self.assertEqual(new_struct[0].magmom, -5)\n self.assertEqual(new_struct[1].magmom, 5)\n self.assertEqual(new_struct[0].charge, 3)\n self.assertEqual(new_struct[1].charge, 2)\n self.assertAlmostEqual(new_struct.volume, structure.volume)\n\n def test_interpolate(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n struct = IStructure(self.lattice, [\"Si\"] * 2, coords)\n coords2 = list()\n coords2.append([0, 0, 0])\n 
coords2.append([0.5, 0.5, 0.5])\n struct2 = IStructure(self.struct.lattice, [\"Si\"] * 2, coords2)\n int_s = struct.interpolate(struct2, 10)\n for s in int_s:\n self.assertIsNotNone(s, \"Interpolation Failed!\")\n self.assertEqual(int_s[0].lattice, s.lattice)\n self.assertArrayEqual(int_s[1][1].frac_coords, [0.725, 0.5, 0.725])\n\n badlattice = [[1, 0.00, 0.00], [0, 1, 0.00], [0.00, 0, 1]]\n struct2 = IStructure(badlattice, [\"Si\"] * 2, coords2)\n self.assertRaises(ValueError, struct.interpolate, struct2)\n\n coords2 = list()\n coords2.append([0, 0, 0])\n coords2.append([0.5, 0.5, 0.5])\n struct2 = IStructure(self.struct.lattice, [\"Si\", \"Fe\"], coords2)\n self.assertRaises(ValueError, struct.interpolate, struct2)\n\n # Test autosort feature.\n s1 = Structure.from_spacegroup(\"Fm-3m\", Lattice.cubic(3),\n [\"Fe\"], [[0, 0, 0]])\n s1.pop(0)\n s2 = Structure.from_spacegroup(\"Fm-3m\", Lattice.cubic(3),\n [\"Fe\"], [[0, 0, 0]])\n s2.pop(2)\n random.shuffle(s2)\n\n for s in s1.interpolate(s2, autosort_tol=0.5):\n self.assertArrayAlmostEqual(s1[0].frac_coords, s[0].frac_coords)\n self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)\n\n # Make sure autosort has no effect on simpler interpolations,\n # and with shuffled sites.\n s1 = Structure.from_spacegroup(\"Fm-3m\", Lattice.cubic(3),\n [\"Fe\"], [[0, 0, 0]])\n s2 = Structure.from_spacegroup(\"Fm-3m\", Lattice.cubic(3),\n [\"Fe\"], [[0, 0, 0]])\n s2[0] = \"Fe\", [0.01, 0.01, 0.01]\n random.shuffle(s2)\n\n for s in s1.interpolate(s2, autosort_tol=0.5):\n self.assertArrayAlmostEqual(s1[1].frac_coords, s[1].frac_coords)\n self.assertArrayAlmostEqual(s1[2].frac_coords, s[2].frac_coords)\n self.assertArrayAlmostEqual(s1[3].frac_coords, s[3].frac_coords)\n\n def test_interpolate_lattice(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n struct = IStructure(self.lattice, [\"Si\"] * 2, coords)\n coords2 = list()\n coords2.append([0, 0, 0])\n coords2.append([0.5, 0.5, 0.5])\n l2 = Lattice.from_lengths_and_angles([3,4,4], [100,100,70])\n struct2 = IStructure(l2, [\"Si\"] * 2, coords2)\n int_s = struct.interpolate(struct2, 2, interpolate_lattices=True)\n self.assertArrayAlmostEqual(struct.lattice.abc,\n int_s[0].lattice.abc)\n self.assertArrayAlmostEqual(struct.lattice.angles,\n int_s[0].lattice.angles)\n self.assertArrayAlmostEqual(struct2.lattice.abc,\n int_s[2].lattice.abc)\n self.assertArrayAlmostEqual(struct2.lattice.angles,\n int_s[2].lattice.angles)\n int_angles = [110.3976469, 94.5359731, 64.5165856]\n self.assertArrayAlmostEqual(int_angles,\n int_s[1].lattice.angles)\n\n # Assert that volume is monotonic\n self.assertTrue(struct2.lattice.volume >= int_s[1].lattice.volume)\n self.assertTrue(int_s[1].lattice.volume >= struct.lattice.volume)\n\n def test_interpolate_lattice_rotation(self):\n l1 = Lattice([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n l2 = Lattice([[-1.01, 0, 0], [0, -1.01, 0], [0, 0, 1]])\n coords = [[0, 0, 0], [0.75, 0.5, 0.75]]\n struct1 = IStructure(l1, [\"Si\"] * 2, coords)\n struct2 = IStructure(l2, [\"Si\"] * 2, coords)\n int_s = struct1.interpolate(struct2, 2, interpolate_lattices=True)\n\n # Assert that volume is monotonic\n self.assertTrue(struct2.lattice.volume >= int_s[1].lattice.volume)\n self.assertTrue(int_s[1].lattice.volume >= struct1.lattice.volume)\n\n def test_get_primitive_structure(self):\n coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]\n fcc_ag = IStructure(Lattice.cubic(4.09), [\"Ag\"] * 4, coords)\n 
self.assertEqual(len(fcc_ag.get_primitive_structure()), 1)\n coords = [[0, 0, 0], [0.5, 0.5, 0.5]]\n bcc_li = IStructure(Lattice.cubic(4.09), [\"Li\"] * 2, coords)\n bcc_prim = bcc_li.get_primitive_structure()\n self.assertEqual(len(bcc_prim), 1)\n self.assertAlmostEqual(bcc_prim.lattice.alpha, 109.47122, 3)\n\n coords = [[0] * 3, [0.5] * 3, [0.25] * 3, [0.26] * 3]\n s = IStructure(Lattice.cubic(4.09), [\"Ag\"] * 4, coords)\n self.assertEqual(len(s.get_primitive_structure()), 4)\n\n def test_primitive_cell_site_merging(self):\n l = Lattice.cubic(10)\n coords = [[0, 0, 0], [0, 0, 0.5],\n [0, 0, 0.26], [0, 0, 0.74]]\n sp = ['Ag', 'Ag', 'Be', 'Be']\n s = Structure(l, sp, coords)\n dm = s.get_primitive_structure().distance_matrix\n self.assertArrayAlmostEqual(dm, [[0, 2.5], [2.5, 0]])\n\n def test_primitive_on_large_supercell(self):\n coords = [[0, 0, 0], [0.5, 0.5, 0], [0, 0.5, 0.5], [0.5, 0, 0.5]]\n fcc_ag = Structure(Lattice.cubic(4.09), [\"Ag\"] * 4, coords)\n fcc_ag.make_supercell([2, 2, 2])\n fcc_ag_prim = fcc_ag.get_primitive_structure()\n self.assertEqual(len(fcc_ag_prim), 1)\n self.assertAlmostEqual(fcc_ag_prim.volume, 17.10448225)\n\n def test_primitive_positions(self):\n coords = [[0, 0, 0], [0.3, 0.35, 0.45]]\n s = Structure(Lattice.from_parameters(1,2,3,50,66,88), [\"Ag\"] * 2, coords)\n\n a = [[-1,2,-3], [3,2,-4], [1,0,-1]]\n b = [[4, 0, 0], [1, 1, 0], [3, 0, 1]]\n c = [[2, 0, 0], [1, 3, 0], [1, 1, 1]]\n\n for sc_matrix in [c]:\n sc = s.copy()\n sc.make_supercell(sc_matrix)\n prim = sc.get_primitive_structure(0.01)\n\n self.assertEqual(len(prim), 2)\n self.assertAlmostEqual(prim.distance_matrix[0,1], 1.0203432356739286)\n\n def test_primitive_structure_volume_check(self):\n l = Lattice.tetragonal(10, 30)\n coords = [[0.5, 0.8, 0], [0.5, 0.2, 0],\n [0.5, 0.8, 0.333], [0.5, 0.5, 0.333],\n [0.5, 0.5, 0.666], [0.5, 0.2, 0.666]]\n s = IStructure(l, [\"Ag\"] * 6, coords)\n sprim = s.get_primitive_structure(tolerance=0.1)\n self.assertEqual(len(sprim), 6)\n\n def test_get_all_neighbors_and_get_neighbors(self):\n s = self.struct\n nn = s.get_neighbors_in_shell(s[0].frac_coords, 2, 4,\n include_index=True)\n self.assertEqual(len(nn), 47)\n self.assertEqual(nn[0][-1], 0)\n\n r = random.uniform(3, 6)\n all_nn = s.get_all_neighbors(r, True)\n for i in range(len(s)):\n self.assertEqual(len(all_nn[i]), len(s.get_neighbors(s[i], r)))\n\n for site, nns in zip(s, all_nn):\n for nn in nns:\n self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))\n d = sum((site.coords - nn[0].coords) ** 2) ** 0.5\n self.assertAlmostEqual(d, nn[1])\n\n s = Structure(Lattice.cubic(1), ['Li'], [[0,0,0]])\n s.make_supercell([2,2,2])\n self.assertEqual(sum(map(len, s.get_all_neighbors(3))), 976)\n\n\n def test_get_all_neighbors_outside_cell(self):\n s = Structure(Lattice.cubic(2), ['Li', 'Li', 'Li', 'Si'],\n [[3.1] * 3, [0.11] * 3, [-1.91] * 3, [0.5] * 3])\n all_nn = s.get_all_neighbors(0.2, True)\n for site, nns in zip(s, all_nn):\n for nn in nns:\n self.assertTrue(nn[0].is_periodic_image(s[nn[2]]))\n d = sum((site.coords - nn[0].coords) ** 2) ** 0.5\n self.assertAlmostEqual(d, nn[1])\n self.assertEqual(list(map(len, all_nn)), [2, 2, 2, 0])\n\n def test_get_dist_matrix(self):\n ans = [[0., 2.3516318],\n [2.3516318, 0.]]\n self.assertArrayAlmostEqual(self.struct.distance_matrix, ans)\n\n def test_to_from_file_string(self):\n for fmt in [\"cif\", \"json\", \"poscar\", \"cssr\"]:\n s = self.struct.to(fmt=fmt)\n self.assertIsNotNone(s)\n ss = IStructure.from_str(s, fmt=fmt)\n self.assertArrayAlmostEqual(\n 
ss.lattice.lengths_and_angles,\n self.struct.lattice.lengths_and_angles, decimal=5)\n self.assertArrayAlmostEqual(ss.frac_coords, self.struct.frac_coords)\n self.assertIsInstance(ss, IStructure)\n\n self.struct.to(filename=\"POSCAR.testing\")\n self.assertTrue(os.path.exists(\"POSCAR.testing\"))\n os.remove(\"POSCAR.testing\")\n\n self.struct.to(filename=\"Si_testing.yaml\")\n self.assertTrue(os.path.exists(\"Si_testing.yaml\"))\n s = Structure.from_file(\"Si_testing.yaml\")\n self.assertEqual(s, self.struct)\n os.remove(\"Si_testing.yaml\")\n\n self.struct.to(filename=\"POSCAR.testing.gz\")\n s = Structure.from_file(\"POSCAR.testing.gz\")\n self.assertEqual(s, self.struct)\n os.remove(\"POSCAR.testing.gz\")\n\n\nclass StructureTest(PymatgenTest):\n\n def setUp(self):\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n lattice = Lattice([[3.8401979337, 0.00, 0.00],\n [1.9200989668, 3.3257101909, 0.00],\n [0.00, -2.2171384943, 3.1355090603]])\n self.structure = Structure(lattice, [\"Si\", \"Si\"], coords)\n\n def test_mutable_sequence_methods(self):\n s = self.structure\n s[0] = \"Fe\"\n self.assertEqual(s.formula, \"Fe1 Si1\")\n s[0] = \"Fe\", [0.5, 0.5, 0.5]\n self.assertEqual(s.formula, \"Fe1 Si1\")\n self.assertArrayAlmostEqual(s[0].frac_coords, [0.5, 0.5, 0.5])\n s.reverse()\n self.assertEqual(s[0].specie, Element(\"Si\"))\n self.assertArrayAlmostEqual(s[0].frac_coords, [0.75, 0.5, 0.75])\n s[0] = {\"Mn\": 0.5}\n self.assertEqual(s.formula, \"Mn0.5 Fe1\")\n del s[1]\n self.assertEqual(s.formula, \"Mn0.5\")\n s[0] = \"Fe\", [0.9, 0.9, 0.9], {\"magmom\": 5}\n self.assertEqual(s.formula, \"Fe1\")\n self.assertEqual(s[0].magmom, 5)\n\n def test_non_hash(self):\n self.assertRaises(TypeError, dict, [(self.structure, 1)])\n\n def test_sort(self):\n s = self.structure\n s[0] = \"F\"\n s.sort()\n self.assertEqual(s[0].species_string, \"Si\")\n self.assertEqual(s[1].species_string, \"F\")\n s.sort(key=lambda site: site.species_string)\n self.assertEqual(s[0].species_string, \"F\")\n self.assertEqual(s[1].species_string, \"Si\")\n s.sort(key=lambda site: site.species_string, reverse=True)\n self.assertEqual(s[0].species_string, \"Si\")\n self.assertEqual(s[1].species_string, \"F\")\n\n def test_append_insert_remove_replace(self):\n s = self.structure\n s.insert(1, \"O\", [0.5, 0.5, 0.5])\n self.assertEqual(s.formula, \"Si2 O1\")\n self.assertTrue(s.ntypesp == 2)\n self.assertTrue(s.symbol_set == (\"Si\", \"O\"))\n self.assertTrue(s.indices_from_symbol(\"Si\") == (0,2))\n self.assertTrue(s.indices_from_symbol(\"O\") == (1,))\n del s[2]\n self.assertEqual(s.formula, \"Si1 O1\")\n self.assertTrue(s.indices_from_symbol(\"Si\") == (0,))\n self.assertTrue(s.indices_from_symbol(\"O\") == (1,))\n s.append(\"N\", [0.25, 0.25, 0.25])\n self.assertEqual(s.formula, \"Si1 N1 O1\")\n self.assertTrue(s.ntypesp == 3)\n self.assertTrue(s.symbol_set == (\"Si\", \"O\", \"N\"))\n self.assertTrue(s.indices_from_symbol(\"Si\") == (0,))\n self.assertTrue(s.indices_from_symbol(\"O\") == (1,))\n self.assertTrue(s.indices_from_symbol(\"N\") == (2,))\n s[0] = \"Ge\"\n self.assertEqual(s.formula, \"Ge1 N1 O1\")\n self.assertTrue(s.symbol_set == (\"Ge\", \"O\", \"N\"))\n s.replace_species({\"Ge\": \"Si\"})\n self.assertEqual(s.formula, \"Si1 N1 O1\")\n self.assertTrue(s.ntypesp == 3)\n\n s.replace_species({\"Si\": {\"Ge\": 0.5, \"Si\": 0.5}})\n self.assertEqual(s.formula, \"Si0.5 Ge0.5 N1 O1\")\n #this should change the .5Si .5Ge sites to .75Si .25Ge\n s.replace_species({\"Ge\": {\"Ge\": 0.5, 
\"Si\": 0.5}})\n self.assertEqual(s.formula, \"Si0.75 Ge0.25 N1 O1\")\n\n # In this case, s.ntypesp is ambiguous.\n # for the time being, we raise AttributeError.\n with self.assertRaises(AttributeError):\n s.ntypesp\n\n s.remove_species([\"Si\"])\n self.assertEqual(s.formula, \"Ge0.25 N1 O1\")\n\n s.remove_sites([1, 2])\n self.assertEqual(s.formula, \"Ge0.25\")\n\n def test_add_site_property(self):\n s = self.structure\n s.add_site_property(\"charge\", [4.1, -5])\n self.assertEqual(s[0].charge, 4.1)\n self.assertEqual(s[1].charge, -5)\n s.add_site_property(\"magmom\", [3, 2])\n self.assertEqual(s[0].charge, 4.1)\n self.assertEqual(s[0].magmom, 3)\n\n def test_propertied_structure(self):\n #Make sure that site properties are set to None for missing values.\n s = self.structure\n s.add_site_property(\"charge\", [4.1, -5])\n s.append(\"Li\", [0.3, 0.3 ,0.3])\n self.assertEqual(len(s.site_properties[\"charge\"]), 3)\n\n def test_perturb(self):\n d = 0.1\n pre_perturbation_sites = self.structure.sites[:]\n self.structure.perturb(distance=d)\n post_perturbation_sites = self.structure.sites\n\n for i, x in enumerate(pre_perturbation_sites):\n self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,\n 3, \"Bad perturbation distance\")\n\n def test_add_oxidation_states(self):\n oxidation_states = {\"Si\": -4}\n self.structure.add_oxidation_state_by_element(oxidation_states)\n for site in self.structure:\n for k in site.species_and_occu.keys():\n self.assertEqual(k.oxi_state, oxidation_states[k.symbol],\n \"Wrong oxidation state assigned!\")\n oxidation_states = {\"Fe\": 2}\n self.assertRaises(ValueError,\n self.structure.add_oxidation_state_by_element,\n oxidation_states)\n self.structure.add_oxidation_state_by_site([2, -4])\n self.assertEqual(self.structure[0].specie.oxi_state, 2)\n self.assertRaises(ValueError,\n self.structure.add_oxidation_state_by_site,\n [1])\n\n def test_remove_oxidation_states(self):\n co_elem = Element(\"Co\")\n o_elem = Element(\"O\")\n co_specie = Specie(\"Co\", 2)\n o_specie = Specie(\"O\", -2)\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0.75, 0.5, 0.75])\n lattice = Lattice.cubic(10)\n s_elem = Structure(lattice, [co_elem, o_elem], coords)\n s_specie = Structure(lattice, [co_specie, o_specie], coords)\n s_specie.remove_oxidation_states()\n self.assertEqual(s_elem, s_specie, \"Oxidation state remover \"\n \"failed\")\n\n def test_apply_operation(self):\n op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)\n s = self.structure.copy()\n s.apply_operation(op)\n self.assertArrayAlmostEqual(\n s.lattice.matrix,\n [[0.000000, 3.840198, 0.000000],\n [-3.325710, 1.920099, 0.000000],\n [2.217138, -0.000000, 3.135509]], 5)\n\n op = SymmOp([[1, 1, 0, 0.5], [1, 0, 0, 0.5], [0, 0, 1, 0.5],\n [0, 0, 0, 1]])\n s = self.structure.copy()\n s.apply_operation(op, fractional=True)\n self.assertArrayAlmostEqual(\n s.lattice.matrix,\n [[5.760297, 3.325710, 0.000000],\n [3.840198, 0.000000, 0.000000],\n [0.000000, -2.217138, 3.135509]], 5)\n\n def test_apply_strain(self):\n s = self.structure\n initial_coord = s[1].coords\n s.apply_strain(0.01)\n self.assertAlmostEqual(\n s.lattice.abc,\n (3.8785999130369997, 3.878600984287687, 3.8785999130549516))\n self.assertArrayAlmostEqual(s[1].coords, initial_coord * 1.01)\n a1, b1, c1 = s.lattice.abc\n s.apply_strain([0.1, 0.2, 0.3])\n a2, b2, c2 = s.lattice.abc\n self.assertAlmostEqual(a2 / a1, 1.1)\n self.assertAlmostEqual(b2 / b1, 1.2)\n self.assertAlmostEqual(c2 / c1, 1.3)\n\n def test_scale_lattice(self):\n 
initial_coord = self.structure[1].coords\n self.structure.scale_lattice(self.structure.volume * 1.01 ** 3)\n self.assertArrayAlmostEqual(\n self.structure.lattice.abc,\n (3.8785999130369997, 3.878600984287687, 3.8785999130549516))\n self.assertArrayAlmostEqual(self.structure[1].coords,\n initial_coord * 1.01)\n\n def test_translate_sites(self):\n self.structure.translate_sites([0, 1], [0.5, 0.5, 0.5],\n frac_coords=True)\n self.assertArrayEqual(self.structure.frac_coords[0],\n [0.5, 0.5, 0.5])\n\n self.structure.translate_sites([0], [0.5, 0.5, 0.5],\n frac_coords=False)\n self.assertArrayAlmostEqual(self.structure.cart_coords[0],\n [3.38014845, 1.05428585, 2.06775453])\n\n self.structure.translate_sites([0], [0.5, 0.5, 0.5],\n frac_coords=True, to_unit_cell=False)\n self.assertArrayAlmostEqual(self.structure.frac_coords[0],\n [1.00187517, 1.25665291, 1.15946374])\n\n def test_mul(self):\n self.structure *= [2, 1, 1]\n self.assertEqual(self.structure.formula, \"Si4\")\n s = [2, 1, 1] * self.structure\n self.assertEqual(s.formula, \"Si8\")\n self.assertIsInstance(s, Structure)\n s = self.structure * [[1, 0, 0], [2, 1, 0], [0, 0, 2]]\n self.assertEqual(s.formula, \"Si8\")\n self.assertArrayAlmostEqual(s.lattice.abc,\n [7.6803959, 17.5979979, 7.6803959])\n\n def test_make_supercell(self):\n self.structure.make_supercell([2, 1, 1])\n self.assertEqual(self.structure.formula, \"Si4\")\n self.structure.make_supercell([[1, 0, 0], [2, 1, 0], [0, 0, 1]])\n self.assertEqual(self.structure.formula, \"Si4\")\n self.structure.make_supercell(2)\n self.assertEqual(self.structure.formula, \"Si32\")\n self.assertArrayAlmostEqual(self.structure.lattice.abc,\n [15.360792, 35.195996, 7.680396], 5)\n\n def test_disordered_supercell_primitive_cell(self):\n l = Lattice.cubic(2)\n f = [[0.5, 0.5, 0.5]]\n sp = [{'Si': 0.54738}]\n s = Structure(l, sp, f)\n #this supercell often breaks things\n s.make_supercell([[0,-1,1],[-1,1,0],[1,1,1]])\n self.assertEqual(len(s.get_primitive_structure()), 1)\n\n def test_another_supercell(self):\n #this is included b/c for some reason the old algo was failing on it\n s = self.structure.copy()\n s.make_supercell([[0, 2, 2], [2, 0, 2], [2, 2, 0]])\n self.assertEqual(s.formula, \"Si32\")\n s = self.structure.copy()\n s.make_supercell([[0, 2, 0], [1, 0, 0], [0, 0, 1]])\n self.assertEqual(s.formula, \"Si4\")\n\n def test_to_from_dict(self):\n d = self.structure.as_dict()\n s2 = Structure.from_dict(d)\n self.assertEqual(type(s2), Structure)\n\n def test_to_from_file_string(self):\n for fmt in [\"cif\", \"json\", \"poscar\", \"cssr\", \"yaml\", \"xsf\"]:\n s = self.structure.to(fmt=fmt)\n self.assertIsNotNone(s)\n ss = Structure.from_str(s, fmt=fmt)\n self.assertArrayAlmostEqual(\n ss.lattice.lengths_and_angles,\n self.structure.lattice.lengths_and_angles, decimal=5)\n self.assertArrayAlmostEqual(ss.frac_coords,\n self.structure.frac_coords)\n self.assertIsInstance(ss, Structure)\n\n self.structure.to(filename=\"POSCAR.testing\")\n self.assertTrue(os.path.exists(\"POSCAR.testing\"))\n os.remove(\"POSCAR.testing\")\n\n self.structure.to(filename=\"structure_testing.json\")\n self.assertTrue(os.path.exists(\"structure_testing.json\"))\n s = Structure.from_file(\"structure_testing.json\")\n self.assertEqual(s, self.structure)\n os.remove(\"structure_testing.json\")\n\n def test_from_spacegroup(self):\n s1 = Structure.from_spacegroup(\"Fm-3m\", Lattice.cubic(3), [\"Li\", \"O\"],\n [[0.25, 0.25, 0.25], [0, 0, 0]])\n self.assertEqual(s1.formula, \"Li8 O4\")\n s2 = Structure.from_spacegroup(225, 
Lattice.cubic(3), [\"Li\", \"O\"],\n [[0.25, 0.25, 0.25], [0, 0, 0]])\n self.assertEqual(s1, s2)\n\n s2 = Structure.from_spacegroup(225, Lattice.cubic(3), [\"Li\", \"O\"],\n [[0.25, 0.25, 0.25], [0, 0, 0]],\n site_properties={\"charge\": [1, -2]})\n self.assertEqual(sum(s2.site_properties[\"charge\"]), 0)\n\n s = Structure.from_spacegroup(\"Pm-3m\", Lattice.cubic(3), [\"Cs\", \"Cl\"],\n [[0, 0, 0], [0.5, 0.5, 0.5]])\n self.assertEqual(s.formula, \"Cs1 Cl1\")\n\n self.assertRaises(ValueError, Structure.from_spacegroup,\n \"Pm-3m\", Lattice.tetragonal(1, 3), [\"Cs\", \"Cl\"],\n [[0, 0, 0], [0.5, 0.5, 0.5]])\n\n self.assertRaises(ValueError, Structure.from_spacegroup,\n \"Pm-3m\", Lattice.cubic(3), [\"Cs\"],\n [[0, 0, 0], [0.5, 0.5, 0.5]])\n\n def test_merge_sites(self):\n species = [{'Ag': 0.5}, {'Cl': 0.25}, {'Cl': 0.1},\n {'Ag': 0.5}, {'F': 0.15}, {'F': 0.1}]\n coords = [[0, 0, 0], [0.5, 0.5, 0.5], [0.5, 0.5, 0.5],\n [0, 0, 0], [0.5, 0.5, 1.501], [0.5, 0.5, 1.501]]\n s = Structure(Lattice.cubic(1), species, coords)\n s.merge_sites(mode=\"s\")\n self.assertEqual(s[0].specie.symbol, 'Ag')\n self.assertEqual(s[1].species_and_occu,\n Composition({'Cl': 0.35, 'F': 0.25}))\n self.assertArrayAlmostEqual(s[1].frac_coords, [.5, .5, .5005])\n\n # Test for TaS2 with spacegroup 166 in 160 setting.\n l = Lattice.from_lengths_and_angles([3.374351, 3.374351, 20.308941],\n [90.000000, 90.000000, 120.000000])\n species = [\"Ta\", \"S\", \"S\"]\n coords = [[0.000000, 0.000000, 0.944333], [0.333333, 0.666667, 0.353424],\n [0.666667, 0.333333, 0.535243]]\n tas2 = Structure.from_spacegroup(160, l, species, coords)\n assert len(tas2) == 13\n tas2.merge_sites(mode=\"d\")\n assert len(tas2) == 9\n\n l = Lattice.from_lengths_and_angles([3.587776, 3.587776, 19.622793],\n [90.000000, 90.000000, 120.000000])\n species = [\"Na\", \"V\", \"S\", \"S\"]\n coords = [[0.333333, 0.666667, 0.165000], [0.000000, 0.000000, 0.998333],\n [0.333333, 0.666667, 0.399394], [0.666667, 0.333333, 0.597273]]\n navs2 = Structure.from_spacegroup(160, l, species, coords)\n assert len(navs2) == 18\n navs2.merge_sites(mode=\"d\")\n assert len(navs2) == 12\n\n def test_properties(self):\n self.assertEqual(self.structure.num_sites, len(self.structure))\n self.structure.make_supercell(2)\n self.structure[1] = \"C\"\n sites = list(self.structure.group_by_types())\n self.assertEqual(sites[-1].specie.symbol, \"C\")\n self.structure.add_oxidation_state_by_element({\"Si\": 4, \"C\": 2})\n self.assertEqual(self.structure.charge, 62)\n\n def test_set_item(self):\n s = self.structure.copy()\n s[0] = \"C\"\n self.assertEqual(s.formula, \"Si1 C1\")\n s[(0, 1)] = \"Ge\"\n self.assertEqual(s.formula, \"Ge2\")\n s[0:2] = \"Sn\"\n self.assertEqual(s.formula, \"Sn2\")\n\n s = self.structure.copy()\n s[\"Si\"] = \"C\"\n self.assertEqual(s.formula, \"C2\")\n s[\"C\"] = \"C0.25Si0.5\"\n self.assertEqual(s.formula, \"Si1 C0.5\")\n s[\"C\"] = \"C0.25Si0.5\"\n self.assertEqual(s.formula, \"Si1.25 C0.125\")\n\n def test_init_error(self):\n self.assertRaises(StructureError, Structure, Lattice.cubic(3), [\"Si\"], [[0, 0, 0], [0.5, 0.5, 0.5]])\n\n def test_from_sites(self):\n self.structure.add_site_property(\"hello\", [1, 2])\n s = Structure.from_sites(self.structure, to_unit_cell=True)\n self.assertEqual(s.site_properties[\"hello\"][1], 2)\n\n def test_magic(self):\n s = Structure.from_sites(self.structure)\n self.assertEqual(s, self.structure)\n self.assertNotEqual(s, None)\n s.apply_strain(0.5)\n self.assertNotEqual(s, self.structure)\n 
self.assertNotEqual(self.structure * 2, self.structure)\n\n\nclass IMoleculeTest(PymatgenTest):\n\n def setUp(self):\n coords = [[0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.089000],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, -0.363000]]\n self.coords = coords\n self.mol = Molecule([\"C\", \"H\", \"H\", \"H\", \"H\"], coords)\n\n def test_set_item(self):\n s = self.mol.copy()\n s[0] = \"Si\"\n self.assertEqual(s.formula, \"Si1 H4\")\n s[(0, 1)] = \"Ge\"\n self.assertEqual(s.formula, \"Ge2 H3\")\n s[0:2] = \"Sn\"\n self.assertEqual(s.formula, \"Sn2 H3\")\n\n s = self.mol.copy()\n s[\"H\"] = \"F\"\n self.assertEqual(s.formula, \"C1 F4\")\n s[\"C\"] = \"C0.25Si0.5\"\n self.assertEqual(s.formula, \"Si0.5 C0.25 F4\")\n s[\"C\"] = \"C0.25Si0.5\"\n self.assertEqual(s.formula, \"Si0.625 C0.0625 F4\")\n\n def test_bad_molecule(self):\n coords = [[0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.089000],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, -0.363000],\n [-0.513360, 0.889165, -0.36301]]\n self.assertRaises(StructureError, Molecule,\n [\"C\", \"H\", \"H\", \"H\", \"H\", \"H\"], coords,\n validate_proximity=True)\n\n def test_get_angle_dihedral(self):\n self.assertAlmostEqual(self.mol.get_angle(1, 0, 2), 109.47122144618737)\n self.assertAlmostEqual(self.mol.get_angle(3, 1, 2), 60.00001388659683)\n self.assertAlmostEqual(self.mol.get_dihedral(0, 1, 2, 3),\n - 35.26438851071765)\n\n coords = list()\n coords.append([0, 0, 0])\n coords.append([0, 0, 1])\n coords.append([0, 1, 1])\n coords.append([1, 1, 1])\n self.mol2 = Molecule([\"C\", \"O\", \"N\", \"S\"], coords)\n self.assertAlmostEqual(self.mol2.get_dihedral(0, 1, 2, 3), -90)\n\n def test_get_covalent_bonds(self):\n self.assertEqual(len(self.mol.get_covalent_bonds()), 4)\n\n def test_properties(self):\n self.assertEqual(len(self.mol), 5)\n self.assertTrue(self.mol.is_ordered)\n self.assertEqual(self.mol.formula, \"H4 C1\")\n\n def test_repr_str(self):\n ans = \"\"\"Full Formula (H4 C1)\nReduced Formula: H4C\nCharge = 0, Spin Mult = 1\nSites (5)\n0 C 0.000000 0.000000 0.000000\n1 H 0.000000 0.000000 1.089000\n2 H 1.026719 0.000000 -0.363000\n3 H -0.513360 -0.889165 -0.363000\n4 H -0.513360 0.889165 -0.363000\"\"\"\n self.assertEqual(self.mol.__str__(), ans)\n ans = \"\"\"Molecule Summary\nSite: C (0.0000, 0.0000, 0.0000)\nSite: H (0.0000, 0.0000, 1.0890)\nSite: H (1.0267, 0.0000, -0.3630)\nSite: H (-0.5134, -0.8892, -0.3630)\nSite: H (-0.5134, 0.8892, -0.3630)\"\"\"\n self.assertEqual(repr(self.mol), ans)\n\n def test_site_properties(self):\n propertied_mol = Molecule([\"C\", \"H\", \"H\", \"H\", \"H\"], self.coords,\n site_properties={'magmom':\n [0.5, -0.5, 1, 2, 3]})\n self.assertEqual(propertied_mol[0].magmom, 0.5)\n self.assertEqual(propertied_mol[1].magmom, -0.5)\n\n def test_get_boxed_structure(self):\n s = self.mol.get_boxed_structure(9, 9, 9)\n # C atom should be in center of box.\n self.assertArrayAlmostEqual(s[4].frac_coords,\n [0.50000001, 0.5, 0.5])\n self.assertArrayAlmostEqual(s[1].frac_coords,\n [0.6140799, 0.5, 0.45966667])\n self.assertRaises(ValueError, self.mol.get_boxed_structure, 1, 1, 1)\n s2 = self.mol.get_boxed_structure(5, 5, 5, (2, 3, 4))\n self.assertEqual(len(s2), 24 * 5)\n self.assertEqual(s2.lattice.abc, (10, 15, 20))\n\n # Test offset option\n s3 = self.mol.get_boxed_structure(9, 9, 9, offset=[0.5,0.5,0.5])\n self.assertArrayAlmostEqual(s3[4].coords,\n [5,5,5])\n # Test no_cross option\n 
self.assertRaises(ValueError, self.mol.get_boxed_structure,\n 5, 5, 5, offset=[10,10,10],no_cross = True)\n\n def test_get_distance(self):\n self.assertAlmostEqual(self.mol.get_distance(0, 1), 1.089)\n\n def test_get_neighbors(self):\n nn = self.mol.get_neighbors(self.mol[0], 1)\n self.assertEqual(len(nn), 0)\n nn = self.mol.get_neighbors(self.mol[0], 2)\n self.assertEqual(len(nn), 4)\n\n def test_get_neighbors_in_shell(self):\n nn = self.mol.get_neighbors_in_shell([0, 0, 0], 0, 1)\n self.assertEqual(len(nn), 1)\n nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)\n self.assertEqual(len(nn), 4)\n nn = self.mol.get_neighbors_in_shell([0, 0, 0], 1, 0.9)\n self.assertEqual(len(nn), 4)\n nn = self.mol.get_neighbors_in_shell([0, 0, 0], 2, 0.1)\n self.assertEqual(len(nn), 0)\n\n def test_get_dist_matrix(self):\n ans = [[0.0, 1.089, 1.08899995636, 1.08900040717, 1.08900040717],\n [1.089, 0.0, 1.77832952654, 1.7783298026, 1.7783298026],\n [1.08899995636, 1.77832952654, 0.0, 1.77833003783,\n 1.77833003783],\n [1.08900040717, 1.7783298026, 1.77833003783, 0.0, 1.77833],\n [1.08900040717, 1.7783298026, 1.77833003783, 1.77833, 0.0]]\n self.assertArrayAlmostEqual(self.mol.distance_matrix, ans)\n\n def test_break_bond(self):\n (mol1, mol2) = self.mol.break_bond(0, 1)\n self.assertEqual(mol1.formula, \"H3 C1\")\n self.assertEqual(mol2.formula, \"H1\")\n\n def test_prop(self):\n self.assertEqual(self.mol.charge, 0)\n self.assertEqual(self.mol.spin_multiplicity, 1)\n self.assertEqual(self.mol.nelectrons, 10)\n self.assertArrayAlmostEqual(self.mol.center_of_mass, [0, 0, 0])\n self.assertRaises(ValueError, Molecule, [\"C\", \"H\", \"H\", \"H\", \"H\"],\n self.coords, charge=1, spin_multiplicity=1)\n mol = Molecule([\"C\", \"H\", \"H\", \"H\", \"H\"], self.coords, charge=1)\n self.assertEqual(mol.spin_multiplicity, 2)\n self.assertEqual(mol.nelectrons, 9)\n\n #Triplet O2\n mol = IMolecule([\"O\"] * 2, [[0, 0, 0], [0, 0, 1.2]],\n spin_multiplicity=3)\n self.assertEqual(mol.spin_multiplicity, 3)\n\n def test_equal(self):\n mol = IMolecule([\"C\", \"H\", \"H\", \"H\", \"H\"], self.coords, charge=1)\n self.assertNotEqual(mol, self.mol)\n\n def test_get_centered_molecule(self):\n mol = IMolecule([\"O\"] * 2, [[0, 0, 0], [0, 0, 1.2]],\n spin_multiplicity=3)\n centered = mol.get_centered_molecule()\n self.assertArrayAlmostEqual(centered.center_of_mass, [0, 0, 0])\n\n def test_to_from_dict(self):\n d = self.mol.as_dict()\n mol2 = IMolecule.from_dict(d)\n self.assertEqual(type(mol2), IMolecule)\n propertied_mol = Molecule([\"C\", \"H\", \"H\", \"H\", \"H\"], self.coords,\n charge=1,\n site_properties={'magmom':\n [0.5, -0.5, 1, 2, 3]})\n d = propertied_mol.as_dict()\n self.assertEqual(d['sites'][0]['properties']['magmom'], 0.5)\n mol = Molecule.from_dict(d)\n self.assertEqual(propertied_mol, mol)\n self.assertEqual(mol[0].magmom, 0.5)\n self.assertEqual(mol.formula, \"H4 C1\")\n self.assertEqual(mol.charge, 1)\n\n def test_to_from_file_string(self):\n for fmt in [\"xyz\", \"json\", \"g03\", \"yaml\"]:\n s = self.mol.to(fmt=fmt)\n self.assertIsNotNone(s)\n m = IMolecule.from_str(s, fmt=fmt)\n self.assertEqual(m, self.mol)\n self.assertIsInstance(m, IMolecule)\n\n self.mol.to(filename=\"CH4_testing.xyz\")\n self.assertTrue(os.path.exists(\"CH4_testing.xyz\"))\n os.remove(\"CH4_testing.xyz\")\n self.mol.to(filename=\"CH4_testing.yaml\")\n self.assertTrue(os.path.exists(\"CH4_testing.yaml\"))\n mol = Molecule.from_file(\"CH4_testing.yaml\")\n self.assertEqual(self.mol, mol)\n os.remove(\"CH4_testing.yaml\")\n\n\nclass 
MoleculeTest(PymatgenTest):\n\n def setUp(self):\n coords = [[0.000000, 0.000000, 0.000000],\n [0.000000, 0.000000, 1.089000],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, -0.363000]]\n self.mol = Molecule([\"C\", \"H\", \"H\", \"H\", \"H\"], coords)\n\n def test_mutable_sequence_methods(self):\n s = self.mol\n s[1] = (\"F\", [0.5, 0.5, 0.5])\n self.assertEqual(s.formula, \"H3 C1 F1\")\n self.assertArrayAlmostEqual(s[1].coords, [0.5, 0.5, 0.5])\n s.reverse()\n self.assertEqual(s[0].specie, Element(\"H\"))\n self.assertArrayAlmostEqual(s[0].coords,\n [-0.513360, 0.889165, -0.363000])\n del s[1]\n self.assertEqual(s.formula, \"H2 C1 F1\")\n s[3] = \"N\", [0,0,0], {\"charge\": 4}\n self.assertEqual(s.formula, \"H2 N1 F1\")\n self.assertEqual(s[3].charge, 4)\n\n def test_insert_remove_append(self):\n mol = self.mol\n mol.insert(1, \"O\", [0.5, 0.5, 0.5])\n self.assertEqual(mol.formula, \"H4 C1 O1\")\n del mol[2]\n self.assertEqual(mol.formula, \"H3 C1 O1\")\n mol.set_charge_and_spin(0)\n self.assertEqual(mol.spin_multiplicity, 2)\n mol.append(\"N\", [1, 1, 1])\n self.assertEqual(mol.formula, \"H3 C1 N1 O1\")\n self.assertRaises(TypeError, dict, [(mol, 1)])\n mol.remove_sites([0, 1])\n self.assertEqual(mol.formula, \"H3 N1\")\n\n def test_translate_sites(self):\n self.mol.translate_sites([0, 1], [0.5, 0.5, 0.5])\n self.assertArrayEqual(self.mol.cart_coords[0],\n [0.5, 0.5, 0.5])\n\n def test_rotate_sites(self):\n self.mol.rotate_sites(theta=np.radians(30))\n self.assertArrayAlmostEqual(self.mol.cart_coords[2],\n [ 0.889164737, 0.513359500, -0.363000000])\n\n def test_replace(self):\n self.mol[0] = \"Ge\"\n self.assertEqual(self.mol.formula, \"Ge1 H4\")\n\n self.mol.replace_species({Element(\"Ge\"): {Element(\"Ge\"): 0.5,\n Element(\"Si\"): 0.5}})\n self.assertEqual(self.mol.formula, \"Si0.5 Ge0.5 H4\")\n\n #this should change the .5Si .5Ge sites to .75Si .25Ge\n self.mol.replace_species({Element(\"Ge\"): {Element(\"Ge\"): 0.5,\n Element(\"Si\"): 0.5}})\n self.assertEqual(self.mol.formula, \"Si0.75 Ge0.25 H4\")\n\n d = 0.1\n pre_perturbation_sites = self.mol.sites[:]\n self.mol.perturb(distance=d)\n post_perturbation_sites = self.mol.sites\n\n for i, x in enumerate(pre_perturbation_sites):\n self.assertAlmostEqual(x.distance(post_perturbation_sites[i]), d,\n 3, \"Bad perturbation distance\")\n\n def test_add_site_property(self):\n self.mol.add_site_property(\"charge\", [4.1, -2, -2, -2, -2])\n self.assertEqual(self.mol[0].charge, 4.1)\n self.assertEqual(self.mol[1].charge, -2)\n\n self.mol.add_site_property(\"magmom\", [3, 2, 2, 2, 2])\n self.assertEqual(self.mol[0].charge, 4.1)\n self.assertEqual(self.mol[0].magmom, 3)\n\n def test_to_from_dict(self):\n d = self.mol.as_dict()\n mol2 = Molecule.from_dict(d)\n self.assertEqual(type(mol2), Molecule)\n\n def test_apply_operation(self):\n op = SymmOp.from_axis_angle_and_translation([0, 0, 1], 90)\n self.mol.apply_operation(op)\n self.assertArrayAlmostEqual(self.mol[2].coords,\n [0.000000, 1.026719, -0.363000])\n\n def test_substitute(self):\n coords = [[0.000000, 0.000000, 1.08],\n [0.000000, 0.000000, 0.000000],\n [1.026719, 0.000000, -0.363000],\n [-0.513360, -0.889165, -0.363000],\n [-0.513360, 0.889165, -0.363000]]\n sub = Molecule([\"X\", \"C\", \"H\", \"H\", \"H\"], coords)\n self.mol.substitute(1, sub)\n self.assertAlmostEqual(self.mol.get_distance(0, 4), 1.54)\n f = Molecule([\"X\", \"F\"], [[0, 0, 0], [0, 0, 1.11]])\n self.mol.substitute(2, f)\n 
self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.35)\n oh = Molecule([\"X\", \"O\", \"H\"],\n [[0, 0.780362, -.456316], [0, 0, .114079],\n [0, -.780362, -.456316]])\n self.mol.substitute(1, oh)\n self.assertAlmostEqual(self.mol.get_distance(0, 7), 1.43)\n self.mol.substitute(3, \"methyl\")\n self.assertEqual(self.mol.formula, \"H7 C3 O1 F1\")\n coords = [[0.00000, 1.40272, 0.00000],\n [0.00000, 2.49029, 0.00000],\n [-1.21479, 0.70136, 0.00000],\n [-2.15666, 1.24515, 0.00000],\n [-1.21479, -0.70136, 0.00000],\n [-2.15666, -1.24515, 0.00000],\n [0.00000, -1.40272, 0.00000],\n [0.00000, -2.49029, 0.00000],\n [1.21479, -0.70136, 0.00000],\n [2.15666, -1.24515, 0.00000],\n [1.21479, 0.70136, 0.00000],\n [2.15666, 1.24515, 0.00000]]\n benzene = Molecule([\"C\", \"H\", \"C\", \"H\", \"C\", \"H\", \"C\", \"H\", \"C\", \"H\",\n \"C\", \"H\"], coords)\n benzene.substitute(1, sub)\n self.assertEqual(benzene.formula, \"H8 C7\")\n #Carbon attached should be in plane.\n self.assertAlmostEqual(benzene[11].coords[2], 0)\n\n def test_to_from_file_string(self):\n for fmt in [\"xyz\", \"json\", \"g03\"]:\n s = self.mol.to(fmt=fmt)\n self.assertIsNotNone(s)\n m = Molecule.from_str(s, fmt=fmt)\n self.assertEqual(m, self.mol)\n self.assertIsInstance(m, Molecule)\n\n self.mol.to(filename=\"CH4_testing.xyz\")\n self.assertTrue(os.path.exists(\"CH4_testing.xyz\"))\n os.remove(\"CH4_testing.xyz\")\n\n\nif __name__ == '__main__':\n import unittest2 as unittest\n unittest.main()\n"
] |
[
[
"numpy.radians"
]
] |
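Illustrative sketch (not part of the record above): the test file in this row exercises pymatgen's Structure API, in particular Structure.from_spacegroup and make_supercell. A minimal stand-alone version of the two calls its assertions rely on, assuming a recent pymatgen install (the import path differs between pymatgen versions):

from pymatgen.core import Lattice, Structure  # import path assumed; older releases used "from pymatgen import ..."

s = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Li", "O"],
                              [[0.25, 0.25, 0.25], [0, 0, 0]])
print(s.formula)             # "Li8 O4", as asserted in test_from_spacegroup
s.make_supercell([2, 1, 1])  # in-place supercell, doubles the site count
print(len(s))                # 24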
kacel33/ActionAI_PC
|
[
"a0528f49ea61cc07d7c1e9a3cd6846e5f50cfae7"
] |
[
"lib/utils/paf_to_pose.py"
] |
[
"import cv2\nimport numpy as np\nimport time\nfrom scipy.ndimage.filters import gaussian_filter, maximum_filter\n\nfrom scipy.ndimage.morphology import generate_binary_structure\nfrom lib.pafprocess import pafprocess\n\nfrom lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender\n\n# Heatmap indices to find each limb (joint connection). Eg: limb_type=1 is\n# Neck->LShoulder, so joint_to_limb_heatmap_relationship[1] represents the\n# indices of heatmaps to look for joints: neck=1, LShoulder=5\n\njoint_to_limb_heatmap_relationship = [[1, 2], [2, 3], [3, 4], [1, 5], [5, 6], [6, 7], [1, 0]]\n\n# PAF indices containing the x and y coordinates of the PAF for a given limb.\n# Eg: limb_type=1 is Neck->LShoulder, so\n# PAFneckLShoulder_x=paf_xy_coords_per_limb[1][0] and\n# PAFneckLShoulder_y=paf_xy_coords_per_limb[1][1]\npaf_xy_coords_per_limb = np.arange(14).reshape(7, 2)\nNUM_LIMBS = len(joint_to_limb_heatmap_relationship)\n\n\ndef find_peaks(param, img):\n \"\"\"\n Given a (grayscale) image, find local maxima whose value is above a given\n threshold (param['thre1'])\n :param img: Input image (2d array) where we want to find peaks\n :return: 2d np.array containing the [x,y] coordinates of each peak found\n in the image\n \"\"\"\n\n peaks_binary = (maximum_filter(img, footprint=generate_binary_structure(\n 2, 1)) == img) * (img > param)\n # Note reverse ([::-1]): we return [[x y], [x y]...] instead of [[y x], [y\n # x]...]\n return np.array(np.nonzero(peaks_binary)[::-1]).T\n\n\ndef compute_resized_coords(coords, resizeFactor):\n \"\"\"\n Given the index/coordinates of a cell in some input array (e.g. image),\n provides the new coordinates if that array was resized by making it\n resizeFactor times bigger.\n E.g.: image of size 3x3 is resized to 6x6 (resizeFactor=2), we'd like to\n know the new coordinates of cell [1,2] -> Function would return [2.5,4.5]\n :param coords: Coordinates (indices) of a cell in some input array\n :param resizeFactor: Resize coefficient = shape_dest/shape_source. 
E.g.:\n resizeFactor=2 means the destination array is twice as big as the\n original one\n :return: Coordinates in an array of size\n shape_dest=resizeFactor*shape_source, expressing the array indices of the\n closest point to 'coords' if an image of size shape_source was resized to\n shape_dest\n \"\"\"\n\n # 1) Add 0.5 to coords to get coordinates of center of the pixel (e.g.\n # index [0,0] represents the pixel at location [0.5,0.5])\n # 2) Transform those coordinates to shape_dest, by multiplying by resizeFactor\n # 3) That number represents the location of the pixel center in the new array,\n # so subtract 0.5 to get coordinates of the array index/indices (revert\n # step 1)\n return (np.array(coords, dtype=float) + 0.5) * resizeFactor - 0.5\n\n\ndef NMS(heatmaps, upsampFactor=1., bool_refine_center=True, bool_gaussian_filt=False, config=None):\n \"\"\"\n NonMaximaSuppression: find peaks (local maxima) in a set of grayscale images\n :param heatmaps: set of grayscale images on which to find local maxima (3d np.array,\n with dimensions image_height x image_width x num_heatmaps)\n :param upsampFactor: Size ratio between CPM heatmap output and the input image size.\n Eg: upsampFactor=16 if original image was 480x640 and heatmaps are 30x40xN\n :param bool_refine_center: Flag indicating whether:\n - False: Simply return the low-res peak found upscaled by upsampFactor (subject to grid-snap)\n - True: (Recommended, very accurate) Upsample a small patch around each low-res peak and\n fine-tune the location of the peak at the resolution of the original input image\n :param bool_gaussian_filt: Flag indicating whether to apply a 1d-GaussianFilter (smoothing)\n to each upsampled patch before fine-tuning the location of each peak.\n :return: a NUM_JOINTS x 4 np.array where each row represents a joint type (0=nose, 1=neck...)\n and the columns indicate the {x,y} position, the score (probability) and a unique id (counter)\n \"\"\"\n # MODIFIED BY CARLOS: Instead of upsampling the heatmaps to heatmap_avg and\n # then performing NMS to find peaks, this step can be sped up by ~25-50x by:\n # (9-10ms [with GaussFilt] or 5-6ms [without GaussFilt] vs 250-280ms on RoG\n # 1. Perform NMS at (low-res) CPM's output resolution\n # 1.1. Find peaks using scipy.ndimage.filters.maximum_filter\n # 2. Once a peak is found, take a patch of 5x5 centered around the peak, upsample it, and\n # fine-tune the position of the actual maximum.\n # '-> That's equivalent to having found the peak on heatmap_avg, but much faster because we only\n # upsample and scan the 5x5 patch instead of the full (e.g.) 480x640\n\n joint_list_per_joint_type = []\n cnt_total_joints = 0\n\n # For every peak found, win_size specifies how many pixels in each\n # direction from the peak we take to obtain the patch that will be\n # upsampled. 
Eg: win_size=1 -> patch is 3x3; win_size=2 -> 5x5\n # (for BICUBIC interpolation to be accurate, win_size needs to be >=2!)\n win_size = 2\n\n for joint in range(config.MODEL.NUM_KEYPOINTS):\n map_orig = heatmaps[:, :, joint]\n peak_coords = find_peaks(config.TEST.THRESH_HEATMAP, map_orig)\n peaks = np.zeros((len(peak_coords), 4))\n for i, peak in enumerate(peak_coords):\n if bool_refine_center:\n x_min, y_min = np.maximum(0, peak - win_size)\n x_max, y_max = np.minimum(\n np.array(map_orig.T.shape) - 1, peak + win_size)\n\n # Take a small patch around each peak and only upsample that\n # tiny region\n patch = map_orig[y_min:y_max + 1, x_min:x_max + 1]\n map_upsamp = cv2.resize(\n patch, None, fx=upsampFactor, fy=upsampFactor, interpolation=cv2.INTER_CUBIC)\n\n # Gaussian filtering takes an average of 0.8ms/peak (and there might be\n # more than one peak per joint!) -> For now, skip it (it's\n # accurate enough)\n map_upsamp = gaussian_filter(\n map_upsamp, sigma=3) if bool_gaussian_filt else map_upsamp\n\n # Obtain the coordinates of the maximum value in the patch\n location_of_max = np.unravel_index(\n map_upsamp.argmax(), map_upsamp.shape)\n # Remember that peaks indicates [x,y] -> need to reverse it for\n # [y,x]\n location_of_patch_center = compute_resized_coords(\n peak[::-1] - [y_min, x_min], upsampFactor)\n # Calculate the offset wrt to the patch center where the actual\n # maximum is\n refined_center = (location_of_max - location_of_patch_center)\n peak_score = map_upsamp[location_of_max]\n else:\n refined_center = [0, 0]\n # Flip peak coordinates since they are [x,y] instead of [y,x]\n peak_score = map_orig[tuple(peak[::-1])]\n peaks[i, :] = tuple(\n x for x in compute_resized_coords(peak_coords[i], upsampFactor) + refined_center[::-1]) + (\n peak_score, cnt_total_joints)\n cnt_total_joints += 1\n joint_list_per_joint_type.append(peaks)\n\n return joint_list_per_joint_type\n\n\ndef find_connected_joints(paf_upsamp, joint_list_per_joint_type, num_intermed_pts=10, config=None):\n \"\"\"\n For every type of limb (eg: forearm, shin, etc.), look for every potential\n pair of joints (eg: every wrist-elbow combination) and evaluate the PAFs to\n determine which pairs are indeed body limbs.\n :param paf_upsamp: PAFs upsampled to the original input image resolution\n :param joint_list_per_joint_type: See 'return' doc of NMS()\n :param num_intermed_pts: Int indicating how many intermediate points to take\n between joint_src and joint_dst, at which the PAFs will be evaluated\n :return: List of NUM_LIMBS rows. 
For every limb_type (a row) we store\n a list of all limbs of that type found (eg: all the right forearms).\n For each limb (each item in connected_limbs[limb_type]), we store 5 cells:\n # {joint_src_id,joint_dst_id}: a unique number associated with each joint,\n # limb_score_penalizing_long_dist: a score of how good a connection\n of the joints is, penalized if the limb length is too long\n # {joint_src_index,joint_dst_index}: the index of the joint within\n all the joints of that type found (eg: the 3rd right elbow found)\n \"\"\"\n connected_limbs = []\n\n # Auxiliary array to access paf_upsamp quickly\n limb_intermed_coords = np.empty((4, num_intermed_pts), dtype=np.intp)\n for limb_type in range(NUM_LIMBS):\n # List of all joints of type A found, where A is specified by limb_type\n # (eg: a right forearm starts in a right elbow)\n joints_src = joint_list_per_joint_type[joint_to_limb_heatmap_relationship[limb_type][0]]\n # List of all joints of type B found, where B is specified by limb_type\n # (eg: a right forearm ends in a right wrist)\n joints_dst = joint_list_per_joint_type[joint_to_limb_heatmap_relationship[limb_type][1]]\n # print(joint_to_limb_heatmap_relationship[limb_type][0])\n # print(joint_to_limb_heatmap_relationship[limb_type][1])\n # print(paf_xy_coords_per_limb[limb_type][0])\n # print(paf_xy_coords_per_limb[limb_type][1])\n if len(joints_src) == 0 or len(joints_dst) == 0:\n # No limbs of this type found (eg: no right forearms found because\n # we didn't find any right wrists or right elbows)\n connected_limbs.append([])\n else:\n connection_candidates = []\n # Specify the paf index that contains the x-coord of the paf for\n # this limb\n limb_intermed_coords[2, :] = paf_xy_coords_per_limb[limb_type][0]\n # And the y-coord paf index\n limb_intermed_coords[3, :] = paf_xy_coords_per_limb[limb_type][1]\n for i, joint_src in enumerate(joints_src):\n # Try every possible joints_src[i]-joints_dst[j] pair and see\n # if it's a feasible limb\n for j, joint_dst in enumerate(joints_dst):\n # Subtract the position of both joints to obtain the\n # direction of the potential limb\n limb_dir = joint_dst[:2] - joint_src[:2]\n # Compute the distance/length of the potential limb (norm\n # of limb_dir)\n limb_dist = np.sqrt(np.sum(limb_dir ** 2)) + 1e-8\n limb_dir = limb_dir / limb_dist # Normalize limb_dir to be a unit vector\n\n # Linearly distribute num_intermed_pts points from the x\n # coordinate of joint_src to the x coordinate of joint_dst\n limb_intermed_coords[1, :] = np.round(np.linspace(\n joint_src[0], joint_dst[0], num=num_intermed_pts))\n limb_intermed_coords[0, :] = np.round(np.linspace(\n joint_src[1], joint_dst[1], num=num_intermed_pts)) # Same for the y coordinate\n intermed_paf = paf_upsamp[limb_intermed_coords[0, :],\n limb_intermed_coords[1, :], limb_intermed_coords[2:4, :]].T\n\n score_intermed_pts = intermed_paf.dot(limb_dir)\n score_penalizing_long_dist = score_intermed_pts.mean(\n ) + min(0.5 * paf_upsamp.shape[0] / limb_dist - 1, 0)\n # Criterion 1: At least 80% of the intermediate points have\n # a score higher than thre2\n criterion1 = (np.count_nonzero(\n score_intermed_pts > config.TEST.THRESH_PAF) > 0.8 * num_intermed_pts)\n # Criterion 2: Mean score, penalized for large limb\n # distances (larger than half the image height), is\n # positive\n criterion2 = (score_penalizing_long_dist > 0)\n if criterion1 and criterion2:\n # Last value is the combined paf(+limb_dist) + heatmap\n # scores of both joints\n connection_candidates.append(\n [i, j, 
score_penalizing_long_dist,\n score_penalizing_long_dist + joint_src[2] + joint_dst[2]])\n\n # Sort connection candidates based on their\n # score_penalizing_long_dist\n connection_candidates = sorted(\n connection_candidates, key=lambda x: x[2], reverse=True)\n connections = np.empty((0, 5))\n # There can only be as many limbs as the smallest number of source\n # or destination joints (eg: only 2 forearms if there's 5 wrists\n # but 2 elbows)\n max_connections = min(len(joints_src), len(joints_dst))\n # Traverse all potential joint connections (sorted by their score)\n for potential_connection in connection_candidates:\n i, j, s = potential_connection[0:3]\n # Make sure joints_src[i] or joints_dst[j] haven't already been\n # connected to other joints_dst or joints_src\n if i not in connections[:, 3] and j not in connections[:, 4]:\n # [joint_src_id, joint_dst_id, limb_score_penalizing_long_dist, joint_src_index, joint_dst_index]\n connections = np.vstack(\n [connections, [joints_src[i][3], joints_dst[j][3], s, i, j]])\n # Exit if we've already established max_connections\n # connections (each joint can't be connected to more than\n # one joint)\n if len(connections) >= max_connections:\n break\n connected_limbs.append(connections)\n\n return connected_limbs\n\n\ndef group_limbs_of_same_person(connected_limbs, joint_list, config):\n \"\"\"\n Associate limbs belonging to the same person together.\n :param connected_limbs: See 'return' doc of find_connected_joints()\n :param joint_list: unravel'd version of joint_list_per_joint [See 'return' doc of NMS()]\n :return: 2d np.array of size num_people x (NUM_JOINTS+2). For each person found:\n # First NUM_JOINTS columns contain the index (in joint_list) of the joints associated\n with that person (or -1 if their i-th joint wasn't found)\n # 2nd-to-last column: Overall score of the joints+limbs that belong to this person\n # Last column: Total count of joints found for this person\n \"\"\"\n person_to_joint_assoc = []\n\n for limb_type in range(NUM_LIMBS):\n joint_src_type, joint_dst_type = joint_to_limb_heatmap_relationship[limb_type]\n\n for limb_info in connected_limbs[limb_type]:\n person_assoc_idx = []\n for person, person_limbs in enumerate(person_to_joint_assoc):\n if person_limbs[joint_src_type] == limb_info[0] or person_limbs[joint_dst_type] == limb_info[1]:\n person_assoc_idx.append(person)\n\n # If one of the joints has been associated to a person, and either\n # the other joint is also associated with the same person or not\n # associated to anyone yet:\n if len(person_assoc_idx) == 1:\n person_limbs = person_to_joint_assoc[person_assoc_idx[0]]\n # If the other joint is not associated to anyone yet,\n if person_limbs[joint_dst_type] != limb_info[1]:\n # Associate it with the current person\n person_limbs[joint_dst_type] = limb_info[1]\n # Increase the number of limbs associated to this person\n person_limbs[-1] += 1\n # And update the total score (+= heatmap score of joint_dst\n # + score of connecting joint_src with joint_dst)\n person_limbs[-2] += joint_list[limb_info[1]\n .astype(int), 2] + limb_info[2]\n elif len(person_assoc_idx) == 2: # if found 2 and disjoint, merge them\n person1_limbs = person_to_joint_assoc[person_assoc_idx[0]]\n person2_limbs = person_to_joint_assoc[person_assoc_idx[1]]\n membership = ((person1_limbs >= 0) & (person2_limbs >= 0))[:-2]\n if not membership.any(): # If both people have no same joints connected, merge into a single person\n # Update which joints are connected\n person1_limbs[:-2] += 
(person2_limbs[:-2] + 1)\n # Update the overall score and total count of joints\n # connected by summing their counters\n person1_limbs[-2:] += person2_limbs[-2:]\n # Add the score of the current joint connection to the\n # overall score\n person1_limbs[-2] += limb_info[2]\n person_to_joint_assoc.pop(person_assoc_idx[1])\n else: # Same case as len(person_assoc_idx)==1 above\n person1_limbs[joint_dst_type] = limb_info[1]\n person1_limbs[-1] += 1\n person1_limbs[-2] += joint_list[limb_info[1]\n .astype(int), 2] + limb_info[2]\n else: # No person has claimed any of these joints, create a new person\n # Initialize person info to all -1 (no joint associations)\n row = -1 * np.ones(config.MODEL.NUM_KEYPOINTS + 2)\n # Store the joint info of the new connection\n row[joint_src_type] = limb_info[0]\n row[joint_dst_type] = limb_info[1]\n # Total count of connected joints for this person: 2\n row[-1] = 2\n # Compute overall score: score joint_src + score joint_dst + score connection\n # {joint_src,joint_dst}\n row[-2] = sum(joint_list[limb_info[:2].astype(int), 2]\n ) + limb_info[2]\n person_to_joint_assoc.append(row)\n\n # Delete people who have very few parts connected\n people_to_delete = []\n for person_id, person_info in enumerate(person_to_joint_assoc):\n if person_info[-1] < 3 or person_info[-2] / person_info[-1] < 0.2:\n people_to_delete.append(person_id)\n # Traverse the list in reverse order so we delete indices starting from the\n # last one (otherwise, removing item for example 0 would modify the indices of\n # the remaining people to be deleted!)\n for index in people_to_delete[::-1]:\n person_to_joint_assoc.pop(index)\n\n # Appending items to a np.array can be costly (allocating new memory, copying over the array, then adding new row)\n # Instead, we treat the set of people as a list (fast to append items) and\n # only convert to np.array at the end\n return np.array(person_to_joint_assoc)\n\n\ndef paf_to_pose(heatmaps, pafs, config):\n # Bottom-up approach:\n # Step 1: find all joints in the image (organized by joint type: [0]=nose,\n # [1]=neck...)\n joint_list_per_joint_type = NMS(heatmaps, upsampFactor=config.MODEL.DOWNSAMPLE, config=config)\n # joint_list is an unravel'd version of joint_list_per_joint, where we add\n # a 5th column to indicate the joint_type (0=nose, 1=neck...)\n joint_list = np.array([tuple(peak) + (joint_type,) for joint_type,\n joint_peaks in enumerate(joint_list_per_joint_type) for peak in joint_peaks])\n\n # import ipdb\n # ipdb.set_trace()\n # Step 2: find which joints go together to form limbs (which wrists go\n # with which elbows)\n paf_upsamp = cv2.resize(\n pafs, None, fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_CUBIC)\n connected_limbs = find_connected_joints(paf_upsamp, joint_list_per_joint_type,\n config.TEST.NUM_INTERMED_PTS_BETWEEN_KEYPOINTS, config)\n\n # Step 3: associate limbs that belong to the same person\n person_to_joint_assoc = group_limbs_of_same_person(\n connected_limbs, joint_list, config)\n\n return joint_list, person_to_joint_assoc\n\n\ndef paf_to_pose_cpp(heatmaps, pafs, config):\n humans = []\n joint_list_per_joint_type = NMS(heatmaps, upsampFactor=config.MODEL.DOWNSAMPLE, config=config)\n\n joint_list = np.array(\n [tuple(peak) + (joint_type,) for joint_type, joint_peaks in enumerate(joint_list_per_joint_type) for peak in\n joint_peaks]).astype(np.float32)\n\n if joint_list.shape[0] > 0:\n joint_list = np.expand_dims(joint_list, 0)\n paf_upsamp = cv2.resize(\n pafs, None, 
fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_NEAREST)\n heatmap_upsamp = cv2.resize(\n heatmaps, None, fx=config.MODEL.DOWNSAMPLE, fy=config.MODEL.DOWNSAMPLE, interpolation=cv2.INTER_NEAREST)\n pafprocess.process_paf(joint_list, heatmap_upsamp, paf_upsamp)\n for human_id in range(pafprocess.get_num_humans()):\n human = Human([])\n is_added = False\n for part_idx in range(config.MODEL.NUM_KEYPOINTS):\n c_idx = int(pafprocess.get_part_cid(human_id, part_idx))\n if c_idx < 0:\n continue\n is_added = True\n human.body_parts[part_idx] = BodyPart(\n '%d-%d' % (human_id, part_idx), part_idx,\n float(pafprocess.get_part_x(c_idx)) / heatmap_upsamp.shape[1],\n float(pafprocess.get_part_y(c_idx)) / heatmap_upsamp.shape[0],\n pafprocess.get_part_score(c_idx)\n )\n if is_added:\n score = pafprocess.get_score(human_id)\n human.score = score\n humans.append(human)\n\n return humans\n"
] |
[
[
"numpy.expand_dims",
"numpy.maximum",
"numpy.nonzero",
"numpy.linspace",
"numpy.arange",
"numpy.vstack",
"numpy.ones",
"scipy.ndimage.filters.gaussian_filter",
"scipy.ndimage.morphology.generate_binary_structure",
"numpy.count_nonzero",
"numpy.array",
"numpy.sum",
"numpy.empty"
]
] |
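The paf_to_pose.py record above documents its peak finding and coordinate rescaling in the find_peaks() and compute_resized_coords() docstrings. A self-contained sketch of just those two steps, using SciPy's current scipy.ndimage import path and an invented heatmap and threshold:

import numpy as np
from scipy.ndimage import maximum_filter, generate_binary_structure

heatmap = np.zeros((30, 40))
heatmap[12, 20] = 0.9        # one synthetic joint response
thresh = 0.1                 # stand-in for config.TEST.THRESH_HEATMAP

# local maxima above the threshold, returned as [x, y] (column, row) pairs
peaks_binary = (maximum_filter(heatmap, footprint=generate_binary_structure(2, 1)) == heatmap) \
               & (heatmap > thresh)
peaks_xy = np.array(np.nonzero(peaks_binary)[::-1]).T

upsamp_factor = 8            # e.g. the network's output stride
# (coords + 0.5) * factor - 0.5 maps low-res pixel centres to full-res centres
peaks_fullres = (peaks_xy.astype(float) + 0.5) * upsamp_factor - 0.5
print(peaks_xy)              # [[20 12]]
print(peaks_fullres)         # [[163.5  99.5]]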
jrbourbeau/composition
|
[
"f8debd81b0467a6094d5ba56a5f0fc6047369d30"
] |
[
"analysis/ShowerLLH/reco-vs-true-containment.py"
] |
[
"#!/usr/bin/env python\n\nimport os\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nimport argparse\nimport seaborn.apionly as sns\n\nimport composition.support_functions.paths as paths\nfrom composition.support_functions.checkdir import checkdir\nfrom composition.analysis.load_sim import load_sim\n# from effective_area import getEff\nfrom ShowerLLH_scripts.analysis.LLH_tools import *\n# from LLH_tools import *\n# from zfix import zfix\n\ndef histogram_2D(x, y, bins, log_counts=False, **opts):\n h, xedges, yedges = np.histogram2d(x, y, bins=bins, normed=False)\n h = np.rot90(h)\n h = np.flipud(h)\n h = np.ma.masked_where(h == 0, h)\n if log_counts:\n h = np.log10(h)\n extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]\n colormap = 'viridis'\n plt.imshow(h, extent=extent, origin='lower',\n interpolation='none', cmap=colormap)\n # plt.xlabel('$\\log_{10}(E_\\mathrm{MC}/\\mathrm{GeV})$')\n # plt.ylabel('$\\log_{10}(E_{\\mathrm{ML}}/\\mathrm{GeV})$')\n # plt.title(r'ShowerLLH - IT73 - {} LLH bins'.format(opts['bintype']))\n # plt.xlim([5, 9.5])\n # plt.ylim([5, 9.5])\n # cb = plt.colorbar(\n # label='$\\log_{10}{P(E_{\\mathrm{ML}}|E_{\\mathrm{MC}})}$')\n # plt.plot([0, 10], [0, 10], linestyle='--', color='k')\n # outfile = opts['outdir'] + '/' + \\\n # 'MLenergy_vs_MCenergy_{}.png'.format(opts['bintype'])\n # plt.savefig(outfile)\n # plt.close()\n\n\n\nif __name__ == \"__main__\":\n # Global variables setup for path names\n mypaths = paths.Paths()\n\n p = argparse.ArgumentParser(\n description='Creates performance plots for ShowerLLH')\n p.add_argument('-c', '--config', dest='config',\n default='IT73',\n choices=['IT73', 'IT81'],\n help='Detector configuration')\n p.add_argument('-o', '--outdir', dest='outdir',\n default='/home/jbourbeau/public_html/figures/composition/ShowerLLH',\n help='Output directory')\n p.add_argument('-b', '--bintype', dest='bintype',\n default='logdist',\n choices=['standard', 'nozenith', 'logdist'],\n help='Option for a variety of preset bin values')\n p.add_argument('-n', '--numbins', dest='numbins', type=float,\n default=30, help='Number of energy bins')\n args = p.parse_args()\n checkdir(args.outdir + '/')\n opts = vars(args).copy()\n\n # df = load_sim()\n df, cut_dict = load_sim(return_cut_dict=True)\n selection_mask = np.array([True] * len(df))\n standard_cut_keys = ['reco_exists', 'MC_zenith',\n 'IceTopMaxSignalInEdge', 'IceTopMaxSignal']\n for key in standard_cut_keys:\n selection_mask *= cut_dict[key]\n\n print('n_events before cuts = {}'.format(len(df)))\n df = df[selection_mask]\n print('n_events after cuts = {}'.format(len(df)))\n\n MC_IT_containment = df.IceTop_FractionContainment\n reco_IT_containment = df.reco_IT_containment\n"
] |
[
[
"numpy.rot90",
"matplotlib.pyplot.imshow",
"numpy.flipud",
"numpy.log10",
"numpy.ma.masked_where",
"numpy.histogram2d"
]
] |
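The histogram_2D() helper in this row builds a masked, optionally log-scaled 2D histogram before plotting. A minimal sketch of the same sequence with random stand-in data (the real script feeds it the true and reconstructed containment columns):

import numpy as np
import matplotlib.pyplot as plt

x = np.random.uniform(0, 1.2, 10000)       # stand-in for MC containment
y = x + np.random.normal(0, 0.05, 10000)   # stand-in for reco containment

h, xedges, yedges = np.histogram2d(x, y, bins=50)
h = np.rot90(h)
h = np.flipud(h)
h = np.ma.masked_where(h == 0, h)          # keep empty bins blank
h = np.log10(h)                            # the log_counts=True branch

extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
plt.imshow(h, extent=extent, origin='lower', interpolation='none', cmap='viridis')
plt.colorbar(label=r'$\log_{10}$(counts)')
plt.savefig('containment_sketch.png')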
Abdullah-Abuolaim/defocus-deblurring-dual-pixel
|
[
"21a43e7d12350c62c4038485cdeebc27a078765b"
] |
[
"DPDNet/image_to_patch_filter.py"
] |
[
"\"\"\"\nThis code is used to extract image patches from the training and validation\nsets as described in the paper. For the training set patches, we discard 30%\nof the patches that have the lowest sharpness energy. Recall that we don't\nextract patches for test images because we process full image at test time.\n\nCopyright (c) 2020-present, Abdullah Abuolaim\nThis source code is licensed under the license found in the LICENSE file in\nthe root directory of this source tree.\n\nNote: this code is the implementation of the \"Defocus Deblurring Using Dual-\nPixel Data\" paper accepted to ECCV 2020. Link to GitHub repository:\nhttps://github.com/Abdullah-Abuolaim/defocus-deblurring-dual-pixel\n\nEmail: [email protected]\n\"\"\"\n\nimport numpy as np\nimport os\nimport cv2\nimport errno\nfrom copy import deepcopy\n\ndef check_create_directory(path_to_check):\n if not os.path.exists(path_to_check):\n try:\n os.makedirs(path_to_check)\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\ndef shapness_measure(img_temp,kernel_size):\n conv_x = cv2.Sobel(img_temp,cv2.CV_64F,1,0,ksize=kernel_size)\n conv_y = cv2.Sobel(img_temp,cv2.CV_64F,0,1,ksize=kernel_size)\n temp_arr_x=deepcopy(conv_x*conv_x)\n temp_arr_y=deepcopy(conv_y*conv_y)\n temp_sum_x_y=temp_arr_x+temp_arr_y\n temp_sum_x_y=np.sqrt(temp_sum_x_y)\n return np.sum(temp_sum_x_y)\n\ndef filter_patch_sharpness(patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp):\n global patches_src_c, patches_trg_c, patches_src_l, patches_src_r\n fitnessVal_3=[]\n fitnessVal_7=[]\n fitnessVal_11=[]\n fitnessVal_15=[]\n num_of_img_patches=len(patches_trg_c_temp)\n for i in range(num_of_img_patches):\n fitnessVal_3.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),3))\n fitnessVal_7.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),7))\n fitnessVal_11.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),11))\n fitnessVal_15.append(shapness_measure(cv2.cvtColor(patches_trg_c_temp[i], cv2.COLOR_BGR2GRAY),15))\n fitnessVal_3=np.asarray(fitnessVal_3)\n fitnessVal_7=np.asarray(fitnessVal_7)\n fitnessVal_11=np.asarray(fitnessVal_11)\n fitnessVal_15=np.asarray(fitnessVal_15)\n fitnessVal_3=(fitnessVal_3-np.min(fitnessVal_3))/np.max((fitnessVal_3-np.min(fitnessVal_3)))\n fitnessVal_7=(fitnessVal_7-np.min(fitnessVal_7))/np.max((fitnessVal_7-np.min(fitnessVal_7)))\n fitnessVal_11=(fitnessVal_11-np.min(fitnessVal_11))/np.max((fitnessVal_11-np.min(fitnessVal_11)))\n fitnessVal_15=(fitnessVal_15-np.min(fitnessVal_15))/np.max((fitnessVal_15-np.min(fitnessVal_15)))\n fitnessVal_all=fitnessVal_3*fitnessVal_7*fitnessVal_11*fitnessVal_15\n \n to_remove_patches_number=int(to_remove_ratio*num_of_img_patches)\n \n for itr in range(to_remove_patches_number):\n minArrInd=np.argmin(fitnessVal_all)\n fitnessVal_all[minArrInd]=2\n for itr in range(num_of_img_patches):\n if fitnessVal_all[itr]!=2:\n patches_src_c.append(patches_src_c_temp[itr])\n patches_trg_c.append(patches_trg_c_temp[itr])\n patches_src_l.append(patches_src_l_temp[itr])\n patches_src_r.append(patches_src_r_temp[itr])\n \ndef slice_stride(_img_src_c, _img_trg_c, _img_src_l, _img_src_r):\n global set_type, patch_size, stride, patches_src_c, patches_trg_c, patches_src_l, patches_src_r\n coordinates_list=[]\n coordinates_list.append([0,0,0,0])\n patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp = [], [], [], []\n for r in 
range(0,_img_src_c.shape[0],stride[0]):\n for c in range(0,_img_src_c.shape[1],stride[1]):\n if (r+patch_size[0]) <= _img_src_c.shape[0] and (c+patch_size[1]) <= _img_src_c.shape[1]:\n patches_src_c_temp.append(_img_src_c[r:r+patch_size[0],c:c+patch_size[1]])\n patches_trg_c_temp.append(_img_trg_c[r:r+patch_size[0],c:c+patch_size[1]])\n patches_src_l_temp.append(_img_src_l[r:r+patch_size[0],c:c+patch_size[1]])\n patches_src_r_temp.append(_img_src_r[r:r+patch_size[0],c:c+patch_size[1]])\n\n elif (r+patch_size[0]) <= _img_src_c.shape[0] and not ([r,r+patch_size[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]] in coordinates_list):\n patches_src_c_temp.append(_img_src_c[r:r+patch_size[0],_img_src_c.shape[1]-patch_size[1]:_img_src_c.shape[1]])\n patches_trg_c_temp.append(_img_trg_c[r:r+patch_size[0],_img_trg_c.shape[1]-patch_size[1]:_img_trg_c.shape[1]])\n patches_src_l_temp.append(_img_src_l[r:r+patch_size[0],_img_src_l.shape[1]-patch_size[1]:_img_src_l.shape[1]])\n patches_src_r_temp.append(_img_src_r[r:r+patch_size[0],_img_src_r.shape[1]-patch_size[1]:_img_src_r.shape[1]])\n coordinates_list.append([r,r+patch_size[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]])\n \n elif (c+patch_size[1]) <= _img_src_c.shape[1] and not ([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],c,c+patch_size[1]] in coordinates_list):\n patches_src_c_temp.append(_img_src_c[_img_src_c.shape[0]-patch_size[0]:_img_src_c.shape[0],c:c+patch_size[1]])\n patches_trg_c_temp.append(_img_trg_c[_img_trg_c.shape[0]-patch_size[0]:_img_trg_c.shape[0],c:c+patch_size[1]])\n patches_src_l_temp.append(_img_src_l[_img_src_l.shape[0]-patch_size[0]:_img_src_l.shape[0],c:c+patch_size[1]])\n patches_src_r_temp.append(_img_src_r[_img_src_r.shape[0]-patch_size[0]:_img_src_r.shape[0],c:c+patch_size[1]])\n coordinates_list.append([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],c,c+patch_size[1]])\n \n elif not ([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]] in coordinates_list):\n patches_src_c_temp.append(_img_src_c[_img_src_c.shape[0]-patch_size[0]:_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1]:_img_src_c.shape[1]])\n patches_trg_c_temp.append(_img_trg_c[_img_trg_c.shape[0]-patch_size[0]:_img_trg_c.shape[0],_img_trg_c.shape[1]-patch_size[1]:_img_trg_c.shape[1]])\n patches_src_l_temp.append(_img_src_l[_img_src_l.shape[0]-patch_size[0]:_img_src_l.shape[0],_img_src_l.shape[1]-patch_size[1]:_img_src_l.shape[1]])\n patches_src_r_temp.append(_img_src_r[_img_src_r.shape[0]-patch_size[0]:_img_src_r.shape[0],_img_src_r.shape[1]-patch_size[1]:_img_src_r.shape[1]])\n coordinates_list.append([_img_src_c.shape[0]-patch_size[0],_img_src_c.shape[0],_img_src_c.shape[1]-patch_size[1],_img_src_c.shape[1]])\n if set_type == 'train':\n filter_patch_sharpness(patches_src_c_temp, patches_trg_c_temp, patches_src_l_temp, patches_src_r_temp)\n else:\n patches_src_c, patches_trg_c, patches_src_l, patches_src_r = deepcopy(patches_src_c_temp), deepcopy(patches_trg_c_temp), deepcopy(patches_src_l_temp), deepcopy(patches_src_r_temp)\n\nset_type_arr=['train','val']\nimg_ex='.png'\nsub_folder=['source/','target/']\ndataset='./dd_dp_dataset_canon/'\n\n# color flag used to select the reading image mode in opencv. 
0:graysca,\n# 1:rgb 8bits, -1:read image as it including its bit depth\ncolor_flag=-1\n\npatch_size=[512, 512]\n\nto_remove_ratio=0.3 # discard 30% of the patches\n \nfor set_type in set_type_arr:\n print('Image to patch of ',set_type,'set has started...')\n if set_type == 'train':\n # patch settings\n patch_overlap_ratio=0.6\n stride=[int((1-patch_overlap_ratio)*patch_size[0]),int((1-patch_overlap_ratio)*patch_size[1])]\n else:\n # patch settings\n patch_overlap_ratio=0.01\n stride=[int((1-patch_overlap_ratio)*patch_size[0]),int((1-patch_overlap_ratio)*patch_size[1])]\n \n # pathes to write extracted patches\n path_write_c= './dd_dp_dataset_canon_patch/'+set_type+'_c/'\n path_write_l= './dd_dp_dataset_canon_patch/'+set_type+'_l/'\n path_write_r= './dd_dp_dataset_canon_patch/'+set_type+'_r/'\n \n # to check if directory exist, otherwise create one\n check_create_directory(path_write_c+sub_folder[0])\n check_create_directory(path_write_c+sub_folder[1])\n check_create_directory(path_write_l+sub_folder[0])\n check_create_directory(path_write_r+sub_folder[0])\n \n # load image filenames\n images_src=np.load('./file_names/'+set_type+'_src.npy')\n images_trg=np.load('./file_names/'+set_type+'_trg.npy')\n \n # set counter\n img_patch_count=0\n \n data_ims_size=len(images_src)\n for i in range(data_ims_size):\n patches_src_c=[]\n patches_trg_c=[]\n \n patches_src_l=[]\n patches_src_r=[]\n\n img_src_c=cv2.imread(dataset+set_type+'_c/'+sub_folder[0]+images_src[i]+img_ex,color_flag)\n img_trg_c=cv2.imread(dataset+set_type+'_c/'+sub_folder[1]+images_trg[i]+img_ex,color_flag)\n \n print(dataset+set_type+'_c/'+sub_folder[0]+images_src[i]+img_ex)\n \n img_src_l=cv2.imread(dataset+set_type+'_l/'+sub_folder[0]+images_src[i]+'_L'+img_ex,color_flag)\n img_src_r=cv2.imread(dataset+set_type+'_r/'+sub_folder[0]+images_src[i]+'_R'+img_ex,color_flag)\n \n slice_stride(img_src_c, img_trg_c, img_src_l, img_src_r)\n for j in range(len(patches_src_c)):\n cv2.imwrite(path_write_c+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_c[j]).astype(np.uint16))\n cv2.imwrite(path_write_c+sub_folder[1]+str(img_patch_count).zfill(5)+img_ex,(patches_trg_c[j]).astype(np.uint16))\n cv2.imwrite(path_write_l+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_l[j]).astype(np.uint16))\n cv2.imwrite(path_write_r+sub_folder[0]+str(img_patch_count).zfill(5)+img_ex,(patches_src_r[j]).astype(np.uint16))\n img_patch_count+=1\n print(set_type+': ',i,j,img_patch_count)"
] |
[
[
"numpy.sqrt",
"numpy.min",
"numpy.asarray",
"numpy.argmin",
"numpy.load",
"numpy.sum"
]
] |
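The image_to_patch_filter.py record ranks training patches by Sobel gradient energy and discards the lowest 30% (to_remove_ratio). A simplified sketch of that idea with a single kernel size (the original multiplies normalized scores at ksize 3/7/11/15) and random stand-in patches:

import numpy as np
import cv2

def sharpness(gray, ksize=3):
    # gradient-energy score: sum of the Sobel gradient magnitude
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=ksize)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=ksize)
    return np.sum(np.sqrt(gx * gx + gy * gy))

patches = [np.random.rand(512, 512).astype(np.float32) for _ in range(10)]
scores = np.asarray([sharpness(p) for p in patches])

to_remove = int(0.3 * len(patches))        # to_remove_ratio = 0.3
keep_idx = np.argsort(scores)[to_remove:]  # drop the blurriest 30%
kept = [patches[i] for i in keep_idx]
print(len(kept))                           # 7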
kcotar/Stellar_abudance_trees
|
[
"1a4377ef53a4b4c8df1be860598a70be31626110"
] |
[
"colorize_sky.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\n\ndef _prepare_ra_dec(data):\n ra = data['ra']\n idx_trans = ra > 180\n if len(idx_trans) > 0:\n ra[idx_trans] -= 360\n ra = np.deg2rad(ra)\n dec = np.deg2rad(data['dec'])\n return ra, dec\n\n\ndef plot_ra_dec_locations(data, path='sky_pos.png'):\n # plt.subplot(111, projection='mollweide')\n # ra, dec = _prepare_ra_dec(data)\n # plt.scatter(ra, dec, lw=0, c='black', s=0.4)\n # plt.grid(True)\n # plt.colorbar()\n # plt.tight_layout()\n # plt.savefig(path, dpi=500)\n # plt.close()\n plt.figure()\n map = Basemap(projection='moll', lon_0=0)\n map.drawparallels(np.arange(-90., 95., 5.))\n map.drawmeridians(np.arange(0., 365., 5.))\n ra, dec = _prepare_ra_dec(data)\n map.scatter(ra, dec, lw=0, c='black', s=0.4)\n ax = plt.gca()\n ax.set_xlim((np.min(ra), np.max(ra)))\n ax.set_ylim((np.min(dec), np.max(dec)))\n plt.tight_layout()\n plt.savefig(path, dpi=250)\n plt.close()\n\n\ndef plot_ra_dec_attribute(data, attribute, path='sky_pos_attribute.png'):\n # plt.subplot(111, projection='mollweide')\n # ra, dec = _prepare_ra_dec(data)\n # plt.scatter(ra, dec, lw=0, c=data[attribute], s=0.4)\n # plt.grid(True)\n # plt.colorbar()\n # plt.tight_layout()\n # plt.show()\n # plt.savefig(path, dpi=500)\n # plt.close()\n plt.figure()\n map = Basemap(projection='moll', lon_0=0)\n map.drawparallels(np.arange(-90., 95., 5.))\n map.drawmeridians(np.arange(0., 365., 5.))\n ra, dec = _prepare_ra_dec(data)\n map.scatter(ra, dec, lw=0, c=data[attribute], s=2)\n ax = plt.gca()\n ax.set_xlim((np.min(ra), np.max(ra)))\n ax.set_ylim((np.min(dec), np.max(dec)))\n plt.tight_layout()\n plt.savefig(path, dpi=250)\n plt.close()"
] |
[
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"numpy.min",
"numpy.arange",
"matplotlib.pyplot.savefig",
"numpy.max",
"numpy.deg2rad",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure"
]
] |
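colorize_sky.py above first wraps right ascensions above 180 degrees into (-180, 180] and converts to radians before scattering them on a Mollweide-style Basemap. The wrap itself, shown on invented values:

import numpy as np

ra = np.array([10.0, 170.0, 185.0, 350.0])  # degrees, invented
ra[ra > 180] -= 360                         # same wrap as _prepare_ra_dec()
print(np.deg2rad(ra))                       # approx [ 0.175  2.967 -3.054 -0.175]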
hyuanmech/MOPSO
|
[
"f2cbe9151d9dbd21b562957b368f22e2648232b9"
] |
[
"check_generated_geometry.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 23 16:05:17 2020\r\n\r\n@author: yuanh\r\n\"\"\"\r\n\r\nimport os\r\nimport shutil\r\nfrom openpyxl import load_workbook\r\nimport numpy as np\r\n\r\nit = 6\r\n\r\nflag = 0\r\n\r\nnPop = 100\r\n\r\nif flag == 1:\r\n n = 9\r\n index = np.zeros((n, 1))\r\n wb = load_workbook('Positions.xlsx')\r\n sheet = wb['2_mu']\r\n for i in range(n):\r\n index[i,0] = sheet.cell(row=i+2,column=1).value\r\n \r\nif flag == 1:\r\n os.mkdir(str(it)+'_MU_all')\r\n for hh in range(n):\r\n source = \"J:/Coupler_optimization/MOPSO_CAE/\"+str(it)+\"_\"+str(int(index[hh,0]))+\"_MU\"+\"/\"+str(it)+\"_\"+str(int(index[hh,0]))+\"_MU_geo.pdf\"\r\n destination = \"J:/Coupler_optimization/MOPSO_CAE/\"+str(it)+\"_MU_all\"+\"/\"+str(it)+\"_\"+str(int(index[hh,0]))+\"_MU_geo.pdf\"\r\n shutil.copyfile(source, destination)\r\nelse:\r\n os.mkdir(str(it)+'_all')\r\n for hh in range(nPop):\r\n source = \"J:/Coupler_optimization/MOPSO_CAE/\"+str(it)+\"_\"+str(hh)+\"/\"+str(it)+\"_\"+str(hh)+\"_geo.pdf\"\r\n destination = \"J:/Coupler_optimization/MOPSO_CAE/\"+str(it)+\"_all\"+\"/\"+str(it)+\"_\"+str(hh)+\"_geo.pdf\"\r\n shutil.copyfile(source, destination)\r\n\r\n "
] |
[
[
"numpy.zeros"
]
] |
jaspreetj/opics
|
[
"037ed93ad9f6c9ad9fec5feb214bb89de24635f0"
] |
[
"opics/utils.py"
] |
[
"from typing import Any, Dict, List, Tuple\nimport cmath as cm\nimport time\nimport re\nimport itertools\nimport inspect\nfrom copy import deepcopy\nimport numpy as np\nfrom numpy import ndarray\nfrom pathlib import PosixPath\nfrom defusedxml.ElementTree import parse\n\n\ndef fromSI(value: str) -> float:\n \"\"\"converts from SI unit values to metric\n\n Args:\n value (str): a value in SI units, e.g. 1.3u\n\n Returns:\n float: the value in metric units.\n \"\"\"\n return float(value.replace(\"u\", \"e-6\"))\n\n\ndef universal_sparam_filereader(\n nports: int, sfilename: str, sfiledir: PosixPath, format_type: str = \"auto\"\n) -> Tuple[ndarray, ndarray]:\n \"\"\"\n Function to automatically detect the sparameter file format and use appropriate method to delimit and format sparam data\n\n This function is a unified version of sparameter reader function defined in https://github.com/BYUCamachoLab/simphony\n\n Args:\n nports: Number of ports\n sfilename: XML look-up-table filename\n sfiledir: Path to the directory containing the XML file\n format_type: Format type. For more information: https://support.lumerical.com/hc/en-us/articles/360036618513-S-parameter-file-formats\n \"\"\"\n numports = nports\n filename = sfiledir / sfilename\n\n if format_type == \"auto\":\n try:\n # print(\"try A\")\n result = universal_sparam_filereader(nports, sfilename, sfiledir, \"A\")\n return result\n except Exception:\n try:\n # print(\"try B\")\n result = universal_sparam_filereader(nports, sfilename, sfiledir, \"B\")\n return result\n except Exception:\n # print(\"try C\")\n result = universal_sparam_filereader(nports, sfilename, sfiledir, \"C\")\n return result\n\n elif format_type == \"A\":\n \"\"\"\n dc_halfring_te_1550\n Returns the s-parameters across some frequency range for the Sparam fileformat A\n\n input:\n [\"port 1\",\"\"]\n [\"port 2\",\"\"]\n [\"port 3\",\"\"]\n [\"port 4\",\"\"]\n (\"port 1\",\"mode 1\",1,\"port 1\",1,\"transmission\")\n (101, 3)\n\n output:\n [frequency, s-parameters]\n \"\"\"\n F = []\n S = []\n with open(filename, \"r\") as fid:\n for i in range(5):\n line = fid.readline()\n line = fid.readline()\n numrows = int(tuple(line[1:-2].split(\",\"))[0])\n S = np.zeros((numrows, numports, numports), dtype=\"complex128\")\n r = m = n = 0\n for line in fid:\n if line[0] == \"(\":\n continue\n data = line.split()\n data = list(map(float, data))\n if m == 0 and n == 0:\n F.append(data[0])\n S[r, m, n] = data[1] * np.exp(1j * data[2])\n r += 1\n if r == numrows:\n r = 0\n m += 1\n if m == numports:\n m = 0\n n += 1\n if n == numports:\n break\n return (np.array(F), S)\n\n elif format_type == \"B\":\n \"\"\"\n ebeam_bdc_te1550, nanotaper, ebeam_y_1550\n\n Returns the s-parameters across some frequency range for the Sparam fileformat A\n input:\n ('port 1','TE',1,'port 1',1,'transmission')\n (51,3)\n\n output:\n [frequency, s-parameters]\n \"\"\"\n F = []\n S = []\n with open(filename, \"r\") as fid:\n line = fid.readline()\n line = fid.readline()\n numrows = int(tuple(line[1:-2].split(\",\"))[0])\n S = np.zeros((numrows, numports, numports), dtype=\"complex128\")\n r = m = n = 0\n for line in fid:\n if line[0] == \"(\":\n continue\n data = line.split()\n data = list(map(float, data))\n if m == 0 and n == 0:\n F.append(data[0])\n S[r, m, n] = data[1] * np.exp(1j * data[2])\n r += 1\n if r == numrows:\n r = 0\n m += 1\n if m == numports:\n m = 0\n n += 1\n if n == numports:\n break\n return (np.array(F), S)\n\n elif format_type == \"C\":\n \"\"\"\n ebeam_gc_te1550\n\n Returns the 
s-parameters across some frequency range for the Sparam fileformat A\n input:\n columns with space delimiter\n\n output:\n [frequency, s-parameters]\n \"\"\"\n with open(filename) as fid:\n # grating coupler compact models have 100 points for each s-matrix index\n arrlen = 100\n\n lines = fid.readlines()\n F = np.zeros(arrlen)\n S = np.zeros((arrlen, 2, 2), \"complex128\")\n for i in range(0, arrlen):\n words = lines[i].split()\n F[i] = float(words[0])\n S[i, 0, 0] = cm.rect(float(words[1]), float(words[2]))\n S[i, 0, 1] = cm.rect(float(words[3]), float(words[4]))\n S[i, 1, 0] = cm.rect(float(words[5]), float(words[6]))\n S[i, 1, 1] = cm.rect(float(words[7]), float(words[8]))\n F = F[::-1]\n S = S[::-1, :, :]\n return (np.array(F), S)\n\n\ndef LUT_reader(filedir: PosixPath, lutfilename: str, lutdata: List[List[str]]):\n \"\"\"\n Reads look up table data.\n\n Args:\n filedir: Directory of the XML look-up-table file.\n lutfilename: Look-up-table filename.\n lutdata: Look-up-table arguments.\n \"\"\"\n xml = parse(filedir / lutfilename)\n root = xml.getroot()\n\n for node in root.iter(\"association\"):\n sample = [[each.attrib[\"name\"], each.text] for each in node.iter(\"value\")]\n if sorted(sample[0:-1]) == sorted(lutdata):\n break\n sparam_file = sample[-1][1].split(\";\")\n return (sparam_file, xml, node)\n\n\ndef LUT_processor(\n filedir: PosixPath,\n lutfilename: str,\n lutdata: List[List[str]],\n nports: int,\n sparam_attr: str,\n verbose: bool = False,\n) -> Tuple[Tuple[ndarray, ndarray], str]:\n \"\"\"process look up table data\"\"\"\n start = time.time()\n sparam_file, xml, node = LUT_reader(filedir, lutfilename, lutdata)\n\n # read data\n if \".npz\" in sparam_file[0] or \".npz\" in sparam_file[-1]:\n npzfile = [each for each in sparam_file if \".npz\" in each][0]\n tempdata = np.load(filedir / npzfile)\n sdata = (tempdata[\"f\"], tempdata[\"s\"])\n npz_file = npzfile\n\n else:\n if verbose:\n print(\"numpy datafile not found. 
reading sparam file instead..\")\n\n sdata = universal_sparam_filereader(nports, sparam_file[-1], filedir, \"auto\")\n # create npz file name\n npz_file = sparam_file[-1].split(\".\")[0]\n\n # save as npz file\n np.savez(filedir / npz_file, f=sdata[0], s=sdata[1])\n\n # update xml file\n sparam_file.append(npz_file + \".npz\")\n sparam_file = list(set(sparam_file))\n\n for each in node.iter(\"value\"):\n if each.attrib[\"name\"] == sparam_attr:\n each.text = \";\".join(sparam_file)\n xml.write(filedir / lutfilename)\n\n if verbose:\n print(\"SParam data extracted in \", time.time() - start)\n return (sdata, npz_file)\n\n\ndef NetlistProcessor(spice_filepath, Network, libraries, c_, circuitData, verbose=True):\n \"\"\"\n Processes a spice netlist to setup and simulate a circuit.\n\n Args:\n spice_filepath: Path to the spice netlist file.\n Network:\n\n \"\"\"\n if verbose:\n for key, value in circuitData.items():\n print(key, str(value))\n\n # define frequency range and resolution\n freq = np.linspace(\n c_ / circuitData[\"sim_params\"][0],\n c_ / circuitData[\"sim_params\"][1],\n circuitData[\"sim_params\"][2],\n )\n\n # create a circuit\n subckt = Network(network_id=circuitData[\"networkID\"], f=freq)\n # get library\n all_libraries = dict(\n [\n each\n for each in inspect.getmembers(libraries, inspect.ismodule)\n if each[0][0] != \"_\"\n ]\n )\n libs_comps = {}\n for each_lib in list(set(circuitData[\"compLibs\"])):\n # temp_comps = dict(inspect.getmembers(all_libraries[each_lib], inspect.isclass))\n libs_comps[each_lib] = all_libraries[each_lib].component_factory\n\n # add circuit components\n for i in range(len(circuitData[\"compModels\"])):\n\n # get component model\n comp_model = libs_comps[circuitData[\"compLibs\"][i]][\n circuitData[\"compModels\"][i]\n ]\n # clean attributes\n cls_attrs = deepcopy(comp_model.cls_attrs) # class attributes\n comp_attrs = circuitData[\"compAttrs\"][i] # component attributes\n # clean up attributes\n for each_cls_attrs in cls_attrs.keys():\n for each_comp_attrs in comp_attrs.keys():\n if each_cls_attrs in each_comp_attrs:\n cls_attrs[each_cls_attrs] = fromSI(comp_attrs[each_comp_attrs])\n\n subckt.add_component(\n libs_comps[circuitData[\"compLibs\"][i]][circuitData[\"compModels\"][i]],\n params=cls_attrs,\n component_id=circuitData[\"compLabels\"][i],\n )\n\n # add circuit netlist\n subckt.global_netlist[circuitData[\"compLabels\"][i]] = circuitData[\n \"circuitNets\"\n ][i]\n # add unique net component connections\n subckt.current_connections = circuitData[\"circuitConns\"]\n\n return subckt\n\n\nclass netlistParser:\n \"A netlist parser to read spi files generated by SiEPIC tools\"\n\n def __init__(self, mainfile_path: PosixPath) -> None:\n self.circuitComponents = []\n self.circuitConnections = []\n self.mainfile_path = mainfile_path\n\n def readfile(self) -> Dict[str, Any]:\n filepath = self.mainfile_path\n circuitID = \"\"\n inp = \"\"\n out = \"\"\n inp_net = 0\n out_net = []\n\n circuitLabels = []\n circuitModels = []\n circuitConns = []\n circuitNets = []\n componentLibs = []\n componentAttrs = []\n component_locations = []\n\n temp_file = open(filepath, \"r\")\n temp_lines = temp_file.readlines()\n\n free_node_idx = -1\n\n freq_data = []\n seek_component = 0\n seek_ona = 0\n orthogonal_ID = 0\n\n # extract circuit connectivity\n for each_line in temp_lines:\n each_line = re.sub(\" +\", \" \", each_line.strip()) # remove empty lines\n if each_line.startswith(\"*\"):\n continue\n else:\n each_line = \"\".join(\n [\n \"\".join(filter(None, 
each_section.split(\" \")))\n if ('\"' in each_section)\n else each_section\n for each_section in re.split(\n r\"\"\"(\"[^\"]*\"|'[^']*')\"\"\", each_line\n )\n ]\n )\n temp_data = each_line.split(\" \")\n\n if len(temp_data) > 1: # if line is not an empty one\n\n MC_location = []\n\n if temp_data[0] == \".subckt\":\n circuitID = temp_data[1]\n inp = temp_data[2]\n out = [temp_data[x] for x in range(3, len(temp_data))]\n seek_component = 1\n\n elif temp_data[0] == \".param\":\n continue\n\n elif temp_data[0] == \".ends\":\n seek_component = 0\n\n elif temp_data[0] == \".ona\":\n seek_ona = 1\n\n elif seek_ona == 1:\n # ONA related data\n if len(temp_data) < 3:\n temp_data = [0] + temp_data[-1].split(\"=\")\n\n if temp_data[1] == \"orthogonal_identifier\":\n orthogonal_ID = int(temp_data[-1])\n\n elif temp_data[1] == \"start\":\n freq_data.append(float(temp_data[-1]))\n\n elif temp_data[1] == \"stop\":\n freq_data.append(float(temp_data[-1]))\n\n elif temp_data[1] == \"number_of_points\":\n freq_data.append(int(temp_data[-1]))\n\n elif seek_component == 1:\n # otherwise its component data\n circuitLabels.append(temp_data[0])\n temp_ports = []\n found_ports = 0\n found_library = 0\n for i in range(1, len(temp_data)):\n # if its an optical port\n if (\n \"N$\" in temp_data[i]\n and \"N$None\".lower() != temp_data[i].lower()\n ):\n temp_ports.append(int(temp_data[i].replace(\"N$\", \"\")))\n found_ports = 1\n\n elif \"N$None\".lower() == temp_data[i].lower():\n temp_ports.append(free_node_idx)\n free_node_idx -= 1\n found_ports = 1\n\n elif inp == temp_data[i]:\n temp_ports.append(free_node_idx)\n inp_net = free_node_idx\n free_node_idx -= 1\n found_ports = 1\n\n elif out[0] == temp_data[i]:\n temp_ports.append(free_node_idx)\n out_net.append(free_node_idx)\n free_node_idx -= 1\n\n if len(out) > 1:\n out.pop(0)\n\n if len(out) == 0:\n found_ports = 1\n\n elif found_ports == 1 and \"N$\" not in temp_data[i]:\n circuitModels.append(temp_data[i])\n temp_cls_atrr = (\n {}\n ) # deepcopy(lib[temp_data[i]].cls_attrs)\n found_ports = -1\n\n elif \"lay\" in temp_data[i] or \"sch\" in temp_data[i]:\n if \"lay\" in temp_data[i]:\n MC_location.append(\n fromSI(temp_data[i].split(\"=\")[-1]) * 1e6\n )\n\n # ignore layout and schematic position data for now.\n # adapt opics models to accept this data\n # they are component parameters\n elif \"library\" in temp_data[i]:\n # cprint(temp_data[i])\n temp_lib = (\n temp_data[i].replace('\"', \"\").split(\"=\")[1].split()\n )\n componentLibs.append(\n temp_lib[-1].split(\"/\")[-1].lower()\n )\n found_library = 1\n\n elif \"=\" in temp_data[i] and found_library == 1:\n # if its a components' attribute\n temp_attr = temp_data[i].split(\"=\")\n # print(temp_attr[0])\n # if(temp_attr[0] in temp_cls_atrr):\n temp_cls_atrr[temp_attr[0]] = temp_attr[1].strip('\"')\n\n componentAttrs.append(temp_cls_atrr)\n circuitNets.append(temp_ports)\n if bool(MC_location):\n component_locations.append(MC_location)\n\n circuitConns = list(set(list(itertools.chain(*circuitNets))))\n # remove IOs from component connections' list\n circuitConns = [each for each in circuitConns if each >= 0]\n\n # return all data\n return {\n \"circuitNets\": circuitNets,\n \"circuitConns\": circuitConns,\n \"compLibs\": componentLibs,\n \"compModels\": circuitModels,\n \"compLabels\": circuitLabels,\n \"compAttrs\": componentAttrs,\n \"compLocs\": component_locations,\n \"networkID\": circuitID,\n \"inp_net\": inp_net,\n \"out_net\": out_net,\n \"sim_params\": freq_data,\n \"OID\": orthogonal_ID,\n }\n"
] |
[
[
"numpy.savez",
"numpy.linspace",
"numpy.exp",
"numpy.load",
"numpy.array",
"numpy.zeros"
]
] |
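The "apis" field above lists the NumPy calls extracted from the preceding netlist-parser entry (numpy.savez, numpy.linspace, numpy.exp, numpy.load, numpy.array, numpy.zeros). Below is a minimal, hypothetical sketch — not taken from that repository — showing the same six calls used together; the file name "sweep.npz" and the array values are illustrative assumptions.

import numpy as np

# illustrative frequency-sweep style data (values are made up)
freq = np.linspace(1.0, 10.0, 5)            # numpy.linspace
response = np.exp(-freq)                    # numpy.exp
padding = np.zeros(len(freq))               # numpy.zeros
table = np.array([freq, response])          # numpy.array
np.savez("sweep.npz", table=table, padding=padding)  # numpy.savez
loaded = np.load("sweep.npz")               # numpy.load
print(loaded["table"].shape)                # (2, 5)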
collector-m/H-23D_R-CNN
|
[
"40c89c7a6910b738f7e4ed1d0dbb32b1ca99a016"
] |
[
"pcdet/models/model_utils/layers.py"
] |
[
"import torch\nimport torch.nn as nn\n\n\nclass ConvBNReLU(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, eps=1e-3, momentum=0.01):\n super().__init__()\n self.block = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=kernel_size//2, bias=False),\n nn.BatchNorm2d(out_channels, eps=eps, momentum=momentum),\n nn.ReLU(inplace=True)\n )\n \n def forward(self, x):\n out = self.block(x)\n return out\n\n\nclass SeparateConvBNReLU(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, eps=1e-3, momentum=0.01):\n super().__init__()\n self.block = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, \\\n padding=kernel_size//2, groups=in_channels, bias=False),\n nn.BatchNorm2d(out_channels, eps=eps, momentum=momentum),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, 1, stride=1, padding=0, bias=False),\n nn.BatchNorm2d(out_channels, eps=eps, momentum=momentum),\n nn.ReLU(inplace=True)\n )\n \n def forward(self, x):\n out = self.block(x)\n return out"
] |
[
[
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
]
] |
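For the collector-m/H-23D_R-CNN row above, the extracted calls are torch.nn.Conv2d, torch.nn.BatchNorm2d and torch.nn.ReLU. A hedged, standalone sketch of the same Conv-BN-ReLU stacking pattern follows; the channel sizes and input shape are illustrative assumptions, not values from the repository.

import torch
import torch.nn as nn

# a Conv -> BatchNorm -> ReLU stack, analogous in spirit to the ConvBNReLU block above
block = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),  # torch.nn.Conv2d
    nn.BatchNorm2d(16, eps=1e-3, momentum=0.01),                       # torch.nn.BatchNorm2d
    nn.ReLU(inplace=True),                                             # torch.nn.ReLU
)
out = block(torch.randn(2, 3, 32, 32))
print(out.shape)  # torch.Size([2, 16, 32, 32])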
bfirsh/StyleCLIP
|
[
"164fa8497ea91ea184c0488fcc5e3e14f709561a"
] |
[
"global_directions/cog_predict.py"
] |
[
"import tempfile\nfrom pathlib import Path\nimport os\nfrom argparse import Namespace\nimport time\nimport dlib\nimport os\nimport sys\nimport numpy as np\nfrom PIL import Image\nimport torch\nimport torchvision.transforms as transforms\nimport tensorflow as tf\nimport numpy as np\nimport torch\nimport clip\nfrom PIL import Image\nimport pickle\nimport copy\nimport matplotlib.pyplot as plt\nfrom MapTS import GetFs, GetBoundary, GetDt\nfrom manipulate import Manipulator\nfrom dnnlib import tflib\n\nsys.path.insert(0, \"/content\")\nsys.path.insert(0, \"/content/encoder4editing\")\n\nfrom encoder4editing.utils.common import tensor2im\nfrom encoder4editing.utils.alignment import align_face\nfrom encoder4editing.models.psp import pSp\n\nimport cog\n\n\nclass Model(cog.Model):\n def setup(self):\n print(\"starting setup\")\n\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.model, self.preprocess = clip.load(\n \"ViT-B/32\", device=self.device, jit=False\n )\n\n self.graph = tf.get_default_graph()\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\n self.sess = tf.Session(\n graph=self.graph, config=tf.ConfigProto(gpu_options=gpu_options)\n )\n\n experiment_type = \"ffhq_encode\"\n\n self.experiment_args = {\"model_path\": \"e4e_ffhq_encode.pt\"}\n self.experiment_args[\"transform\"] = transforms.Compose(\n [\n transforms.Resize((256, 256)),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),\n ]\n )\n self.resize_dims = (256, 256)\n\n model_path = self.experiment_args[\"model_path\"]\n\n ckpt = torch.load(model_path, map_location=\"cpu\")\n opts = ckpt[\"opts\"]\n # pprint.pprint(opts) # Display full options used\n # update the training options\n opts[\"checkpoint_path\"] = model_path\n opts = Namespace(**opts)\n\n self.net = pSp(opts)\n self.net.eval()\n self.net.cuda()\n\n self.shape_predictor = dlib.shape_predictor(\n \"/content/shape_predictor_68_face_landmarks.dat\"\n )\n\n with self.graph.as_default(), self.sess.as_default():\n #tflib.init_tf()\n\n self.M = Manipulator(dataset_name=\"ffhq\", sess=self.sess)\n self.fs3 = np.load(\"./npy/ffhq/fs3.npy\")\n np.set_printoptions(suppress=True)\n\n print(\"setup complete\")\n\n @cog.input(\"input\", type=Path, help=\"Input image\")\n @cog.input(\"neutral\", type=str, help=\"Neutral image description\")\n @cog.input(\"target\", type=str, help=\"Target image description\")\n @cog.input(\n \"manipulation_strength\",\n type=float,\n min=-10,\n max=10,\n default=4.1,\n help=\"The higher the manipulation strength, the closer the generated image becomes to the target description. Negative values moves the generated image further from the target description\",\n )\n @cog.input(\n \"disentanglement_threshold\",\n type=float,\n min=0.08,\n max=0.3,\n default=0.15,\n help=\"The higher the disentanglement threshold, the more specific the changes are to the target attribute. 
Lower values mean that broader changes are made to the input image\",\n )\n def predict(\n self,\n input,\n neutral,\n target,\n manipulation_strength,\n disentanglement_threshold,\n ):\n # @title Align image\n original_image = Image.open(str(input))\n original_image = original_image.convert(\"RGB\")\n input_image = self.run_alignment(str(input))\n input_image = original_image\n input_image.resize(self.resize_dims)\n\n img_transforms = self.experiment_args[\"transform\"]\n transformed_image = img_transforms(input_image)\n\n with torch.no_grad():\n images, latents = self.run_on_batch(transformed_image.unsqueeze(0))\n result_image, latent = images[0], latents[0]\n\n print(\"latents\", latents)\n\n print(transformed_image.shape, result_image.shape)\n\n w_plus = latents.cpu().detach().numpy()\n with self.graph.as_default(), self.sess.as_default():\n dlatents_loaded = self.M.W2S(w_plus)\n\n #print(\"w_plus, dlatents_loaded\", w_plus, dlatents_loaded)\n\n img_index = 0\n w_plus=latents.cpu().detach().numpy()\n with self.graph.as_default(), self.sess.as_default():\n dlatents_loaded=self.M.W2S(w_plus)\n\n img_indexs=[img_index]\n dlatent_tmp=[tmp[img_indexs] for tmp in dlatents_loaded]\n with self.graph.as_default(), self.sess.as_default():\n self.M.num_images = len(img_indexs)\n self.M.alpha = [0]\n self.M.manipulate_layers = [0]\n\n with self.graph.as_default(), self.sess.as_default():\n codes, out = self.M.EditOneC(0, dlatent_tmp)\n\n original = Image.fromarray(out[0, 0]).resize((512, 512))\n with self.graph.as_default(), self.sess.as_default():\n self.M.manipulate_layers = None\n\n classnames = [target, neutral]\n dt = GetDt(classnames, self.model)\n\n with self.graph.as_default(), self.sess.as_default():\n self.M.alpha = [manipulation_strength]\n boundary_tmp2, c = GetBoundary(\n self.fs3, dt, self.M, threshold=disentanglement_threshold\n )\n codes = self.M.MSCode(dlatent_tmp, boundary_tmp2)\n out = self.M.GenerateImg(codes)\n generated = Image.fromarray(out[0, 0]) # .resize((512,512))\n\n out_path = Path(tempfile.mkdtemp()) / \"out.jpg\"\n generated.save(str(out_path))\n\n return out_path\n\n def run_alignment(self, image_path):\n aligned_image = align_face(filepath=image_path, predictor=self.shape_predictor)\n print(\"Aligned image has shape: {}\".format(aligned_image.size))\n return aligned_image\n\n def run_on_batch(self, inputs):\n images, latents = self.net(\n inputs.to(\"cuda\").float(), randomize_noise=False, return_latents=True\n )\n return images, latents\n\n\ndef concat_images(*images):\n width = 0\n for im in images:\n width += im.width\n height = max([im.height for im in images])\n concat = Image.new(\"RGB\", (width, height))\n offset = 0\n for im in images:\n concat.paste(im, (offset, 0))\n offset += im.width\n return concat\n"
] |
[
[
"torch.load",
"numpy.set_printoptions",
"numpy.load",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"torch.no_grad",
"torch.cuda.is_available",
"tensorflow.get_default_graph"
]
] |
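The bfirsh/StyleCLIP row above mixes PyTorch calls with TensorFlow 1.x session management (tensorflow.get_default_graph, tensorflow.GPUOptions, tensorflow.ConfigProto). A minimal sketch of that TF1 setup pattern, assuming a TensorFlow 1.x installation; the memory fraction mirrors the value in the entry and the rest is illustrative.

import tensorflow as tf  # assumption: TensorFlow 1.x API

graph = tf.get_default_graph()                                       # tensorflow.get_default_graph
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)   # tensorflow.GPUOptions
sess = tf.Session(graph=graph, config=tf.ConfigProto(gpu_options=gpu_options))  # tensorflow.ConfigProto
with graph.as_default(), sess.as_default():
    pass  # graph construction / model loading would go here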
CSchulzeTLK/FMPy
|
[
"fde192346c36eb69dbaca60a96e80cdc8ef37b89",
"fde192346c36eb69dbaca60a96e80cdc8ef37b89"
] |
[
"fmpy/gui/MainWindow.py",
"fmpy/ssp/simulation.py"
] |
[
"\"\"\" Entry point for the graphical user interface \"\"\"\r\n\r\ntry:\r\n from . import compile_resources\r\n compile_resources()\r\nexcept Exception as e:\r\n print(\"Failed to compiled resources. %s\" % e)\r\n\r\nimport os\r\nimport sys\r\n\r\nfrom PyQt5.QtCore import QCoreApplication, QDir, Qt, pyqtSignal, QUrl, QSettings, QPoint, QTimer, QStandardPaths, \\\r\n QPointF, QBuffer, QIODevice\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QWidget, QLineEdit, QComboBox, QFileDialog, QLabel, QVBoxLayout, QMenu, QMessageBox, QProgressDialog, QProgressBar, QDialog, QGraphicsScene, QGraphicsItemGroup, QGraphicsRectItem, QGraphicsTextItem, QGraphicsPathItem\r\nfrom PyQt5.QtGui import QDesktopServices, QPixmap, QIcon, QDoubleValidator, QColor, QFont, QPen, QFontMetricsF, QPolygonF, QPainterPath\r\n\r\nfrom fmpy.gui.generated.MainWindow import Ui_MainWindow\r\nimport fmpy\r\nfrom fmpy import read_model_description, supported_platforms, platform\r\nfrom fmpy.model_description import ScalarVariable\r\nfrom fmpy.util import can_simulate\r\n\r\nfrom fmpy.gui.model import VariablesTableModel, VariablesTreeModel, VariablesModel, VariablesFilterModel\r\nfrom fmpy.gui.log import Log, LogMessagesFilterProxyModel\r\n\r\nQCoreApplication.setApplicationVersion(fmpy.__version__)\r\nQCoreApplication.setOrganizationName(\"CATIA-Systems\")\r\nQCoreApplication.setApplicationName(\"FMPy\")\r\n\r\nimport pyqtgraph as pg\r\n\r\npg.setConfigOptions(background='w', foreground='k', antialias=True)\r\n\r\nCOLLAPSABLE_COLUMNS = ['Type', 'Value Reference', 'Initial', 'Causality', 'Variability', 'Min', 'Max']\r\n\r\n\r\nclass ClickableLabel(QLabel):\r\n \"\"\" A QLabel that shows a pointing hand cursor and emits a *clicked* event when clicked \"\"\"\r\n\r\n clicked = pyqtSignal()\r\n\r\n def __init__(self, parent=None):\r\n super(ClickableLabel, self).__init__(parent)\r\n self.setCursor(Qt.PointingHandCursor)\r\n\r\n def mousePressEvent(self, ev):\r\n self.clicked.emit()\r\n super(ClickableLabel, self).mousePressEvent(ev)\r\n\r\n\r\nclass AboutDialog(QDialog):\r\n\r\n def __init__(self, parent=None):\r\n super(AboutDialog, self).__init__(parent)\r\n\r\n from .generated.AboutDialog import Ui_Dialog\r\n from .. 
import __version__, platform, __file__\r\n import sys\r\n import os\r\n\r\n self.ui = Ui_Dialog()\r\n self.ui.setupUi(self)\r\n\r\n # hide the question mark button\r\n flags = self.windowFlags()\r\n flags &= ~Qt.WindowContextHelpButtonHint\r\n flags |= Qt.MSWindowsFixedSizeDialogHint\r\n self.setWindowFlags(flags)\r\n\r\n self.ui.fmpyVersionLabel.setText(__version__)\r\n self.ui.fmiPlatformLabel.setText(platform)\r\n self.ui.installationPathLabel.setText(os.path.dirname(__file__))\r\n self.ui.pythonInterpreterLabel.setText(sys.executable)\r\n self.ui.pythonVersionLabel.setText(sys.version)\r\n\r\n\r\nclass MainWindow(QMainWindow):\r\n\r\n variableSelected = pyqtSignal(ScalarVariable, name='variableSelected')\r\n variableDeselected = pyqtSignal(ScalarVariable, name='variableDeselected')\r\n windows = []\r\n windowOffset = QPoint()\r\n\r\n def __init__(self, parent=None):\r\n super(MainWindow, self).__init__(parent)\r\n\r\n # save from garbage collection\r\n self.windows.append(self)\r\n\r\n # state\r\n self.filename = None\r\n self.result = None\r\n self.modelDescription = None\r\n self.variables = dict()\r\n self.selectedVariables = set()\r\n self.startValues = dict()\r\n self.simulationThread = None\r\n # self.progressDialog = None\r\n self.plotUpdateTimer = QTimer(self)\r\n self.plotUpdateTimer.timeout.connect(self.updatePlotData)\r\n self.curves = []\r\n\r\n # UI\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n\r\n self.showColumnActions = {}\r\n\r\n # use a smaller default font size on Mac and Linux\r\n if sys.platform in ['darwin', 'linux']:\r\n defaultFont = QFont()\r\n defaultFont.setPixelSize(11)\r\n QApplication.setFont(defaultFont)\r\n self.setStyleSheet(\"QWidget { font-size: 11px; }\")\r\n\r\n self.ui.treeView.setAttribute(Qt.WA_MacShowFocusRect, False)\r\n self.ui.tableView.setAttribute(Qt.WA_MacShowFocusRect, False)\r\n self.ui.logTreeView.setAttribute(Qt.WA_MacShowFocusRect, False)\r\n\r\n # set the window size to 85% of the available space\r\n geo = QApplication.desktop().availableGeometry()\r\n width = min(geo.width() * 0.85, 1100.0)\r\n height = min(geo.height() * 0.85, 900.0)\r\n self.resize(int(width), int(height))\r\n\r\n # hide the variables\r\n self.ui.dockWidget.hide()\r\n\r\n # toolbar\r\n self.stopTimeLineEdit = QLineEdit(\"1\")\r\n self.stopTimeLineEdit.setToolTip(\"Stop time\")\r\n self.stopTimeLineEdit.setFixedWidth(50)\r\n self.stopTimeValidator = QDoubleValidator(self)\r\n self.stopTimeValidator.setBottom(0)\r\n self.stopTimeLineEdit.setValidator(self.stopTimeValidator)\r\n\r\n self.ui.toolBar.addWidget(self.stopTimeLineEdit)\r\n\r\n spacer = QWidget(self)\r\n spacer.setFixedWidth(10)\r\n self.ui.toolBar.addWidget(spacer)\r\n\r\n self.fmiTypeComboBox = QComboBox(self)\r\n self.fmiTypeComboBox.addItem(\"Co-Simulation\")\r\n self.fmiTypeComboBox.setToolTip(\"FMI type\")\r\n self.fmiTypeComboBox.setSizeAdjustPolicy(QComboBox.AdjustToContents)\r\n self.ui.toolBar.addWidget(self.fmiTypeComboBox)\r\n\r\n # disable widgets\r\n self.ui.actionLoadStartValues.setEnabled(False)\r\n self.ui.actionReload.setEnabled(False)\r\n self.ui.actionSettings.setEnabled(False)\r\n self.ui.actionShowLog.setEnabled(False)\r\n self.ui.actionShowResults.setEnabled(False)\r\n self.ui.actionSimulate.setEnabled(False)\r\n self.ui.actionSaveResult.setEnabled(False)\r\n self.ui.actionSavePlottedResult.setEnabled(False)\r\n self.stopTimeLineEdit.setEnabled(False)\r\n self.fmiTypeComboBox.setEnabled(False)\r\n\r\n # hide the dock's title bar\r\n 
self.ui.dockWidget.setTitleBarWidget(QWidget())\r\n\r\n self.ui.dockWidgetContents.setMinimumWidth(500)\r\n\r\n self.tableModel = VariablesTableModel(self.selectedVariables, self.startValues)\r\n self.tableFilterModel = VariablesFilterModel()\r\n self.tableFilterModel.setSourceModel(self.tableModel)\r\n self.tableFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)\r\n self.ui.tableView.setModel(self.tableFilterModel)\r\n\r\n self.treeModel = VariablesTreeModel(self.selectedVariables, self.startValues)\r\n self.treeFilterModel = VariablesFilterModel()\r\n self.treeFilterModel.setSourceModel(self.treeModel)\r\n self.treeFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)\r\n self.ui.treeView.setModel(self.treeFilterModel)\r\n\r\n for i, (w, n) in enumerate(zip(VariablesModel.COLUMN_WIDTHS, VariablesModel.COLUMN_NAMES)):\r\n self.ui.treeView.setColumnWidth(i, w)\r\n self.ui.tableView.setColumnWidth(i, w)\r\n\r\n self.hideAllColumns()\r\n\r\n # populate the recent files list\r\n settings = QSettings()\r\n recent_files = settings.value(\"recentFiles\", defaultValue=[])\r\n recent_files = self.removeDuplicates(recent_files)\r\n vbox = QVBoxLayout()\r\n\r\n if recent_files:\r\n added = set()\r\n for file in recent_files[:5]:\r\n link = QLabel('<a href=\"%s\" style=\"text-decoration: none\">%s</a>' % (file, os.path.basename(file)))\r\n link.setToolTip(file)\r\n link.linkActivated.connect(self.load)\r\n vbox.addWidget(link)\r\n added.add(file)\r\n\r\n self.ui.recentFilesGroupBox.setLayout(vbox)\r\n self.ui.recentFilesGroupBox.setVisible(len(recent_files) > 0)\r\n\r\n # settings page\r\n self.inputFileMenu = QMenu()\r\n self.inputFileMenu.addAction(\"New input file...\", self.createInputFile)\r\n self.inputFileMenu.addSeparator()\r\n self.inputFileMenu.addAction(\"Show in Explorer\", self.showInputFileInExplorer)\r\n self.inputFileMenu.addAction(\"Open in default application\", self.openInputFile)\r\n self.ui.selectInputButton.setMenu(self.inputFileMenu)\r\n\r\n # log page\r\n self.log = Log(self)\r\n self.logFilterModel = LogMessagesFilterProxyModel(self)\r\n self.logFilterModel.setSourceModel(self.log)\r\n self.logFilterModel.setFilterCaseSensitivity(Qt.CaseInsensitive)\r\n self.ui.logTreeView.setModel(self.logFilterModel)\r\n self.ui.clearLogButton.clicked.connect(self.log.clear)\r\n\r\n self.log.numberOfDebugMessagesChanged.connect(lambda n: self.ui.showDebugMessagesButton.setText(str(n)))\r\n self.log.numberOfInfoMessagesChanged.connect(lambda n: self.ui.showInfoMessagesButton.setText(str(n)))\r\n self.log.numberOfWarningMessagesChanged.connect(lambda n: self.ui.showWarningMessagesButton.setText(str(n)))\r\n self.log.numberOfErrorMessagesChanged.connect(lambda n: self.ui.showErrorMessagesButton.setText(str(n)))\r\n\r\n self.ui.logFilterLineEdit.textChanged.connect(self.logFilterModel.setFilterFixedString)\r\n\r\n self.ui.showDebugMessagesButton.toggled.connect(self.logFilterModel.setShowDebugMessages)\r\n self.ui.showInfoMessagesButton.toggled.connect(self.logFilterModel.setShowInfoMessages)\r\n self.ui.showWarningMessagesButton.toggled.connect(self.logFilterModel.setShowWarningMessages)\r\n self.ui.showErrorMessagesButton.toggled.connect(self.logFilterModel.setShowErrorMessages)\r\n\r\n # context menu\r\n self.contextMenu = QMenu()\r\n self.actionExpandAll = self.contextMenu.addAction(\"Expand all\")\r\n self.actionExpandAll.triggered.connect(self.ui.treeView.expandAll)\r\n self.actionCollapseAll = self.contextMenu.addAction(\"Collapse all\")\r\n 
self.actionCollapseAll.triggered.connect(self.ui.treeView.collapseAll)\r\n self.contextMenu.addSeparator()\r\n self.actionCopyVariableName = self.contextMenu.addAction(\"Copy Variable Name\", self.copyVariableName)\r\n self.actionCopyValueReference = self.contextMenu.addAction(\"Copy Value Reference\", self.copyValueReference)\r\n self.contextMenu.addSeparator()\r\n self.actionEditTable = self.contextMenu.addAction(\"Edit Table\", self.editTable)\r\n self.contextMenu.addSeparator()\r\n self.columnsMenu = self.contextMenu.addMenu('Columns')\r\n action = self.columnsMenu.addAction('Show All')\r\n action.triggered.connect(self.showAllColumns)\r\n action = self.columnsMenu.addAction('Hide All')\r\n action.triggered.connect(self.hideAllColumns)\r\n self.columnsMenu.addSeparator()\r\n for column in COLLAPSABLE_COLUMNS:\r\n action = self.columnsMenu.addAction(column)\r\n action.setCheckable(True)\r\n action.toggled.connect(lambda show, col=column: self.showColumn(col, show))\r\n self.showColumnActions[column] = action\r\n self.contextMenu.addSeparator()\r\n self.actionClearPlots = self.contextMenu.addAction(\"Clear Plots\", self.clearPlots)\r\n\r\n # file menu\r\n self.ui.actionExit.triggered.connect(QApplication.closeAllWindows)\r\n self.ui.actionLoadStartValues.triggered.connect(self.loadStartValues)\r\n self.ui.actionReload.triggered.connect(lambda: self.load(self.filename))\r\n self.ui.actionSaveChanges.triggered.connect(self.saveChanges)\r\n\r\n # tools menu\r\n self.ui.actionValidateFMU.triggered.connect(self.validateFMU)\r\n self.ui.actionCompileDarwinBinary.triggered.connect(lambda: self.compilePlatformBinary('darwin64'))\r\n self.ui.actionCompileLinuxBinary.triggered.connect(lambda: self.compilePlatformBinary('linux64'))\r\n self.ui.actionCompileWin32Binary.triggered.connect(lambda: self.compilePlatformBinary('win32'))\r\n self.ui.actionCompileWin64Binary.triggered.connect(lambda: self.compilePlatformBinary('win64'))\r\n self.ui.actionCreateJupyterNotebook.triggered.connect(self.createJupyterNotebook)\r\n self.ui.actionCreateCMakeProject.triggered.connect(self.createCMakeProject)\r\n self.ui.actionAddWindows32Remoting.triggered.connect(lambda: self.addRemotingBinaries('win64', 'win32'))\r\n self.ui.actionAddLinux64Remoting.triggered.connect(lambda: self.addRemotingBinaries('linux64', 'win64'))\r\n self.ui.actionAddCoSimulationWrapper.triggered.connect(self.addCoSimulationWrapper)\r\n\r\n # help menu\r\n self.ui.actionOpenFMI1SpecCS.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://fmi-standard.org/assets/releases/FMI_for_CoSimulation_v1.0.1.pdf')))\r\n self.ui.actionOpenFMI1SpecME.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://fmi-standard.org/assets/releases/FMI_for_ModelExchange_v1.0.1.pdf')))\r\n self.ui.actionOpenFMI2Spec.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://github.com/modelica/fmi-standard/releases/download/v2.0.3/FMI-Specification-2.0.3.pdf')))\r\n self.ui.actionOpenTestFMUs.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://github.com/modelica/fmi-cross-check/tree/master/fmus')))\r\n self.ui.actionOpenWebsite.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://github.com/CATIA-Systems/FMPy')))\r\n self.ui.actionShowReleaseNotes.triggered.connect(lambda: QDesktopServices.openUrl(QUrl('https://fmpy.readthedocs.io/en/latest/changelog/')))\r\n\r\n # filter menu\r\n self.filterMenu = QMenu()\r\n self.filterMenu.addAction(self.ui.actionFilterInputs)\r\n 
self.filterMenu.addAction(self.ui.actionFilterOutputs)\r\n self.filterMenu.addAction(self.ui.actionFilterParameters)\r\n self.filterMenu.addAction(self.ui.actionFilterCalculatedParameters)\r\n self.filterMenu.addAction(self.ui.actionFilterIndependentVariables)\r\n self.filterMenu.addAction(self.ui.actionFilterLocalVariables)\r\n self.ui.filterToolButton.setMenu(self.filterMenu)\r\n\r\n # status bar\r\n self.statusIconLabel = ClickableLabel(self)\r\n self.statusIconLabel.setStyleSheet(\"QLabel { margin-left: 5px; }\")\r\n self.statusIconLabel.clicked.connect(lambda: self.setCurrentPage(self.ui.logPage))\r\n self.ui.statusBar.addPermanentWidget(self.statusIconLabel)\r\n\r\n self.statusTextLabel = ClickableLabel(self)\r\n self.statusTextLabel.setMinimumWidth(10)\r\n self.statusTextLabel.clicked.connect(lambda: self.setCurrentPage(self.ui.logPage))\r\n self.ui.statusBar.addPermanentWidget(self.statusTextLabel)\r\n\r\n self.ui.statusBar.addPermanentWidget(QWidget(self), 1) # spacer\r\n\r\n self.simulationProgressBar = QProgressBar(self)\r\n self.simulationProgressBar.setFixedHeight(18)\r\n self.ui.statusBar.addPermanentWidget(self.simulationProgressBar)\r\n self.simulationProgressBar.setVisible(False)\r\n\r\n # connect signals and slots\r\n self.ui.actionNewWindow.triggered.connect(self.newWindow)\r\n self.ui.openButton.clicked.connect(self.open)\r\n self.ui.actionOpen.triggered.connect(self.open)\r\n self.ui.actionSaveResult.triggered.connect(self.saveResult)\r\n self.ui.actionSavePlottedResult.triggered.connect(lambda: self.saveResult(plotted=True))\r\n self.ui.actionSimulate.triggered.connect(self.startSimulation)\r\n self.ui.actionSettings.triggered.connect(lambda: self.setCurrentPage(self.ui.settingsPage))\r\n self.ui.actionShowLog.triggered.connect(lambda: self.setCurrentPage(self.ui.logPage))\r\n self.ui.actionShowResults.triggered.connect(lambda: self.setCurrentPage(self.ui.resultPage))\r\n self.fmiTypeComboBox.currentTextChanged.connect(self.updateSimulationSettings)\r\n self.ui.solverComboBox.currentTextChanged.connect(self.updateSimulationSettings)\r\n self.variableSelected.connect(self.updatePlotLayout)\r\n self.variableDeselected.connect(self.updatePlotLayout)\r\n self.tableModel.variableSelected.connect(self.selectVariable)\r\n self.tableModel.variableDeselected.connect(self.deselectVariable)\r\n self.treeModel.variableSelected.connect(self.selectVariable)\r\n self.treeModel.variableDeselected.connect(self.deselectVariable)\r\n self.ui.filterLineEdit.textChanged.connect(self.treeFilterModel.setFilterFixedString)\r\n self.ui.filterLineEdit.textChanged.connect(self.tableFilterModel.setFilterFixedString)\r\n self.ui.filterToolButton.toggled.connect(self.treeFilterModel.setFilterByCausality)\r\n self.ui.filterToolButton.toggled.connect(self.tableFilterModel.setFilterByCausality)\r\n self.log.currentMessageChanged.connect(self.setStatusMessage)\r\n self.ui.selectInputButton.clicked.connect(self.selectInputFile)\r\n self.ui.actionShowAboutDialog.triggered.connect(self.showAboutDialog)\r\n\r\n if os.name == 'nt':\r\n self.ui.actionCreateDesktopShortcut.triggered.connect(self.createDesktopShortcut)\r\n self.ui.actionAddFileAssociation.triggered.connect(self.addFileAssociation)\r\n else:\r\n self.ui.actionCreateDesktopShortcut.setEnabled(False)\r\n self.ui.actionAddFileAssociation.setEnabled(False)\r\n\r\n self.ui.tableViewToolButton.toggled.connect(lambda show: self.ui.variablesStackedWidget.setCurrentWidget(self.ui.tablePage if show else self.ui.treePage))\r\n\r\n for model in 
[self.treeFilterModel, self.tableFilterModel]:\r\n self.ui.actionFilterInputs.triggered.connect(model.setFilterInputs)\r\n self.ui.actionFilterOutputs.triggered.connect(model.setFilterOutputs)\r\n self.ui.actionFilterParameters.triggered.connect(model.setFilterParameters)\r\n self.ui.actionFilterCalculatedParameters.triggered.connect(model.setFilterCalculatedParameters)\r\n self.ui.actionFilterIndependentVariables.triggered.connect(model.setFilterIndependentVariables)\r\n self.ui.actionFilterLocalVariables.triggered.connect(model.setFilterLocalVariables)\r\n\r\n self.ui.treeView.customContextMenuRequested.connect(self.showContextMenu)\r\n self.ui.tableView.customContextMenuRequested.connect(self.showContextMenu)\r\n\r\n def newWindow(self):\r\n window = MainWindow()\r\n window.show()\r\n\r\n def show(self):\r\n super(MainWindow, self).show()\r\n self.move(self.frameGeometry().topLeft() + self.windowOffset)\r\n self.windowOffset += QPoint(20, 20)\r\n\r\n def showContextMenu(self, point):\r\n \"\"\" Update and show the variables context menu \"\"\"\r\n\r\n from .TableDialog import TableDialog\r\n\r\n if self.ui.variablesStackedWidget.currentWidget() == self.ui.treePage:\r\n currentView = self.ui.treeView\r\n else:\r\n currentView = self.ui.tableView\r\n\r\n self.actionExpandAll.setEnabled(currentView == self.ui.treeView)\r\n self.actionCollapseAll.setEnabled(currentView == self.ui.treeView)\r\n\r\n selected = self.getSelectedVariables()\r\n\r\n self.actionEditTable.setEnabled(len(selected) == 1 and TableDialog.canEdit(selected[0]))\r\n\r\n can_copy = len(selected) > 0\r\n\r\n self.actionCopyVariableName.setEnabled(can_copy)\r\n self.actionCopyValueReference.setEnabled(can_copy)\r\n\r\n self.contextMenu.exec_(currentView.mapToGlobal(point))\r\n\r\n def load(self, filename):\r\n\r\n import zipfile\r\n\r\n if not self.isVisible():\r\n self.show()\r\n\r\n try:\r\n self.modelDescription = md = read_model_description(filename)\r\n except Exception as e:\r\n QMessageBox.warning(self, \"Failed to load FMU\", \"Failed to load %s. 
%s\" % (filename, e))\r\n return\r\n\r\n # show model.png\r\n try:\r\n pixmap = QPixmap()\r\n\r\n # load the model.png\r\n with zipfile.ZipFile(filename, 'r') as zf:\r\n pixmap.loadFromData(zf.read('model.png'), format='PNG')\r\n\r\n # show the unscaled version in tooltip\r\n buffer = QBuffer()\r\n buffer.open(QIODevice.WriteOnly)\r\n pixmap.save(buffer, \"PNG\", quality=100)\r\n image = bytes(buffer.data().toBase64()).decode()\r\n html = '<img src=\"data:image/png;base64,{}\">'.format(image)\r\n self.ui.modelImageLabel.setToolTip(html)\r\n\r\n # show a scaled preview in \"Model Info\"\r\n pixmap = pixmap.scaled(200, 200, Qt.KeepAspectRatio, Qt.SmoothTransformation)\r\n self.ui.modelImageLabel.setPixmap(pixmap)\r\n except:\r\n self.ui.modelImageLabel.setPixmap(QPixmap())\r\n self.ui.modelImageLabel.setToolTip(None)\r\n\r\n self.filename = filename\r\n platforms = supported_platforms(self.filename)\r\n\r\n self.variables.clear()\r\n self.selectedVariables.clear()\r\n self.startValues.clear()\r\n\r\n for v in md.modelVariables:\r\n self.variables[v.name] = v\r\n if v.causality == 'output' and not v.dimensions:\r\n self.selectedVariables.add(v)\r\n\r\n fmi_types = []\r\n if md.coSimulation:\r\n fmi_types.append('Co-Simulation')\r\n if md.modelExchange:\r\n fmi_types.append('Model Exchange')\r\n\r\n experiment = md.defaultExperiment\r\n\r\n # toolbar\r\n if experiment is not None and experiment.stopTime is not None:\r\n self.stopTimeLineEdit.setText(str(experiment.stopTime))\r\n\r\n # actions\r\n self.ui.actionValidateFMU.setEnabled(True)\r\n\r\n can_compile = md.fmiVersion == '2.0' and 'c-code' in platforms\r\n\r\n self.ui.actionCompileDarwinBinary.setEnabled(can_compile and fmpy.system == 'darwin')\r\n self.ui.actionCompileLinuxBinary.setEnabled(can_compile and fmpy.system in ['linux', 'windows'])\r\n self.ui.actionCompileWin32Binary.setEnabled(can_compile and fmpy.system == 'windows')\r\n self.ui.actionCompileWin64Binary.setEnabled(can_compile and fmpy.system == 'windows')\r\n\r\n self.ui.actionCreateCMakeProject.setEnabled(can_compile)\r\n\r\n self.ui.actionCreateJupyterNotebook.setEnabled(True)\r\n\r\n self.ui.actionAddWindows32Remoting.setEnabled(md.fmiVersion == '2.0' and 'win32' in platforms and 'win64' not in platforms)\r\n self.ui.actionAddLinux64Remoting.setEnabled(md.fmiVersion == '2.0' and 'win64' in platforms and 'linux64' not in platforms)\r\n\r\n can_add_cswrapper = md.fmiVersion == '2.0' and md.coSimulation is None and md.modelExchange is not None\r\n self.ui.actionAddCoSimulationWrapper.setEnabled(can_add_cswrapper)\r\n\r\n # variables view\r\n self.treeModel.setModelDescription(md)\r\n self.tableModel.setModelDescription(md)\r\n self.treeFilterModel.invalidate()\r\n self.tableFilterModel.invalidate()\r\n self.ui.treeView.reset()\r\n self.ui.tableView.reset()\r\n\r\n # settings page\r\n self.ui.fmiVersionLabel.setText(md.fmiVersion)\r\n self.ui.fmiTypeLabel.setText(', '.join(fmi_types))\r\n self.ui.platformsLabel.setText(', '.join(platforms))\r\n self.ui.modelNameLabel.setText(md.modelName)\r\n self.ui.descriptionLabel.setText(md.description)\r\n self.ui.numberOfContinuousStatesLabel.setText(str(md.numberOfContinuousStates))\r\n self.ui.numberOfEventIndicatorsLabel.setText(str(md.numberOfEventIndicators))\r\n self.ui.numberOfVariablesLabel.setText(str(len(md.modelVariables)))\r\n self.ui.generationToolLabel.setText(md.generationTool)\r\n self.ui.generationDateAndTimeLabel.setText(md.generationDateAndTime)\r\n\r\n # relative tolerance\r\n if experiment is not None and 
experiment.tolerance is not None:\r\n relative_tolerance = experiment.tolerance\r\n else:\r\n relative_tolerance = 1e-6\r\n\r\n self.ui.relativeToleranceLineEdit.setText(str(relative_tolerance))\r\n\r\n # output interval\r\n if experiment is not None and experiment.stepSize is not None:\r\n output_interval = float(experiment.stepSize)\r\n while output_interval > 1000:\r\n output_interval *= 0.5\r\n else:\r\n output_interval = float(self.stopTimeLineEdit.text()) / 500\r\n\r\n self.ui.outputIntervalLineEdit.setText(str(output_interval))\r\n\r\n self.fmiTypeComboBox.clear()\r\n self.fmiTypeComboBox.addItems(fmi_types)\r\n\r\n self.updateSimulationSettings()\r\n\r\n self.setCurrentPage(self.ui.settingsPage)\r\n\r\n self.ui.dockWidget.show()\r\n\r\n self.ui.actionReload.setEnabled(True)\r\n self.ui.actionSettings.setEnabled(True)\r\n self.ui.actionShowLog.setEnabled(True)\r\n self.ui.actionShowResults.setEnabled(False)\r\n\r\n can_sim, _ = can_simulate(platforms)\r\n\r\n self.ui.actionLoadStartValues.setEnabled(can_sim)\r\n self.ui.actionSimulate.setEnabled(can_sim)\r\n self.stopTimeLineEdit.setEnabled(can_sim)\r\n self.fmiTypeComboBox.setEnabled(can_sim and len(fmi_types) > 1)\r\n self.ui.settingsGroupBox.setEnabled(can_sim)\r\n\r\n settings = QSettings()\r\n recent_files = settings.value(\"recentFiles\", defaultValue=[])\r\n recent_files = self.removeDuplicates([filename] + recent_files)\r\n\r\n # save the 10 most recent files\r\n settings.setValue('recentFiles', recent_files[:10])\r\n\r\n self.setWindowTitle(\"%s - FMPy\" % os.path.normpath(filename))\r\n\r\n self.createGraphics()\r\n\r\n def open(self):\r\n\r\n start_dir = QDir.homePath()\r\n\r\n settings = QSettings()\r\n recent_files = settings.value(\"recentFiles\", defaultValue=[])\r\n\r\n for filename in recent_files:\r\n dirname = os.path.dirname(filename)\r\n if os.path.isdir(dirname):\r\n start_dir = dirname\r\n break\r\n\r\n filename, _ = QFileDialog.getOpenFileName(parent=self,\r\n caption=\"Open File\",\r\n directory=start_dir,\r\n filter=\"FMUs (*.fmu);;All Files (*.*)\")\r\n\r\n if filename:\r\n self.load(filename)\r\n\r\n def setCurrentPage(self, widget):\r\n \"\"\" Set the current page and the actions \"\"\"\r\n\r\n # block the signals during the update\r\n self.ui.actionSettings.blockSignals(True)\r\n self.ui.actionShowLog.blockSignals(True)\r\n self.ui.actionShowResults.blockSignals(True)\r\n\r\n self.ui.stackedWidget.setCurrentWidget(widget)\r\n\r\n # toggle the actions\r\n self.ui.actionSettings.setChecked(widget == self.ui.settingsPage)\r\n self.ui.actionShowLog.setChecked(widget == self.ui.logPage)\r\n self.ui.actionShowResults.setChecked(widget == self.ui.resultPage)\r\n\r\n # un-block the signals during the update\r\n self.ui.actionSettings.blockSignals(False)\r\n self.ui.actionShowLog.blockSignals(False)\r\n self.ui.actionShowResults.blockSignals(False)\r\n\r\n def selectInputFile(self):\r\n start_dir = os.path.dirname(self.filename)\r\n filename, _ = QFileDialog.getOpenFileName(parent=self,\r\n caption=\"Select Input File\",\r\n directory=start_dir,\r\n filter=\"FMUs (*.csv);;All Files (*.*)\")\r\n if filename:\r\n self.ui.inputFilenameLineEdit.setText(filename)\r\n\r\n def createInputFile(self):\r\n \"\"\" Create an input file based on the input variables in the model description \"\"\"\r\n\r\n input_variables = []\r\n\r\n for variable in self.modelDescription.modelVariables:\r\n if variable.causality == 'input':\r\n input_variables.append(variable)\r\n\r\n if len(input_variables) == 0:\r\n 
QMessageBox.warning(self,\r\n \"Cannot create input file\",\r\n \"The input file cannot be created because the model has no input variables\")\r\n return\r\n\r\n filename, _ = os.path.splitext(self.filename)\r\n\r\n filename, _ = QFileDialog.getSaveFileName(parent=self,\r\n caption=\"Save Input File\",\r\n directory=filename + '_in.csv',\r\n filter=\"Comma Separated Values (*.csv);;All Files (*.*)\")\r\n\r\n if not filename:\r\n return\r\n\r\n with open(filename, 'w') as f:\r\n\r\n # column names\r\n f.write('\"time\"')\r\n for variable in input_variables:\r\n f.write(',\"%s\"' % variable.name)\r\n f.write('\\n')\r\n\r\n # example data\r\n f.write(','.join(['0'] * (len(input_variables) + 1)) + '\\n')\r\n\r\n self.ui.inputFilenameLineEdit.setText(filename)\r\n\r\n def showInputFileInExplorer(self):\r\n \"\"\" Reveal the input file in the file browser \"\"\"\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot show input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.dirname(filename)))\r\n\r\n def openInputFile(self):\r\n \"\"\" Open the input file in the default application \"\"\"\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot open input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(filename))\r\n\r\n def updateSimulationSettings(self):\r\n\r\n if self.fmiTypeComboBox.currentText() == 'Co-Simulation':\r\n self.ui.solverComboBox.setEnabled(False)\r\n self.ui.stepSizeLineEdit.setEnabled(False)\r\n self.ui.relativeToleranceLineEdit.setEnabled(True)\r\n else:\r\n self.ui.solverComboBox.setEnabled(True)\r\n fixed_step = self.ui.solverComboBox.currentText() == 'Fixed-step'\r\n self.ui.stepSizeLineEdit.setEnabled(fixed_step)\r\n self.ui.relativeToleranceLineEdit.setEnabled(not fixed_step)\r\n\r\n def selectVariable(self, variable):\r\n self.selectedVariables.add(variable)\r\n self.variableSelected.emit(variable)\r\n\r\n def deselectVariable(self, variable):\r\n self.selectedVariables.remove(variable)\r\n self.variableDeselected.emit(variable)\r\n\r\n def startSimulation(self):\r\n\r\n from fmpy.gui.simulation import SimulationThread\r\n\r\n try:\r\n stop_time = float(self.stopTimeLineEdit.text())\r\n step_size = float(self.ui.stepSizeLineEdit.text())\r\n relative_tolerance = float(self.ui.relativeToleranceLineEdit.text())\r\n\r\n if self.ui.outputIntervalRadioButton.isChecked():\r\n output_interval = float(self.ui.outputIntervalLineEdit.text())\r\n else:\r\n max_samples = float(self.ui.maxSamplesLineEdit.text())\r\n output_interval = stop_time / max_samples\r\n except Exception as ex:\r\n self.log.log('error', \"Failed to start simulation: %s\" % ex)\r\n self.ui.stackedWidget.setCurrentWidget(self.ui.logPage)\r\n return\r\n\r\n step_size = min(step_size, output_interval)\r\n\r\n if self.ui.solverComboBox.currentText() == 'Fixed-step':\r\n solver = 'Euler'\r\n else:\r\n solver = 'CVode'\r\n\r\n if self.ui.inputCheckBox.isChecked():\r\n\r\n input_variables = []\r\n for variable in self.modelDescription.modelVariables:\r\n if variable.causality == 'input':\r\n input_variables.append(variable.name)\r\n try:\r\n from fmpy.util import read_csv\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n input = read_csv(filename, variable_names=input_variables)\r\n except Exception as e:\r\n self.log.log('error', \"Failed to load 
input from '%s'. %s\" % (filename, e))\r\n return\r\n else:\r\n input = None\r\n\r\n output = []\r\n for variable in self.modelDescription.modelVariables:\r\n output.append(variable.name)\r\n\r\n fmi_type = 'CoSimulation' if self.fmiTypeComboBox.currentText() == 'Co-Simulation' else 'ModelExchange'\r\n\r\n self.simulationThread = SimulationThread(filename=self.filename,\r\n fmiType=fmi_type,\r\n stopTime=stop_time,\r\n solver=solver,\r\n stepSize=step_size,\r\n relativeTolerance=relative_tolerance,\r\n outputInterval=output_interval,\r\n startValues=self.startValues,\r\n applyDefaultStartValues=self.ui.applyDefaultStartValuesCheckBox.isChecked(),\r\n input=input,\r\n output=output,\r\n debugLogging=self.ui.debugLoggingCheckBox.isChecked(),\r\n fmiLogging=self.ui.logFMICallsCheckBox.isChecked())\r\n\r\n self.ui.actionSimulate.setIcon(QIcon(':/icons/stop.png'))\r\n self.ui.actionSimulate.setToolTip(\"Stop simulation\")\r\n self.ui.actionSimulate.triggered.disconnect(self.startSimulation)\r\n self.ui.actionSimulate.triggered.connect(self.simulationThread.stop)\r\n\r\n self.simulationProgressBar.setVisible(True)\r\n\r\n self.simulationThread.messageChanged.connect(self.log.log)\r\n self.simulationThread.progressChanged.connect(self.simulationProgressBar.setValue)\r\n self.simulationThread.finished.connect(self.simulationFinished)\r\n\r\n if self.ui.clearLogOnStartButton.isChecked():\r\n self.log.clear()\r\n\r\n self.setCurrentPage(self.ui.resultPage)\r\n\r\n self.simulationThread.start()\r\n self.plotUpdateTimer.start(100)\r\n\r\n self.updatePlotLayout()\r\n\r\n def simulationFinished(self):\r\n\r\n # update UI\r\n self.ui.actionSimulate.triggered.disconnect(self.simulationThread.stop)\r\n self.ui.actionSimulate.triggered.connect(self.startSimulation)\r\n self.ui.actionSimulate.setIcon(QIcon(':/icons/play.png'))\r\n self.ui.actionSimulate.setToolTip(\"Start simulation\")\r\n self.plotUpdateTimer.stop()\r\n self.simulationProgressBar.setVisible(False)\r\n self.ui.actionShowResults.setEnabled(True)\r\n self.ui.actionSettings.setEnabled(True)\r\n self.setCurrentPage(self.ui.resultPage)\r\n self.updatePlotLayout()\r\n\r\n if self.result is None:\r\n self.setCurrentPage(self.ui.logPage)\r\n else:\r\n self.ui.actionSaveResult.setEnabled(True)\r\n self.ui.actionSavePlottedResult.setEnabled(True)\r\n\r\n self.result = self.simulationThread.result\r\n\r\n self.simulationThread = None\r\n\r\n self.updatePlotData()\r\n\r\n def updatePlotData(self):\r\n\r\n import numpy as np\r\n\r\n if self.simulationThread is not None and len(self.simulationThread.rows) > 1:\r\n # get results from current simulation\r\n self.result = np.array(self.simulationThread.rows, dtype=np.dtype(self.simulationThread.cols))\r\n\r\n if self.result is None:\r\n return # no results available yet\r\n\r\n time = self.result['time']\r\n\r\n for variable, curve in self.curves:\r\n\r\n if variable.name not in self.result.dtype.names:\r\n continue\r\n\r\n y = self.result[variable.name]\r\n\r\n if variable.type == 'Real':\r\n curve.setData(x=time, y=y)\r\n else:\r\n curve.setData(x=np.repeat(time, 2)[1:], y=np.repeat(y, 2)[:-1])\r\n\r\n def updatePlotLayout(self):\r\n\r\n self.ui.plotWidget.clear()\r\n\r\n self.curves[:] = []\r\n\r\n if self.simulationThread is not None:\r\n stop_time = self.simulationThread.stopTime\r\n elif self.result is not None:\r\n stop_time = self.result['time'][-1]\r\n else:\r\n stop_time = 1.0\r\n\r\n pen = (0, 0, 255)\r\n\r\n for variable in self.selectedVariables:\r\n\r\n self.ui.plotWidget.nextRow()\r\n plot = 
self.ui.plotWidget.addPlot()\r\n\r\n if variable.type == 'Real':\r\n curve = plot.plot(pen=pen)\r\n else:\r\n if variable.type == 'Boolean':\r\n plot.setYRange(0, 1, padding=0.2)\r\n plot.getAxis('left').setTicks([[(0, 'false'), (1, 'true')], []])\r\n curve = plot.plot(pen=pen, fillLevel=0, fillBrush=(0, 0, 255, 50), antialias=False)\r\n else:\r\n curve = plot.plot(pen=pen, antialias=False)\r\n\r\n plot.setXRange(0, stop_time, padding=0.05)\r\n\r\n plot.setLabel('left', variable.name)\r\n plot.showGrid(x=True, y=True, alpha=0.25)\r\n\r\n # hide the auto-scale button and disable context menu and mouse interaction\r\n plot.hideButtons()\r\n plot.setMouseEnabled(False, False)\r\n plot.setMenuEnabled(False)\r\n\r\n self.curves.append((variable, curve))\r\n\r\n self.updatePlotData()\r\n\r\n def showColumn(self, name, show):\r\n if name in self.showColumnActions:\r\n self.showColumnActions[name].setChecked(show)\r\n i = VariablesModel.COLUMN_NAMES.index(name)\r\n self.ui.treeView.setColumnHidden(i, not show)\r\n self.ui.tableView.setColumnHidden(i, not show)\r\n\r\n def showAllColumns(self):\r\n for name in COLLAPSABLE_COLUMNS:\r\n self.showColumn(name, True)\r\n\r\n def hideAllColumns(self):\r\n for name in COLLAPSABLE_COLUMNS:\r\n self.showColumn(name, False)\r\n\r\n def setStatusMessage(self, level, text):\r\n\r\n if level in ['debug', 'info', 'warning', 'error']:\r\n self.statusIconLabel.setPixmap(QPixmap(':/icons/%s-16x16.png' % level))\r\n else:\r\n self.statusIconLabel.setPixmap(QPixmap())\r\n\r\n self.statusTextLabel.setText(text)\r\n\r\n def dragEnterEvent(self, event):\r\n\r\n for url in event.mimeData().urls():\r\n if not url.isLocalFile():\r\n return\r\n\r\n event.acceptProposedAction()\r\n\r\n def dropEvent(self, event):\r\n\r\n urls = event.mimeData().urls()\r\n\r\n for url in urls:\r\n if url == urls[0]:\r\n window = self\r\n else:\r\n window = MainWindow()\r\n \r\n window.load(url.toLocalFile())\r\n\r\n def saveResult(self, plotted=False):\r\n\r\n filename, _ = os.path.splitext(self.filename)\r\n\r\n filename, _ = QFileDialog.getSaveFileName(parent=self,\r\n caption=\"Save Result\",\r\n directory=filename + '_out.csv',\r\n filter=\"Comma Separated Values (*.csv);;All Files (*.*)\")\r\n\r\n if filename:\r\n from ..util import write_csv\r\n\r\n if plotted:\r\n columns = [variable.name for variable in self.selectedVariables]\r\n else:\r\n columns = None\r\n\r\n try:\r\n write_csv(filename=filename, result=self.result, columns=columns)\r\n except Exception as e:\r\n QMessageBox.critical(self, \"Failed to write result\", '\"Failed to write \"%s\". 
%s' % (filename, e))\r\n\r\n def createDesktopShortcut(self):\r\n \"\"\" Create a desktop shortcut to start the GUI \"\"\"\r\n\r\n import os\r\n from win32com.client import Dispatch\r\n import sys\r\n\r\n env = os.environ.get('CONDA_DEFAULT_ENV')\r\n\r\n if env is None:\r\n target_path = sys.executable\r\n root, ext = os.path.splitext(target_path)\r\n pythonw = root + 'w' + ext\r\n if os.path.isfile(pythonw):\r\n target_path = pythonw\r\n arguments = '-m fmpy.gui'\r\n else:\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n activate = os.path.join(path, 'activate.bat')\r\n if os.path.isfile(activate):\r\n break\r\n\r\n target_path = r'%windir%\\System32\\cmd.exe'\r\n arguments = '/C \"\"%s\" %s && python -m fmpy.gui\"' % (activate, env)\r\n\r\n file_path = os.path.dirname(__file__)\r\n icon = os.path.join(file_path, 'icons', 'app_icon.ico')\r\n\r\n desktop_locations = QStandardPaths.standardLocations(QStandardPaths.DesktopLocation)\r\n shortcut_path = os.path.join(desktop_locations[0], \"FMPy GUI.lnk\")\r\n\r\n shell = Dispatch('WScript.Shell')\r\n shortcut = shell.CreateShortCut(shortcut_path)\r\n shortcut.Targetpath = target_path\r\n shortcut.Arguments = arguments\r\n # shortcut.WorkingDirectory = ...\r\n shortcut.IconLocation = icon\r\n shortcut.save()\r\n\r\n def showAboutDialog(self):\r\n dialog = AboutDialog(self)\r\n dialog.show()\r\n\r\n @staticmethod\r\n def removeDuplicates(seq):\r\n \"\"\" Remove duplicates from a sequence \"\"\"\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in seq if not (x in seen or seen_add(x))]\r\n\r\n def validateFMU(self):\r\n\r\n from ..validation import validate_fmu\r\n\r\n problems = validate_fmu(self.filename)\r\n\r\n if problems:\r\n button = QMessageBox.question(self, \"Validation failed\", \"%d problems have been found. 
Save validation messages?\" % len(problems))\r\n if button == QMessageBox.Yes:\r\n filename, _ = os.path.splitext(self.filename)\r\n filename, _ = QFileDialog.getSaveFileName(parent=self,\r\n caption=\"Save validation messages\",\r\n directory=filename + '_validation.txt',\r\n filter=\"Text Files (*.txt);;All Files (*.*)\")\r\n if filename:\r\n with open(filename, 'w') as f:\r\n f.writelines(problems)\r\n else:\r\n QMessageBox.information(self, \"Validation successful\", \"No problems have been found.\")\r\n\r\n def addFileAssociation(self):\r\n \"\"\" Associate *.fmu with the FMPy GUI \"\"\"\r\n\r\n try:\r\n from winreg import HKEY_CURRENT_USER, KEY_WRITE, REG_SZ, OpenKey, CreateKey, SetValueEx, CloseKey\r\n\r\n env = os.environ.get('CONDA_DEFAULT_ENV_')\r\n\r\n if env is None:\r\n python = sys.executable\r\n root, ext = os.path.splitext(python)\r\n pythonw = root + 'w' + ext\r\n\r\n if os.path.isfile(pythonw):\r\n python = pythonw\r\n\r\n target = '\"%s\" -m fmpy.gui \"%%1\"' % python\r\n else:\r\n # activate the conda environment\r\n for path in os.environ[\"PATH\"].split(os.pathsep):\r\n activate = os.path.join(path, 'activate.bat')\r\n if os.path.isfile(activate):\r\n break\r\n\r\n windir = os.environ['WINDIR']\r\n cmd = os.path.join(windir, 'System32', 'cmd.exe')\r\n\r\n target = r'%s /C \"\"%s\" %s && python -m fmpy.gui %%1\"' % (cmd, activate, env)\r\n\r\n key_path = r'Software\\Classes\\fmpy.gui\\shell\\open\\command'\r\n\r\n CreateKey(HKEY_CURRENT_USER, key_path)\r\n key = OpenKey(HKEY_CURRENT_USER, key_path, 0, KEY_WRITE)\r\n SetValueEx(key, '', 0, REG_SZ, target)\r\n CloseKey(key)\r\n\r\n key_path = r'SOFTWARE\\Classes\\.fmu'\r\n\r\n CreateKey(HKEY_CURRENT_USER, key_path)\r\n key = OpenKey(HKEY_CURRENT_USER, key_path, 0, KEY_WRITE)\r\n SetValueEx(key, '', 0, REG_SZ, 'fmpy.gui')\r\n CloseKey(key)\r\n\r\n QMessageBox.information(self, \"File association added\", \"The file association for *.fmu has been added\")\r\n except Exception as e:\r\n QMessageBox.critical(self, \"File association failed\", \"The file association for *.fmu could not be added. 
%s\" % e)\r\n\r\n def copyValueReference(self):\r\n \"\"\" Copy the value references of the selected variables to the clipboard \"\"\"\r\n\r\n text = '\\n'.join([str(v.valueReference) for v in self.getSelectedVariables()])\r\n QApplication.clipboard().setText(text)\r\n\r\n def copyVariableName(self):\r\n \"\"\" Copy the names of the selected variables to the clipboard \"\"\"\r\n\r\n text = '\\n'.join([str(v.name) for v in self.getSelectedVariables()])\r\n QApplication.clipboard().setText(text)\r\n\r\n def getSelectedVariables(self):\r\n \"\"\" Returns a list of selected variables in the current view \"\"\"\r\n\r\n variables = []\r\n\r\n if self.ui.variablesStackedWidget.currentWidget() == self.ui.treePage:\r\n for index in self.ui.treeView.selectionModel().selectedRows():\r\n sourceIndex = self.treeFilterModel.mapToSource(index)\r\n treeItem = sourceIndex.internalPointer()\r\n if treeItem.variable is not None:\r\n variables.append(treeItem.variable)\r\n else:\r\n for index in self.ui.tableView.selectionModel().selectedRows():\r\n sourceIndex = self.tableFilterModel.mapToSource(index)\r\n variable = sourceIndex.internalPointer()\r\n variables.append(variable)\r\n\r\n return variables\r\n\r\n def clearPlots(self):\r\n \"\"\" Clear all plots \"\"\"\r\n self.selectedVariables.clear()\r\n self.updatePlotLayout()\r\n\r\n def createGraphics(self):\r\n \"\"\" Create the graphical representation of the FMU's inputs and outputs \"\"\"\r\n\r\n def variableColor(variable):\r\n if variable.type.startswith(('Float', 'Real')):\r\n return QColor.fromRgb(26, 77, 179)\r\n elif variable.type.startswith(('Enumeration', 'Int', 'UInt')):\r\n return QColor.fromRgb(179, 77, 26)\r\n elif variable.type == 'Boolean':\r\n return QColor.fromRgb(255, 0, 255)\r\n elif variable.type == 'String':\r\n return QColor.fromRgb(26, 114, 16)\r\n elif variable.type == 'Binary':\r\n return QColor.fromRgb(81, 81, 81)\r\n else:\r\n return QColor.fromRgb(0, 0, 0)\r\n\r\n inputVariables = []\r\n outputVariables = []\r\n maxInputLabelWidth = 0\r\n maxOutputLabelWidth = 0\r\n\r\n textItem = QGraphicsTextItem()\r\n fontMetrics = QFontMetricsF(textItem.font())\r\n\r\n for variable in self.modelDescription.modelVariables:\r\n if variable.causality == 'input':\r\n inputVariables.append(variable)\r\n elif variable.causality == 'output':\r\n outputVariables.append(variable)\r\n\r\n for variable in inputVariables:\r\n maxInputLabelWidth = max(maxInputLabelWidth, fontMetrics.width(variable.name))\r\n\r\n for variable in outputVariables:\r\n maxOutputLabelWidth = max(maxOutputLabelWidth, fontMetrics.width(variable.name))\r\n\r\n from math import floor\r\n\r\n scene = QGraphicsScene()\r\n self.ui.graphicsView.setScene(scene)\r\n group = QGraphicsItemGroup()\r\n scene.addItem(group)\r\n group.setPos(200.5, -50.5)\r\n lh = 15 # line height\r\n\r\n w = max(150., maxInputLabelWidth + maxOutputLabelWidth + 20)\r\n h = max(50., 10 + lh * max(len(inputVariables), len(outputVariables)))\r\n\r\n block = QGraphicsRectItem(0, 0, w, h, group)\r\n block.setPen(QColor.fromRgb(0, 0, 0))\r\n\r\n pen = QPen()\r\n pen.setWidthF(1)\r\n\r\n font = QFont()\r\n font.setPixelSize(10)\r\n\r\n # inputs\r\n y = floor((h - len(inputVariables) * lh) / 2 - 2)\r\n for variable in inputVariables:\r\n text = QGraphicsTextItem(variable.name, group)\r\n text.setDefaultTextColor(QColor.fromRgb(0, 0, 0))\r\n text.setFont(font)\r\n text.setX(3)\r\n text.setY(y)\r\n\r\n polygon = QPolygonF([QPointF(-8, y + 7.5), QPointF(-1, y + 11), QPointF(-8, y + 14.5)])\r\n\r\n path = 
QPainterPath()\r\n path.addPolygon(polygon)\r\n path.closeSubpath()\r\n contour = QGraphicsPathItem(path, group)\r\n contour.setPen(QPen(Qt.NoPen))\r\n contour.setBrush(variableColor(variable))\r\n pen = QPen()\r\n pen.setColor(variableColor(variable))\r\n pen.setJoinStyle(Qt.MiterJoin)\r\n contour.setPen(pen)\r\n\r\n y += lh\r\n\r\n # outputs\r\n y = floor((h - len(outputVariables) * lh) / 2 - 2)\r\n for variable in outputVariables:\r\n text = QGraphicsTextItem(variable.name, group)\r\n text.setDefaultTextColor(QColor.fromRgb(0, 0, 0))\r\n text.setFont(font)\r\n text.setX(w - 3 - text.boundingRect().width())\r\n text.setY(y)\r\n\r\n polygon = QPolygonF([QPointF(w + 1, y + 7.5), QPointF(w + 8, y + 11), QPointF(w + 1, y + 14.5)])\r\n\r\n path = QPainterPath()\r\n path.addPolygon(polygon)\r\n path.closeSubpath()\r\n contour = QGraphicsPathItem(path, group)\r\n contour.setPen(QPen(Qt.NoPen))\r\n contour.setBrush(variableColor(variable))\r\n pen = QPen()\r\n pen.setColor(variableColor(variable))\r\n pen.setJoinStyle(Qt.MiterJoin)\r\n contour.setPen(pen)\r\n\r\n y += lh\r\n\r\n def saveChanges(self):\r\n\r\n from ..util import change_fmu\r\n\r\n output_file, _ = QFileDialog.getSaveFileName(parent=self,\r\n caption='Save Changed FMU',\r\n directory=self.filename,\r\n filter='FMUs (*.fmu)')\r\n\r\n if output_file:\r\n change_fmu(input_file=self.filename, output_file=output_file, start_values=self.startValues)\r\n\r\n def loadStartValues(self):\r\n from ..util import get_start_values\r\n\r\n start_values = get_start_values(self.filename)\r\n\r\n self.startValues.update(start_values)\r\n\r\n self.ui.treeView.reset()\r\n self.ui.tableView.reset()\r\n\r\n def editTable(self):\r\n \"\"\" Open the table dialog \"\"\"\r\n\r\n from .TableDialog import TableDialog\r\n\r\n variables = self.getSelectedVariables()\r\n\r\n if len(variables) == 1:\r\n start_values = self.startValues.copy()\r\n dialog = TableDialog(modelVariables=self.modelDescription.modelVariables,\r\n variable=variables[0],\r\n startValues=start_values)\r\n\r\n if dialog.exec_() == QDialog.Accepted:\r\n self.startValues.clear()\r\n self.startValues.update(start_values)\r\n\r\n def compilePlatformBinary(self, target_platform):\r\n \"\"\" Compile the platform binary \"\"\"\r\n\r\n from ..util import compile_platform_binary\r\n\r\n platforms = supported_platforms(self.filename)\r\n\r\n if target_platform in platforms:\r\n button = QMessageBox.question(self, \"Platform binary already exists\",\r\n f'The FMU already contains a binary for the platform \"{target_platform}\".'\r\n ' Do you want to compile and overwrite the existing binary?')\r\n if button == QMessageBox.No:\r\n return\r\n\r\n try:\r\n compile_platform_binary(self.filename, target_platform=target_platform)\r\n except Exception as e:\r\n QMessageBox.critical(self, \"Failed to compile platform binaries\", str(e))\r\n return\r\n\r\n self.load(self.filename)\r\n\r\n def createJupyterNotebook(self):\r\n \"\"\" Create a Juypyter Notebook to simulate the FMU \"\"\"\r\n\r\n from fmpy.util import create_jupyter_notebook\r\n\r\n filename, ext = os.path.splitext(self.filename)\r\n\r\n filename, _ = QFileDialog.getSaveFileName(\r\n parent=self,\r\n directory=filename + '.ipynb',\r\n filter='Jupyter Notebooks (*.ipynb);;All Files (*)'\r\n )\r\n\r\n if filename:\r\n try:\r\n create_jupyter_notebook(self.filename, filename)\r\n except Exception as e:\r\n QMessageBox.critical(self, \"Failed to create Jupyter Notebook\", str(e))\r\n return\r\n\r\n if QMessageBox.question(self, \"Open Jupyter 
Notebook?\", f\"Start Jupyter and open {filename}?\") == QMessageBox.Yes:\r\n\r\n from subprocess import run, CREATE_NEW_CONSOLE\r\n\r\n try:\r\n run(['jupyter', 'notebook', filename], creationflags=CREATE_NEW_CONSOLE)\r\n except Exception as e:\r\n QMessageBox.critical(self, \"Failed to start Jupyter\", str(e))\r\n\r\n def createCMakeProject(self):\r\n \"\"\" Create a CMake project from a C code FMU \"\"\"\r\n\r\n from fmpy.util import create_cmake_project\r\n\r\n project_dir = QFileDialog.getExistingDirectory(\r\n parent=self,\r\n caption='Select CMake Project Folder',\r\n directory=os.path.dirname(self.filename))\r\n\r\n if project_dir:\r\n create_cmake_project(self.filename, project_dir)\r\n\r\n\r\n def addRemotingBinaries(self, host_platform, remote_platform):\r\n\r\n from ..util import add_remoting\r\n\r\n try:\r\n add_remoting(self.filename, host_platform, remote_platform)\r\n except Exception as e:\r\n QMessageBox.warning(self, \"Failed to add Remoting Binaries\",\r\n f\"Failed to add remoting binaries to {self.filename}. {e}\")\r\n\r\n self.load(self.filename)\r\n\r\n\r\n def addCoSimulationWrapper(self):\r\n \"\"\" Add the Co-Simulation Wrapper to the FMU \"\"\"\r\n\r\n from ..cswrapper import add_cswrapper\r\n\r\n try:\r\n add_cswrapper(self.filename)\r\n except Exception as e:\r\n QMessageBox.warning(self, \"Failed to add Co-Simulation Wrapper\",\r\n \"Failed to add Co-Simulation Wrapper %s. %s\" % (self.filename, e))\r\n\r\n self.load(self.filename)\r\n",
"import os\r\nimport shutil\r\nimport numpy as np\r\n\r\nfrom fmpy import read_model_description, extract\r\nfrom fmpy.fmi1 import FMU1Slave\r\nfrom fmpy.fmi2 import FMU2Slave\r\nfrom fmpy.ssp.ssd import System, read_ssd, get_connections, find_connectors, find_components\r\n\r\n\r\ndef get_value(component, name):\r\n \"\"\" Get a variable from a component \"\"\"\r\n\r\n variable = component.variables[name]\r\n vr = [variable.valueReference]\r\n\r\n if variable.type == 'Real':\r\n return component.fmu.getReal(vr)[0]\r\n elif variable.type in ['Integer', 'Enumeration']:\r\n return component.fmu.getInteger(vr)[0]\r\n elif variable.type == 'Boolean':\r\n value = component.fmu.getBoolean(vr)[0]\r\n return value != 0\r\n else:\r\n raise Exception(\"Unsupported type: %s\" % variable.type)\r\n\r\n\r\ndef set_value(component, name, value):\r\n \"\"\" Set a variable to a component \"\"\"\r\n\r\n variable = component.variables[name]\r\n vr = [variable.valueReference]\r\n\r\n if variable.type == 'Real':\r\n component.fmu.setReal(vr, [float(value)])\r\n elif variable.type in ['Integer', 'Enumeration']:\r\n component.fmu.setInteger(vr, [int(value)])\r\n elif variable.type == 'Boolean':\r\n # TODO: convert literals\r\n component.fmu.setBoolean(vr, [value != 0.0])\r\n else:\r\n raise Exception(\"Unsupported type: %s\" % variable.type)\r\n\r\n\r\ndef add_path(element, path=''):\r\n\r\n if isinstance(element, System):\r\n for child in element.elements:\r\n add_path(child, path + child.name + '.')\r\n\r\n for connector in element.connectors:\r\n connector.path = path + connector.name\r\n\r\n\r\ndef set_parameters(component, parameter_set):\r\n \"\"\" Apply the parameters (start values) to a component \"\"\"\r\n\r\n path = component.name\r\n\r\n parent = component.parent\r\n\r\n while parent.parent is not None:\r\n path = parent.name + '.' 
+ path\r\n parent = parent.parent\r\n\r\n for parameter in parameter_set.parameters:\r\n if parameter.name.startswith(path):\r\n variable_name = parameter.name[len(path) + 1:]\r\n set_value(component, variable_name, parameter.value)\r\n\r\n\r\ndef instantiate_fmu(component, ssp_unzipdir, start_time, stop_time=None, parameter_set=None):\r\n \"\"\" Instantiate an FMU \"\"\"\r\n\r\n fmu_filename = os.path.join(ssp_unzipdir, component.source)\r\n\r\n component.unzipdir = extract(fmu_filename)\r\n\r\n # read the model description\r\n model_description = read_model_description(fmu_filename, validate=False)\r\n\r\n if model_description.coSimulation is None:\r\n raise Exception(\"%s does not support co-simulation.\" % component.source)\r\n\r\n # collect the value references\r\n component.variables = {}\r\n for variable in model_description.modelVariables:\r\n # component.vrs[variable.name] = variable.valueReference\r\n component.variables[variable.name] = variable\r\n\r\n fmu_kwargs = {'guid': model_description.guid,\r\n 'unzipDirectory': component.unzipdir,\r\n 'modelIdentifier': model_description.coSimulation.modelIdentifier,\r\n 'instanceName': component.name}\r\n\r\n if model_description.fmiVersion == '1.0':\r\n component.fmu = FMU1Slave(**fmu_kwargs)\r\n component.fmu.instantiate()\r\n if parameter_set is not None:\r\n set_parameters(component, parameter_set)\r\n component.fmu.initialize(stopTime=stop_time)\r\n else:\r\n component.fmu = FMU2Slave(**fmu_kwargs)\r\n component.fmu.instantiate()\r\n component.fmu.setupExperiment(startTime=start_time)\r\n if parameter_set is not None:\r\n set_parameters(component, parameter_set)\r\n component.fmu.enterInitializationMode()\r\n component.fmu.exitInitializationMode()\r\n\r\n\r\ndef free_fmu(component):\r\n \"\"\" Free an FMU and remove its unzip dir \"\"\"\r\n\r\n component.fmu.terminate()\r\n component.fmu.freeInstance()\r\n try:\r\n shutil.rmtree(component.unzipdir)\r\n except Exception as e:\r\n print(\"Failed to remove unzip directory. 
\" + str(e))\r\n\r\n\r\ndef do_step(component, time, step_size):\r\n \"\"\" Perform one simulation step \"\"\"\r\n\r\n # set inputs\r\n for connector in component.connectors:\r\n if connector.kind == 'input':\r\n set_value(component, connector.name, connector.value)\r\n\r\n # do step\r\n component.fmu.doStep(currentCommunicationPoint=time, communicationStepSize=step_size)\r\n\r\n # get outputs\r\n for connector in component.connectors:\r\n if connector.kind == 'output':\r\n connector.value = get_value(component, connector.name)\r\n\r\n\r\ndef simulate_ssp(ssp_filename, start_time=0.0, stop_time=None, step_size=None, parameter_set=None, input={}):\r\n \"\"\" Simulate a system of FMUs \"\"\"\r\n\r\n if stop_time is None:\r\n stop_time = 1.0\r\n\r\n if step_size is None:\r\n step_size = stop_time * 1e-2\r\n\r\n ssd = read_ssd(ssp_filename)\r\n\r\n add_path(ssd.system)\r\n\r\n components = find_components(ssd.system)\r\n connectors = find_connectors(ssd.system)\r\n connections = get_connections(ssd.system)\r\n\r\n # resolve connections\r\n connections_reversed = {}\r\n\r\n for a, b in connections:\r\n connections_reversed[b] = a\r\n\r\n new_connections = []\r\n\r\n # trace connections back to the actual start connector\r\n for a, b in connections:\r\n\r\n while isinstance(a.parent, System) and a.parent.parent is not None:\r\n a = connections_reversed[a]\r\n\r\n new_connections.append((a, b))\r\n\r\n connections = new_connections\r\n\r\n # extract the SSP\r\n ssp_unzipdir = extract(ssp_filename)\r\n\r\n # initialize the connectors\r\n for connector in connectors:\r\n connector.value = 0.0\r\n\r\n # instantiate the FMUs\r\n for component in components:\r\n instantiate_fmu(component, ssp_unzipdir, start_time, stop_time, parameter_set)\r\n\r\n time = start_time\r\n\r\n rows = [] # list to record the results\r\n\r\n # simulation loop\r\n while time < stop_time:\r\n\r\n # apply input\r\n for connector in ssd.system.connectors:\r\n if connector.kind == 'input' and connector.name in input:\r\n connector.value = input[connector.name](time)\r\n\r\n # perform one step\r\n for component in components:\r\n do_step(component, time, step_size)\r\n\r\n # apply connections\r\n for start_connector, end_connector in connections:\r\n end_connector.value = start_connector.value\r\n\r\n # get the results\r\n row = [time]\r\n\r\n for connector in connectors:\r\n row.append(connector.value)\r\n\r\n # append the results\r\n rows.append(tuple(row))\r\n\r\n # advance the time\r\n time += step_size\r\n\r\n # free the FMUs\r\n for component in components:\r\n free_fmu(component)\r\n\r\n # clean up\r\n shutil.rmtree(ssp_unzipdir)\r\n\r\n dtype = [('time', np.float64)]\r\n\r\n for connector, value in zip(connectors, rows[0][1:]):\r\n if type(value) == bool:\r\n dtype.append((connector.path, np.bool_))\r\n elif type(value) == int:\r\n dtype.append((connector.path, np.int32))\r\n else:\r\n dtype.append((connector.path, np.float64))\r\n\r\n # convert the results to a structured NumPy array\r\n return np.array(rows, dtype=np.dtype(dtype))\r\n"
] |
[
[
"numpy.repeat",
"numpy.dtype"
],
[
"numpy.dtype"
]
] |
chengemily/Distributional-Signatures
|
[
"cd7e4659fc9761a8af046e824853aa338b22f2f6"
] |
[
"src/embedding/auxiliary/factory.py"
] |
[
"import datetime\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom embedding.auxiliary.pos import POS\n\n\ndef get_embedding(args):\n '''\n @return AUX module with aggregated embeddings or None if args.aux\n did not provide additional embeddings\n '''\n print(\"{}, Building augmented embedding\".format(\n datetime.datetime.now().strftime('%02y/%02m/%02d %H:%M:%S')))\n\n aux = []\n for ebd in args.auxiliary:\n if ebd == 'pos':\n aux.append(POS(args))\n else:\n raise ValueError('Invalid argument for auxiliary ebd')\n\n if args.cuda != -1:\n aux = [a.cuda(args.cuda) for a in aux]\n\n model = AUX(aux, args)\n\n if args.cuda != -1:\n return model.cuda(args.cuda)\n else:\n return model\n\n\nclass AUX(nn.Module):\n '''\n Wrapper around combination of auxiliary embeddings\n '''\n\n def __init__(self, aux, args):\n super(AUX, self).__init__()\n self.args = args\n # this is a list of nn.Module\n self.aux = nn.ModuleList(aux)\n # this is 0 if self.aux is empty\n self.embedding_dim = sum(a.embedding_dim for a in self.aux)\n\n def forward(self, data, weights=None):\n # torch.cat will discard the empty tensor\n if len(self.aux) == 0:\n if self.args.cuda != -1:\n return torch.FloatTensor().cuda(self.args.cuda)\n return torch.FloatTensor()\n\n # aggregate results from each auxiliary module\n results = [aux(data, weights) for aux in self.aux]\n\n # aux embeddings should only be used with cnn, meta or meta_mlp.\n # concatenate together with word embeddings\n assert (self.args.embedding in ['cnn', 'meta', 'meta_mlp', 'lstmatt'])\n x = torch.cat(results, dim=2)\n\n return x\n"
] |
[
[
"torch.nn.ModuleList",
"torch.FloatTensor",
"torch.cat"
]
] |
Thirty-OneR/Astr496_assignment03
|
[
"e5c8c842906fa9d6c141fb92a0fc9e134810ec64"
] |
[
"dodo.py"
] |
[
"from doit.tools import run_once\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef task_generate_gaussian():\n N = 32**3\n seed = 0x4d3d3d3\n fn = \"gaussian.h5\"\n def _generate():\n np.random.seed(seed)\n pos = np.random.normal(loc = [0.5, 0.5, 0.5], scale = 0.2, size = (N, 3))\n vel = np.random.random(size = (N, 3)) * 10.0 - 5.0\n with h5py.File(fn, \"w\") as f:\n f.create_dataset(\"/particle_positions\", data = pos)\n f.create_dataset(\"/particle_velocities\", data = vel)\n f.create_dataset(\"/particle_masses\", data = np.ones(N))\n return {'actions': [_generate],\n 'targets': [fn],\n 'uptodate': [run_once]}\n"
] |
[
[
"numpy.random.normal",
"numpy.random.random",
"numpy.random.seed",
"numpy.ones"
]
] |
tkortz/motion_planning_rt
|
[
"08e914642b802f7217a8ad0f6153d41ccdce8c7d"
] |
[
"python_src/adaptive_formation/gradient_interactive.py"
] |
[
"# In order to launch execute:\n# python3 gradient_interactive.py\n\nimport numpy as np\nfrom numpy.linalg import norm\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections\nfrom scipy.ndimage.morphology import distance_transform_edt as bwdist\nfrom math import *\nimport random\nfrom impedance_modeles import *\nimport time\n\nfrom progress.bar import FillingCirclesBar\nfrom tasks import *\nfrom threading import Thread\nfrom multiprocessing import Process\nimport os\n\nimport liblitmus\n\n\n\ndef poly_area(x,y):\n # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n # https://en.wikipedia.org/wiki/Shoelace_formula\n return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))\n\ndef meters2grid(pose_m, nrows=500, ncols=500):\n # [0, 0](m) -> [250, 250]\n # [1, 0](m) -> [250+100, 250]\n # [0,-1](m) -> [250, 250-100]\n pose_on_grid = np.array(pose_m)*100 + np.array([ncols/2, nrows/2])\n return np.array( pose_on_grid, dtype=int)\ndef grid2meters(pose_grid, nrows=500, ncols=500):\n # [250, 250] -> [0, 0](m)\n # [250+100, 250] -> [1, 0](m)\n # [250, 250-100] -> [0,-1](m)\n pose_meters = ( np.array(pose_grid) - np.array([ncols/2, nrows/2]) ) / 100.0\n return pose_meters\n\ndef gradient_planner(f, current_point, ncols=500, nrows=500, movement_rate=0.06):\n \"\"\"\n GradientBasedPlanner : This function computes the next_point\n given current location, goal location and potential map, f.\n It also returns mean velocity, V, of the gradient map in current point.\n \"\"\"\n [gy, gx] = np.gradient(-f);\n iy, ix = np.array( meters2grid(current_point), dtype=int )\n w = 30 # smoothing window size for gradient-velocity\n vx = np.mean(gx[ix-int(w/2) : ix+int(w/2), iy-int(w/2) : iy+int(w/2)])\n vy = np.mean(gy[ix-int(w/2) : ix+int(w/2), iy-int(w/2) : iy+int(w/2)])\n V = np.array([vx, vy])\n dt = 0.06 / norm(V);\n next_point = current_point + dt*V;\n\n return next_point, V\n\ndef combined_potential(obstacles_poses, R_obstacles, goal, nrows=500, ncols=500):\n \"\"\" Repulsive potential \"\"\"\n obstacles_map = map(obstacles_poses, R_obstacles)\n goal = meters2grid(goal)\n d = bwdist(obstacles_map==0);\n d2 = (d/100.) 
+ 1; # Rescale and transform distances\n d0 = 2;\n nu = 200;\n repulsive = nu*((1./d2 - 1./d0)**2);\n repulsive [d2 > d0] = 0;\n \"\"\" Attractive potential \"\"\"\n [x, y] = np.meshgrid(np.arange(ncols), np.arange(nrows))\n xi = 1/700.;\n attractive = xi * ( (x - goal[0])**2 + (y - goal[1])**2 );\n \"\"\" Combine terms \"\"\"\n f = attractive + repulsive;\n return f\n\ndef map(obstacles_poses, R_obstacles, nrows=500, ncols=500):\n \"\"\" Obstacles map \"\"\"\n obstacles_map = np.zeros((nrows, ncols));\n [x, y] = np.meshgrid(np.arange(ncols), np.arange(nrows))\n for pose in obstacles_poses:\n pose = meters2grid(pose)\n x0 = pose[0]; y0 = pose[1]\n # cylindrical obstacles\n t = ((x - x0)**2 + (y - y0)**2) < (100*R_obstacles)**2\n obstacles_map[t] = 1;\n # rectangular obstacles\n obstacles_map[400:, 130:150] = 1;\n obstacles_map[130:150, :200] = 1;\n obstacles_map[330:380, 300:] = 1;\n return obstacles_map\n\ndef move_obstacles(obstacles_poses, obstacles_goal_poses):\n \"\"\" All of the obstacles tend to go to the origin, (0,0) - point \"\"\"\n # for pose in obstacles_poses:\n # dx = random.uniform(0, 0.03); dy = random.uniform(0,0.03);\n # pose[0] -= np.sign(pose[0])*dx; pose[1] -= np.sign(pose[1])*dy;\n\n \"\"\" Each obstacles tends to go to its selected goal point with random speed \"\"\"\n for p in range(len(obstacles_poses)):\n pose = obstacles_poses[p]; goal = obstacles_goal_poses[p]\n dx, dy = (goal - pose) / norm(goal-pose) * 0.05#random.uniform(0,0.05)\n pose[0] += dx; pose[1] += dy;\n\n return obstacles_poses\n\n\ndef formation(num_robots, leader_des, v, R_swarm):\n if num_robots<=1: return []\n u = np.array([-v[1], v[0]])\n des4 = leader_des - v*R_swarm*sqrt(3) # follower\n if num_robots==2: return [des4]\n des2 = leader_des - v*R_swarm*sqrt(3)/2 + u*R_swarm/2 # follower\n des3 = leader_des - v*R_swarm*sqrt(3)/2 - u*R_swarm/2 # follower\n if num_robots==3: return [des2, des3]\n \n return [des2, des3, des4]\n\ndef gradient_interactive():\n \"\"\" initialization \"\"\"\n animate = 1 # show 1-each frame or 0-just final configuration\n random_obstacles = 1 # randomly distributed obstacles on the map\n num_random_obstacles = 8 # number of random circular obstacles on the map\n num_robots = 4 # <=4, number of drones in formation\n moving_obstacles = 1 # 0-static or 1-dynamic obstacles\n impedance = 0 # impedance links between the leader and followers (leader's velocity)\n formation_gradient = 1 # followers are attracting to their formation position and repelling from obstacles\n draw_gradients = 1 # 1-gradients plot, 0-grid\n postprocessing = 0 # show processed data figures after the flight\n \"\"\" human guided swarm params \"\"\"\n interactive = 0 # 1-human guided swarm (requires MoCap system), 0-potential fields as a planner to goal pose\n human_name = 'palm' # vicon mocap object\n pos_coef = 3.0 # scale of the leader's movement relatively to the human operator\n initialized = False # is always inits with False: for relative position control\n max_its = 500 if interactive else 120 # max number of allowed iters for formation to reach the goal\n VISUALIZE = False\n # movie writer\n if VISUALIZE:\n progress_bar = FillingCirclesBar('Number of Iterations', max=max_its)\n should_write_movie = 0; movie_file_name = os.getcwd()+'/videos/output.avi'\n movie_writer = get_movie_writer(should_write_movie, 'Simulation Potential Fields', movie_fps=10., plot_pause_len=0.01)\n\n R_obstacles = 0.05 # [m]\n R_swarm = 0.3 # [m]\n start = np.array([-1.8, 1.8]); goal = np.array([1.8, -1.8])\n V0 = (goal - 
start) / norm(goal-start) # initial movement direction, |V0| = 1\n U0 = np.array([-V0[1], V0[0]]) / norm(V0) # perpendicular to initial movement direction, |U0|=1\n imp_pose_prev = np.array([0, 0])\n imp_vel_prev = np.array([0, 0])\n imp_time_prev = time.time()\n\n if random_obstacles:\n obstacles_poses = np.random.uniform(low=-2.5, high=2.5, size=(num_random_obstacles,2)) # randomly located obstacles\n obstacles_goal_poses = np.random.uniform(low=-1.3, high=1.3, size=(num_random_obstacles,2)) # randomly located obstacles goal poses\n else:\n obstacles_poses = np.array([[-2, 1], [1.5, 0.5], [-1.0, 1.5], [0.1, 0.1], [1, -2], [-1.8, -1.8]]) # 2D - coordinates [m]\n obstacles_goal_poses = np.array([[-0, 0], [0.0, 0.0], [ 0.0, 0.0], [0.0, 0.0], [0, 0], [ 0.0, 0.0]])\n\n\n \"\"\" Main loop \"\"\"\n\n # drones polygonal formation\n route1 = start # leader\n current_point1 = start\n robots_poses = [start] + formation(num_robots, start, V0, R_swarm)\n routes = [route1] + robots_poses[1:]\n centroid_route = [ sum([p[0] for p in robots_poses])/len(robots_poses), sum([p[1] for p in robots_poses])/len(robots_poses) ]\n des_poses = robots_poses\n vels = []\n for r in range(num_robots): vels.append([])\n norm_vels = []\n for r in range(num_robots): norm_vels.append([])\n\n # variables for postprocessing and performance estimation\n area_array = []\n start_time = time.time()\n\n fig = plt.figure(figsize=(10, 10))\n with get_dummy_context_mgr():\n for i in range(max_its):\n if moving_obstacles: obstacles_poses = move_obstacles(obstacles_poses, obstacles_goal_poses)\n\n \"\"\" Leader's pose update \"\"\"\n f1 = combined_potential(obstacles_poses, R_obstacles, goal)\n des_poses[0], vels[0] = gradient_planner(f1, current_point1)\n direction = ( goal - des_poses[0] ) / norm(goal - des_poses[0])\n norm_vels[0].append(norm(vels[0]))\n\n # drones polygonal formation\n # direction = ( goal - des_poses[0] ) / norm(goal - des_poses[0])\n des_poses[1:] = formation(num_robots, des_poses[0], direction, R_swarm)\n v = direction; u = np.array([-v[1], v[0]])\n\n if formation_gradient:\n # following drones are attracting to desired points - vertices of the polygonal formation\n for p in range(1, num_robots):\n \"\"\" including another robots in formation in obstacles array: \"\"\"\n robots_obstacles = [x for i,x in enumerate(robots_poses) if i!=p]\n # obstacles_poses1 = np.array(robots_obstacles + obstacles_poses.tolist())\n # f = combined_potential(obstacles_poses1, des_poses[p])\n f = combined_potential(obstacles_poses, R_obstacles, des_poses[p])\n des_poses[p], vels[p] = gradient_planner(f, des_poses[p])\n norm_vels[p].append(norm(vels[p]))\n\n for r in range(num_robots):\n routes[r] = np.vstack([routes[r], des_poses[r]])\n\n current_point1 = des_poses[0] # update current point of the leader\n\n pp = des_poses\n centroid = [ sum([p[0] for p in pp])/len(pp), sum([p[1] for p in pp])/len(pp) ]\n centroid_route = np.vstack([centroid_route, centroid])\n dist_to_goal = norm(centroid - goal)\n if dist_to_goal < 1.5*R_swarm:\n print('\\nReached the goal')\n break\n\n if VISUALIZE:\n progress_bar.next()\n plt.cla()\n\n draw_map(start, goal, obstacles_poses, R_obstacles, f1, draw_gradients=draw_gradients)\n draw_robots(current_point1, routes, num_robots, robots_poses, centroid, vels[0])\n if animate:\n plt.draw()\n plt.pause(0.01)\n\n # print('Current simulation time: ', time.time()-start_time)\n\n # Wait for the next period\n global jobs\n jobs += 1\n liblitmus.call_sleep_next_period()\n\n if VISUALIZE:\n print('\\nDone')\n 
progress_bar.finish()\n plt.show()\n\n end_time = time.time()\n print('Simulation execution time: ', round(end_time-start_time,2))\n\nif __name__ == \"__main__\":\n wcet = 150\n period = 200\n deadline = 200\n phase = 0\n early = False\n\n numReps = 35\n\n jobs = 0\n\n # Make this thread a real-time task\n liblitmus.call_set_rt_task_param(wcet, period, deadline, phase, early)\n print(\"\\nFinished setting rt params.\\n\")\n\n liblitmus.call_init_litmus()\n print(\"\\nCalled init_litmus.\\n\")\n\n liblitmus.set_task_mode_litmusrt()\n print(\"\\nNow a real-time task.\\n\")\n\n print(\"\\nAbout to wait for synchronous release.\\n\")\n liblitmus.call_wait_for_ts_release()\n\n # Do the work\n for i in range(numReps):\n gradient_interactive()\n\n # Make it not a real-time task anymore\n liblitmus.set_task_mode_background()\n print(\"\\nNow a background task again.\\n\")\n\n print(\"Number of jobs:\", jobs)"
] |
[
[
"scipy.ndimage.morphology.distance_transform_edt",
"matplotlib.pyplot.pause",
"numpy.gradient",
"numpy.arange",
"matplotlib.pyplot.cla",
"numpy.linalg.norm",
"matplotlib.pyplot.show",
"matplotlib.pyplot.draw",
"numpy.random.uniform",
"numpy.array",
"numpy.zeros",
"numpy.roll",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] |
parkjan4/HiggsBoson
|
[
"1e31f9bd2c6cb03c6acc8caed573046bbc0d2c08"
] |
[
"project/Python Code/implementations.py"
] |
[
"from proj1_helpers import *\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\n######################### Loss Functions #########################\n\n# Compute loss with Mean Squared Error\ndef compute_loss(y, tx, w):\n e = y.reshape((len(y),1)) - tx.dot(w).reshape((len(y),1))\n return 1/2*np.mean(e**2)\n\n# Compute gradient for gradient descent\ndef compute_gradient(y, tx, w):\n e = y.reshape((len(y),1)) - tx.dot(w).reshape((len(y),1))\n grad = -tx.T.dot(e) / len(e)\n return grad\n\ndef sigmoid(x):\n return 1.0 / (1 + np.exp(-x))\n\ndef compute_loss_logistic(y, tx, w):\n # loss formula works only for y = {0,1} \n y[y == -1] = 0\n y = y.reshape((len(y),1))\n sigma = sigmoid(tx.dot(w)).reshape((len(y),1))\n loss = y.T.dot(np.log(sigma)) + (1 - y).T.dot(np.log(1 - sigma))\n return np.squeeze(- loss)\n\ndef compute_gradient_logistic(y, tx, w):\n sigma = sigmoid(tx.dot(w)).reshape((len(y),1))\n y = y.reshape((len(sigma),1))\n grad = tx.T.dot(sigma - y)\n return grad\n\n\n######################### Methods Implementation #########################\n\n# Gradient Descent\ndef least_squares_GD(y, tx, initial_w, max_iters, gamma):\n ws = [initial_w]\n losses = []\n w = initial_w\n\n for n_iter in range(max_iters):\n gradient = compute_gradient(y, tx, w)\n loss = compute_loss(y, tx, w)\n w = w - gamma * gradient\n\n ws.append(w)\n losses.append(loss)\n\n return ws[-1], losses[-1]\n\n# Stochastic Gradient Descent\ndef least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n ws = [initial_w]\n losses = []\n w = initial_w\n n_iter = 0\n batch_size = 1\n\n for batch_y, batch_tx in batch_iter(y, tx, batch_size, max_iters):\n grad = compute_gradient(batch_y, batch_tx, w)\n loss = compute_loss(batch_y, batch_tx, w)\n w = w - gamma * grad\n\n ws.append(w)\n losses.append(loss)\n n_iter += 1\n\n return ws[-1], losses[-1]\n\n \ndef least_squares(y, tx):\n w = np.linalg.solve(tx.T.dot(tx), tx.T.dot(y))\n loss = compute_loss(y, tx ,w)\n return w, loss\n\n\ndef ridge_regression(y, tx, lambda_):\n w = np.linalg.solve(tx.T.dot(tx) + lambda_ * np.eye(tx.shape[1]), tx.T.dot(y))\n loss = compute_loss(y, tx, w)\n return w, loss\n\ndef logistic_regression(y, tx, w, max_iters, gamma):\n for n_iter in range(max_iters):\n loss = compute_loss_logistic(y, tx, w)\n grad = compute_gradient_logistic(y, tx, w)\n w -= gamma * grad\n return w, loss\n\ndef reg_logistic_regression(y, tx, lambda_, w, max_iters, gamma):\n for n_iter in range(max_iters):\n loss = compute_loss_logistic(y, tx, w) + lambda_ * np.squeeze(w.T.dot(w))\n grad = compute_gradient_logistic(y, tx, w) + 2 * lambda_ * w\n w -= gamma * grad\n return w, loss\n\n\n######################### Improvements #########################\n\ndef RR_optimal_lambda_finder(y, tx, learning_algo):\n k_folds = 10\n lambdas = np.logspace(-4,0,30)\n seeds = range(10)\n \n # define an empty matrix to store cross validation errors\n CV_errors = np.empty((len(seeds), len(lambdas)), dtype=float)\n for i, seed in enumerate(seeds):\n for j, lambda_ in enumerate(lambdas):\n errors = cross_validation(y, tx, k_folds, learning_algo, lambda_, seed)\n CV_error = np.mean(errors)\n CV_errors[i, j] = CV_error\n\n best_accuracy = max(np.mean(CV_errors, axis=0))\n opt_lambda = lambdas[np.argmax(np.mean(CV_errors, axis=0))]\n return opt_lambda, best_accuracy \n\n\ndef build_poly(x, degree):\n \"\"\"polynomial basis functions for input data x, for j=0 up to j=degree.\"\"\"\n poly = np.ones((len(x), 1))\n for deg in range(1, degree+1):\n poly = np.c_[poly, np.power(x, deg)]\n return 
poly[:,1:]\n\n\ndef interaction_forward_selection(y, tx):\n '''For every possible 2nd order interaction term, add to the original \\\n feature set iff its inclusion leads to higher accuracy based on 5-fold CV'''\n \n # define reference accuracy (with NO interaction terms)\n reference = np.mean(cross_validation(y, tx, 5, least_squares, 0, 1))\n \n # define list to store feature indices whose interaction is useful\n interaction_terms = []\n \n counter = 0 \n num_features = 30 # original number of features\n for col1 in range(num_features):\n for col2 in range(num_features):\n if col1 >= col2: continue\n temp_tx = np.c_[tx, tx[:,col1] * tx[:,col2]]\n accuracy = np.mean(cross_validation(y, temp_tx, 5, least_squares, 0, 1))\n \n # if new accuracy is higher, add the term\n if accuracy > reference: \n reference = accuracy\n tx = temp_tx\n interaction_terms.append((col1, col2))\n \n counter += 1 \n print(\"{p:.2f}% complete, best accuracy: {a:.9f}\".format(p=100* counter / 435, a=reference))\n return tx, interaction_terms\n\n\ndef third_interaction_forward_selection(y, tx):\n '''For every possible 3rd order interaction term, add to the original \\\n feature set iff its inclusion leads to higher accuracy based on 5-fold CV'''\n \n # define reference accuracy (with NO interaction terms)\n reference = np.mean(cross_validation(y, tx, 5, least_squares, 0, 1))\n \n # define list to store feature indices whose interaction is useful\n third_interaction_terms = []\n \n counter = 0 # delete this line\n num_features = 30 # original number of features\n for col1 in range(num_features):\n for col2 in range(num_features):\n if col1 >= col2: continue\n for col3 in range(num_features):\n if col2 >= col3: continue\n temp_tx = np.c_[tx, tx[:,col1] * tx[:,col2] * tx[:,col3]]\n accuracy = np.mean(cross_validation(y, temp_tx, 5, least_squares, 0, 1))\n \n # if new accuracy is higher, add the term\n if accuracy > reference: \n reference = accuracy\n tx = temp_tx\n third_interaction_terms.append((col1, col2, col3))\n \n counter += 1 # delete this line\n print(\"{p:.2f}% complete, best accuracy: {a:.9f}\".format(p=100* counter / 4060, a=reference))\n return tx, third_interaction_terms\n\n\ndef build_k_indices(y, k_fold, seed):\n \"\"\"build k indices for k-fold.\"\"\"\n num_row = y.shape[0]\n interval = int(num_row / k_fold)\n np.random.seed(seed)\n indices = np.random.permutation(num_row)\n k_indices = [indices[k * interval: (k + 1) * interval]\n for k in range(k_fold)]\n return np.array(k_indices)\n\n\ndef cross_validation(y, tx, k_folds, learning_algo, lambda_, seed):\n # build k_folds instances of indices \n k_indices = build_k_indices(y, k_folds, seed)\n \n # define list to store cross validation error\n errors = []\n for k in range(k_folds):\n tx_valid = tx[k_indices[k,:]]\n y_valid = y[k_indices[k,:]]\n tx_train = tx[k_indices[list(set(range(k_indices.shape[0])) - set([k])),:].reshape((k_indices.shape[0]-1)*k_indices.shape[1]),:]\n y_train = y[k_indices[list(set(range(k_indices.shape[0])) - set([k])),:].reshape((k_indices.shape[0]-1)*k_indices.shape[1])]\n\n # least squares using normal equations\n if learning_algo == least_squares:\n w, loss_tr = learning_algo(y_train, tx_train)\n \n # ridge regression using normal equations\n elif learning_algo == ridge_regression:\n w, loss_tr = learning_algo(y_train, tx_train, lambda_)\n \n # least squares gradient descent\n elif learning_algo == least_squares_GD:\n initial_w = np.zeros((tx.shape[1],1))\n max_iters = 1000\n gamma = 0.0000001\n w, loss_tr = 
learning_algo(y_train, tx_train, initial_w, max_iters, gamma)\n \n # least squares stochastic gradient descent\n elif learning_algo == least_squares_SGD:\n initial_w = np.zeros((tx.shape[1],1))\n max_iters = 1000\n gamma = 0.0000001\n w, loss_tr = learning_algo(y_train, tx_train, initial_w, max_iters, gamma)\n \n # logistic regression gradient descent\n elif learning_algo == logistic_regression:\n initial_w = np.zeros((tx.shape[1],1))\n max_iters = 500\n gamma = 0.000000000000001\n w, loss_tr = learning_algo(y_train, tx_train, initial_w, max_iters, gamma)\n \n # regularized logistic regression gradient descent\n elif learning_algo == reg_logistic_regression:\n initial_w = np.zeros((tx.shape[1],1))\n max_iters = 500\n gamma = 0.000000000000001\n w, loss_tr = learning_algo(y_train, tx_train, lambda_, initial_w, max_iters, gamma)\n \n y_hat = predict_labels(w, tx_valid)\n errors.append(sum(y_valid.reshape((len(y_valid),1))==y_hat.reshape((len(y_hat),1))) / len(y_valid))\n \n # return the average error rate across the folds\n return errors\n\ndef data_segmentation(y, tx):\n '''\n PRI_jet_num is a feature which only takes a value of 0, 1, 2, or 3.\n Many features become undefined (-999) based on which value it takes.\n The purpose of this function is to split the data based on the four values.\n Source: http://opendata.cern.ch/record/328 \n Input:\n y: reponse\n tx: data matrix\n Returns:\n four sets of response and data matrices segmented based on PRI_jet_num\n '''\n # data segmentation\n temp_matrix = np.c_[y, tx]\n \n indices_0 = temp_matrix[:,23]==0\n temp_matrix_0 = temp_matrix[indices_0,:]\n y_0 = temp_matrix_0[:,0]\n tx_0 = temp_matrix_0[:,1:]\n \n indices_1 = temp_matrix[:,23]==1\n temp_matrix_1 = temp_matrix[indices_1,:]\n y_1 = temp_matrix_1[:,0]\n tx_1 = temp_matrix_1[:,1:]\n \n indices_2 = temp_matrix[:,23]==2\n temp_matrix_2 = temp_matrix[indices_2,:]\n y_2 = temp_matrix_2[:,0]\n tx_2 = temp_matrix_2[:,1:]\n \n indices_3 = temp_matrix[:,23]==3\n temp_matrix_3 = temp_matrix[indices_3,:]\n y_3 = temp_matrix_3[:,0]\n tx_3 = temp_matrix_3[:,1:]\n \n # when PRI_jet_num is 0, the following features are undefined and thus removed\n tx_0 = np.delete(tx_0, np.s_[4,5,6,12,22,23,24,25,26,27,28,29], axis=1)\n \n # when PRI_jet_num is 1, the following features are undefined and thus removed\n tx_1 = np.delete(tx_1, np.s_[4,5,6,12,22,26,27,28], axis=1)\n \n # at least, PRI_jet_num itself is removed\n tx_2 = np.delete(tx_2, np.s_[22], axis=1)\n tx_3 = np.delete(tx_3, np.s_[22], axis=1)\n \n # replace any remaining -999 values with the mean of that feature\n tx_0 = replace_with_mean(tx_0)\n tx_1 = replace_with_mean(tx_1)\n tx_2 = replace_with_mean(tx_2)\n tx_3 = replace_with_mean(tx_3)\n \n return y_0, tx_0, y_1, tx_1, y_2, tx_2, y_3, tx_3, indices_0, indices_1, indices_2, indices_3\n\ndef backward_selection(y, tx):\n '''Performs backward feature selection using least squares algorithm\n Input:\n y: response\n tx: data matrix\n Output:\n new data matrix with (potentially) fewer features'''\n \n cols_removed = [] \n temp_tx, col_removed = backward_selection_algorithm(y, tx)\n while tx.shape[1] != temp_tx.shape[1]: # means a feature was removed \n tx = temp_tx\n cols_removed.append(col_removed)\n temp_tx, col_removed = backward_selection_algorithm(y, temp_tx)\n return tx, cols_removed\n\ndef backward_selection_algorithm(y, tx):\n k_folds = 10\n seed = 1\n index_to_remove = []\n reference = np.mean(cross_validation(y, tx, k_folds, least_squares, 0.0001, seed))\n for c in range(tx.shape[1]):\n temp_tx 
= tx[:,list(set(range(tx.shape[1])) - set([c]))]\n CV_accuracy = np.mean(cross_validation(y, temp_tx, k_folds, least_squares, 0.0001, seed))\n if CV_accuracy > reference:\n reference = CV_accuracy\n index_to_remove.append(c)\n \n if len(index_to_remove) == 0: # means no features were removed\n return tx, -1\n \n return tx[:,list(set(range(tx.shape[1])) - set([index_to_remove[-1]]))], index_to_remove[-1]\n\ndef replace_with_mean(tx):\n '''replace all -999 values with mean value of each column'''\n for col in range(tx.shape[1]):\n # find indices for which the value is -999\n indices = tx[:,col]==-999\n # replace with mean value\n tx[indices,col] = np.mean(tx[~indices,col])\n return tx\n\n######################### Helpers #########################\n\n# Creates batches for stochastic gradient descent\ndef batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):\n data_size = len(y)\n\n if shuffle:\n shuffle_indices = np.random.permutation(np.arange(data_size))\n shuffled_y = y[shuffle_indices]\n shuffled_tx = tx[shuffle_indices]\n else:\n shuffled_y = y\n shuffled_tx = tx\n for batch_num in range(num_batches):\n start_index = batch_num * batch_size\n end_index = min((batch_num + 1) * batch_size, data_size)\n if start_index != end_index:\n yield shuffled_y[start_index:end_index], shuffled_tx[start_index:end_index]\n"
] |
[
[
"numpy.log",
"numpy.random.seed",
"numpy.power",
"numpy.logspace",
"numpy.arange",
"numpy.squeeze",
"numpy.eye",
"numpy.delete",
"numpy.random.permutation",
"numpy.mean",
"numpy.exp",
"numpy.array",
"numpy.zeros"
]
] |
WillemWybo/Electrical_compartmentalization_in_neurons
|
[
"1ff297be97412ff40042485479b78148fba11c27"
] |
[
"_matplotlibsettings.py"
] |
[
"import matplotlib\n# matplotlib.use(\"Agg\")\nimport matplotlib.pyplot as pl\nimport matplotlib.animation as manimation\nfrom matplotlib.gridspec import GridSpec\nfrom matplotlib.patches import Rectangle\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib import rc, rcParams\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\nfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset\nfrom matplotlib.offsetbox import AnchoredText\n\n# colours = ['DeepPink', 'Purple', 'MediumSlateBlue', 'Blue', 'Teal',\n# 'ForestGreen', 'DarkOliveGreen', 'DarkGoldenRod',\n# 'DarkOrange', 'Coral', 'Red', 'Sienna', 'Black', 'DarkGrey']\ncolours = list(pl.rcParams['axes.prop_cycle'].by_key()['color'])\n\n\n# matplotlib settings\nlegendsize = 10\nlabelsize = 15\nticksize = 15\nlwidth = 1.5\nmarkersize = 6.\nfontsize = 16\nlettersize = 20.\n#~ font = {'family' : 'serif',\n #~ 'weight' : 'normal',\n #~ 'size' : fontsize}\n #'sans-serif':'Helvetica'}\n#'family':'serif','serif':['Palatino']}\n#~ rc('font', **font)\nrc('font',**{'family':'serif','serif':['Palatino'], 'size': 15.0})\nrc('mathtext',**{'fontset': 'stixsans'})\n# rc('text', usetex=True)\n# rcParams['text.latex.preamble'].append(r\"\\usepackage{amsmath}\\usepackage{xfrac}\")\nrc('legend',**{'fontsize': 'medium'})\nrc('xtick',**{'labelsize': 'small'})\nrc('ytick',**{'labelsize': 'small'})\nrc('axes',**{'labelsize': 'large', 'labelweight': 'normal'})\n\n\ncs = ['r', 'b', 'g', 'c', 'y']\nmfs = ['D', 'o', 'v', '^', 's', 'p']\nmls = ['+', '*', 'x', '1', '2']\nlss = ['-', '--', '-.', ':']\ncmap = pl.get_cmap('jet')\n\ndef myAx(ax):\n # customize the ax\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n return ax\n\ndef myLegend(ax, add_frame=True, **kwarg):\n leg = ax.legend(**kwarg)\n if add_frame:\n frame = leg.get_frame()\n frame.set_color('white')\n frame.set_alpha(0.8)\n return leg\n\ndef myColorbar(ax, im, **kwargs):\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", \"5%\", pad=\"3%\")\n return pl.colorbar(im, cax=cax, **kwargs)\n\n\n"
] |
[
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.get_cmap",
"matplotlib.rc"
]
] |
Thomas-Brd/3D_landslide_detection
|
[
"95ec6fc4dd013ecc7c3c8cb22dbbbc8712467830"
] |
[
"scripts/.ipynb_checkpoints/gdal_functions-checkpoint.py"
] |
[
"# coding: utf-8\n# Thomas Bernard\n# fonctions utilisant gdal\n\nfrom osgeo import gdal\nimport ogr, osr\nimport numpy as np\n#import rasterio\n#from rasterio.plot import show\nimport subprocess\nimport os\nimport matplotlib.pyplot as plt \n\n#import scripts.eros_function as eros\n\n\ndef read_tif_file(path_to_file):\n ds = gdal.Open(path_to_file)\n gt = ds.GetGeoTransform()\n proj = ds.GetProjection()\n band= ds.GetRasterBand(1)\n mask = band.GetNoDataValue()\n array = band.ReadAsArray()\n \n return array, gt, proj, mask\n\ndef ReadRasterfile(dataset):\n \"\"\"\n This function open a raster file, transform it into\n a numpy array and get information from it\n \"\"\"\n \n for x in range(1, dataset.RasterCount + 1):\n band = dataset.GetRasterBand(x)\n # Projection\n projection = dataset.GetProjection()\n # Raster extent \n upx, xres, xskew, upy, yskew, yres = dataset.GetGeoTransform()\n coordinates = [upx, xres, xskew, upy, yskew, yres]\n # Dimensions\n sizeX = dataset.RasterXSize\n sizeY = dataset.RasterYSize\n # Data as a numpy array\n array = band.ReadAsArray()\n # Get nodata value from the GDAL band object\n nodata = band.GetNoDataValue()\n #Create a masked array for making calculations without nodata values\n array = np.ma.masked_equal(array, nodata)\n type(array)\n \n \n return array, sizeX, sizeY, projection, band, coordinates\n del array, sizeX, sizeY, projection, band, coordinates\n\n# converts coordinates to index\n\ndef bbox2ix(bbox,gt):\n xo = int(round((bbox[0] - gt[0])/gt[1]))\n yo = int(round((gt[3] - bbox[3])/gt[1]))\n xd = int(round((bbox[1] - bbox[0])/gt[1]))\n yd = int(round((bbox[3] - bbox[2])/gt[1]))\n return(xo,yo,xd,yd)\n\ndef rasclip(ras,shp):\n ds = gdal.Open(ras)\n gt = ds.GetGeoTransform()\n\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n dataSource = driver.Open(shp, 0)\n layer = dataSource.GetLayer()\n\n for feature in layer:\n\n xo,yo,xd,yd = bbox2ix(feature.GetGeometryRef().GetEnvelope(),gt)\n arr = ds.ReadAsArray(xo,yo,xd,yd)\n yield arr\n\n layer.ResetReading()\n ds = None\n dataSource = None\n \n return arr\n\ndef WriteGeoTIF(Tiffname, nb_xpixels, nb_ypixels, size_pixels, y_position, x_position, epsg, array):\n from osgeo import gdal, osr\n \n drv = gdal.GetDriverByName('GTiff')\n ds = drv.Create(Tiffname, nb_xpixels, nb_ypixels, 1, gdal.GDT_Float32)\n gt = [x_position, size_pixels, 0, y_position, 0,-size_pixels ]\n ds.SetGeoTransform(gt)\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(epsg)\n ds.SetProjection(srs.ExportToWkt())\n ds.GetRasterBand(1).WriteArray(array)\n ds.GetRasterBand(1).SetNoDataValue(-9999.0)\n \n return \n \ndef RastertoTXTfile(path,file, output_filename):\n ds = gdal.Open(path+file)\n translate_options = gdal.TranslateOptions(format='XYZ')\n ds = gdal.Translate(destName = path+output_filename, srcDS = ds, options = translate_options)\n return\n \ndef create_masks(path, filename, path_masks):\n \"\"\"\n This function create a mask from a tif file\n What you need:\n A path location of the Watersheds\n A file name of the watersheds tif file\n A path to write the resulted masks\n \"\"\"\n # Remove the previous folder if any\n if os.path.isdir(path_masks) == True:\n contenu=os.listdir(path_masks)\n for x in contenu:\n os.remove(path_masks+x)#on supprime tous les fichier dans le dossier\n os.rmdir(path_masks)#puis on supprime le dossier\n # Create topo folder\n dataset = gdal.Open(path +filename + '.tif', gdal.GA_ReadOnly)\n array,sizeX, sizeY, projection, band, coordinates = ReadRasterfile(dataset)\n array[array<0]=-9999\n # Get 
origin coordinates \n xyzlohi = [coordinates[0],coordinates[3],coordinates[3]-sizeY,coordinates[0]+sizeX]\n # get id of watersheds\n watershed_id = np.unique(array)\n watershed_id = watershed_id[1:]\n # Create masks\n masks={}\n os.mkdir(path_masks)\n watersheds_filename = {}\n count=1\n for i in watershed_id:\n array_copy = np.copy(array)\n array_copy[array!=i] = -9999\n eros.write(array_copy,sizeX,sizeY,coordinates[1],xyzlohi,path_masks+'SBV'+str(i)+'_mask.alt')\n watersheds_filename['FN{0}'.format(count)] = 'SBV'+str(i)\n count=count+1\n \n del dataset\n return watersheds_filename\n\ndef get_outlets_coordinates(path, filename, outlet_coordinates):\n \"\"\"\n This functions get the outlets coordinates for each watershed in the tif file \"*_watershed.tif\"\n What you need:\n A path location of the Watersheds\n A file name of the watersheds tif file\n A panda dataframe of the coordinates of all points of the river network ('*_coord.txt')\n \"\"\"\n # Origine of the grid in the appropriate coordinate system\n dataset = gdal.Open(path +filename + '.tif', gdal.GA_ReadOnly)\n array, sizeX, sizeY, projection, band, coordinates = ReadRasterfile(dataset)\n outlet_coordinates['Y_origin'] = coordinates[3]\n outlet_coordinates['X_origin'] = coordinates[0]\n # Get outlet coordinates in Eros format\n outlet_coordinates['Xgrid_position'] = outlet_coordinates['X_coordinates'] - outlet_coordinates['X_origin']\n outlet_coordinates['Ygrid_position'] = outlet_coordinates['Y_origin'] - outlet_coordinates['Y_coordinates']\n outlet_coordinates['Ygrid_position']=outlet_coordinates['Ygrid_position'].astype(int)\n outlet_coordinates['Xgrid_position']=outlet_coordinates['Xgrid_position'].astype(int)\n # The coordinates are sorted by descending order of the drainage area\n outlet_coordinates.sort_values(by=['Contributing area'],ascending=False)\n outlet_coordinates.reset_index(drop=True,inplace=True)\n del dataset\n return outlet_coordinates\n\ndef filename_by_Strahler_order(path, filename):\n \"\"\"\n This functions return filename in function of Strahler order \n What you need:\n A path location of the Strahler order grid and watersheds\n A file name of the watersheds tif file and Strahler order (same one)\n \"\"\"\n # Import Strahler grid\n dataset = gdal.Open(path +filename + '_ord.tif', gdal.GA_ReadOnly)\n array_Strahler, sizeX, sizeY, projection, band, coordinates = ReadRasterfile(dataset)\n # Import Watersheds grid\n watersheds = gdal.Open(path +filename + '_watersheds.tif', gdal.GA_ReadOnly)\n array_watersheds, sizeX, sizeY, projection, band, coordinates = ReadRasterfile(watersheds)\n # Where Strahler grid is 1 get id watersheds into list 1\n first_order_array = array_watersheds[array_Strahler==1]\n # get id of watersheds\n first_order_watershed_id = np.unique(first_order_array)\n # ohterwise get id watersheds into list 2\n bigger_order_array = array_watersheds[np.logical_and(array_Strahler!=1,array_Strahler>0)]\n bigger_order_watershed_id = np.unique(bigger_order_array)\n # save filename into dictionnary \n first_order_Watersheds = {}\n high_order_Watersheds = {}\n # Save first order watersheds\n count = 1 \n for i in first_order_watershed_id:\n first_order_Watersheds['FN{0}'.format(count)] = 'SBV' + str(i)\n count = count + 1\n # Save higher order watersheds\n count = 1\n for ii in bigger_order_watershed_id[1:]:\n high_order_Watersheds['FN{0}'.format(count)] = 'SBV' + str(ii)\n count = count+1\n \n # Save last watersheds\n last_watershed = 'SBV2'\n \n return first_order_Watersheds, high_order_Watersheds, 
last_watershed \n\ndef define_inputs_and_outlets(path,filename,Input_outlet_distance,outlet_coordinates,plot_option):\n \"\"\"\n This function define the inputs coordinates and the outlet coordinates located upstream and downstream \n What you need:\n Input_outlet_distance: Distance in meter where to locate the inputs from the detected outlets\n \"\"\"\n \n # Open watershed tif file for ploting option\n dataset = gdal.Open(path+filename + '_watersheds.tif', gdal.GA_ReadOnly)\n array, sizeX, sizeY, projection, band, coordinates = ReadRasterfile(dataset)\n river_network = gdal.Open(path+filename+ '_ord.tif', gdal.GA_ReadOnly)\n river_array, sizeX, sizeY, projection, band, coordinates = ReadRasterfile(river_network)\n Y_origin = coordinates[3]\n X_origin = coordinates[0]\n\n # Open _coord txt file\n import pandas as pd\n header = ['X_coordinates','Y_coordinates','Distance to the downstream end of a terminal link','Elevation','Contributing area']\n tab_coord=pd.read_csv(path + filename+ '_coord.txt',sep='\\t',names = header,index_col=False,usecols=[1, 2, 3,4, 5],na_values='-9999')\n # Add a line at the end of the txt file for the last outlet\n tab_coord =tab_coord.append({'X_coordinates' : 9999 , 'Y_coordinates' : 9999,'Distance to the downstream end of a terminal link':tab_coord.loc[len(tab_coord)-1,'Distance to the downstream end of a terminal link']+100},ignore_index=True)\n # chercher pour chaque Xgrid_position et Ygrid_positon si valeur après est inf alors prendre coordonnées K lignes après\n outlet_watershed_dico = {}\n input_watershed_dico = {}\n input_area_dico = {}\n count = 1\n for i in outlet_coordinates['Distance to the downstream end of a terminal link']:\n index_list = tab_coord.index[np.around(tab_coord['Distance to the downstream end of a terminal link'],3)== np.around(i,3)].tolist()\n\n # Lists\n input_coord_list =[]\n input_area_list=[]\n \n for ii in index_list:\n # manage the last line of the text file\n if len(tab_coord) - ii > Input_outlet_distance:\n # Take coord k lines after if the +1 line is smaller \n if np.logical_and(tab_coord.loc[ii+1,'Distance to the downstream end of a terminal link'] < tab_coord.loc[ii,'Distance to the downstream end of a terminal link'],np.abs(tab_coord['Distance to the downstream end of a terminal link'][ii+1]-tab_coord['Distance to the downstream end of a terminal link'][ii])<10) :\n outlet_watershed_dico['Outlet{0}'.format(count)] = [tab_coord.loc[ii+Input_outlet_distance,'X_coordinates'],tab_coord.loc[ii+Input_outlet_distance,'Y_coordinates']]\n \n # get the input coordinates\n if np.logical_or(tab_coord.loc[ii+1,'Distance to the downstream end of a terminal link'] > tab_coord.loc[ii,'Distance to the downstream end of a terminal link'],np.abs(tab_coord['Distance to the downstream end of a terminal link'][ii+1]-tab_coord['Distance to the downstream end of a terminal link'][ii])>10):\n input_coord_list.append([tab_coord.loc[ii-Input_outlet_distance,'X_coordinates'],tab_coord.loc[ii-Input_outlet_distance,'Y_coordinates']])\n input_area_list.append(tab_coord.loc[ii-Input_outlet_distance,'Contributing area'])\n input_watershed_dico['Input{0}'.format(count)] = input_coord_list\n input_area_dico['Area{0}'.format(count)] = input_area_list\n count= count + 1\n \n # transform coordinates in grid format in each dictionnary\n for j in outlet_watershed_dico:\n outlet_watershed_dico[str(j)] = [outlet_watershed_dico[str(j)][0]-X_origin, Y_origin - outlet_watershed_dico[str(j)][1]]\n outlet_watershed_dico[str(j)][0] = 
outlet_watershed_dico[str(j)][0].astype(int)\n outlet_watershed_dico[str(j)][1] = outlet_watershed_dico[str(j)][1].astype(int)\n \n for g in range(1,len(input_watershed_dico)+1):\n for gg in range(0,len(input_watershed_dico['Input{0}'.format(g)])):\n input_watershed_dico['Input{0}'.format(g)][gg] = [input_watershed_dico['Input{0}'.format(g)][gg][0]-X_origin, Y_origin - input_watershed_dico['Input{0}'.format(g)][gg][1]]\n input_watershed_dico['Input{0}'.format(g)][gg][0] = input_watershed_dico['Input{0}'.format(g)][gg][0].astype(int)\n input_watershed_dico['Input{0}'.format(g)][gg][1] = input_watershed_dico['Input{0}'.format(g)][gg][1].astype(int)\n\n \n # plot coordinates\n if plot_option == 1:\n fig, ax = plt.subplots(1, figsize=(20, 20))\n plt.imshow(array)\n masked_river = np.ma.masked_where(river_array < 1, river_array)\n plt.imshow(masked_river,cmap=plt.cm.gray)\n # plot all input and outlet points\n for y in outlet_watershed_dico:\n plt.plot(outlet_watershed_dico[str(y)][0],outlet_watershed_dico[str(y)][1],'k.',markersize=10)\n for p in range(1,len(input_watershed_dico)+1):\n for pp in range(0,len(input_watershed_dico['Input{0}'.format(p)])):\n plt.plot(input_watershed_dico['Input{0}'.format(p)][pp][0],input_watershed_dico['Input{0}'.format(p)][pp][1],'r.',markersize=10)\n del dataset\n return outlet_watershed_dico, input_watershed_dico, input_area_dico\n\n\n\n\n\ndef sort_watersheds(path_masks,Watersheds_filename, outlet_watershed_dico,last_watershed):\n \"\"\"\n This function classify the watersheds by contributing area order\n \"\"\"\n list_watersheds = []\n list_position = []\n for i in range(1,len(Watersheds_filename)+1):\n grd_mask, sizeX, sizeY, cs, xyzlohi = eros.open_file(path_masks+Watersheds_filename['FN{0}'.format(i)]+'_mask.alt')\n if np.size(grd_mask) - np.size(grd_mask[grd_mask==-9999]) < 50:\n pass\n else:\n for ii in range(1,len(outlet_watershed_dico)+1):\n if grd_mask[outlet_watershed_dico['Outlet{0}'.format(ii)][1],outlet_watershed_dico['Outlet{0}'.format(ii)][0]] == np.max(grd_mask):\n list_watersheds.append(Watersheds_filename['FN{0}'.format(i)])\n list_position.append(ii)\n \n watershed_classified = [x for _,x in sorted(zip(list_position,list_watersheds ))]\n\n watershed_classified_dico={}\n watershed_classified_dico['FN1'] = last_watershed\n for j in range(0,len(watershed_classified)):\n watershed_classified_dico['FN{0}'.format(j+2)] = watershed_classified[j]\n \n return watershed_classified_dico\n \n \n \ndef merge_results(path_topo,path_simulations,path_masks,path_tif_foleder,Watershed_name,all_watersheds_filename,extension_dico, results_folders,epsg,y_position,x_position):\n \"\"\"\n This function allows to merge all the eros file results into one\n \"\"\"\n array = {}\n masks ={}\n # Open array topo\n array['Ar0'], sizeX, sizeY, cs, xyzlohi = eros.open_file(path+Watershed_name+'.alt')\n for i in range(1, len(results_extension)+1):\n count=1\n for ii in results_folders:\n # Open simulation result\n array['Ar{0}'.format(count)], sizeX, sizeY, cs, xyzlohi = eros.open_file(path_simulations+ii+'/'+Watershed_name+'.10.'+results_extension['Ext{0}'.format(i)])\n # Open simulation result Open corresponding mask\n masks['masks{0}'.format(count)], sizeX, sizeY, cs, xyzlohi = eros.open_file(path_masks +all_watersheds_filename['FN{0}'.format(count)]+'_mask.alt')\n # \n array['Ar0'][masks['masks{0}'.format(count)]>=0] = array['Ar{0}'.format(count)][masks['masks{0}'.format(count)]>=0]\n count = count + 1\n eros.write(array['Ar0'],sizeX, sizeY, cs, 
xyzlohi,path_floodos_folder+Watershed_name+'.10.'+results_extension['Ext{0}'.format(i)])\n gdalf.WriteGeoTIF(path_tif_folder+Watershed_name+'_'+results_extension['Ext{0}'.format(i)]+'.tif', sizeX, sizeY, cs, y_position, x_position, epsg, array['Ar1'])\n"
] |
[
[
"matplotlib.pyplot.imshow",
"pandas.read_csv",
"numpy.abs",
"numpy.logical_and",
"numpy.unique",
"numpy.around",
"matplotlib.pyplot.subplots",
"numpy.max",
"numpy.copy",
"numpy.ma.masked_equal",
"numpy.size",
"numpy.ma.masked_where"
]
] |
LC-John/Sorting
|
[
"c34ab338a910a12def0db426495a97b5170b971b"
] |
[
"src/SlowSort.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 23 13:55:02 2018\n\n@author: zhanghuangzhao\n\"\"\"\n\nimport numpy\n\nimport copy\nimport os, shutil\n\nimport imageio\nimport matplotlib.pyplot as plt\n\nsize = 8\narr = None\n \ndef SlowSort(a, idx_begin, idx_end, reverse=False):\n \n a = copy.deepcopy(a)\n proc = []\n \n if idx_begin >= idx_end:\n return a, proc\n \n idx_mid = int(numpy.floor((idx_begin+idx_end)/2))\n a, tmp_proc = SlowSort(a, idx_begin, idx_mid, reverse)\n proc += tmp_proc\n a, tmp_proc = SlowSort(a, idx_mid+1, idx_end, reverse)\n proc += tmp_proc\n \n if (reverse and a[idx_mid] > a[idx_end]) \\\n or ((not reverse) and a[idx_mid] < a[idx_end]):\n (a[idx_mid], a[idx_end]) = (a[idx_end], a[idx_mid])\n proc.append(copy.deepcopy(a))\n \n a, tmp_proc = SlowSort(a, idx_begin, idx_end-1, reverse)\n proc += tmp_proc\n \n return a, proc\n\nif __name__ == \"__main__\":\n \n arr = numpy.random.uniform(0, 1, size=size)\n arr = arr.tolist()\n \n res, proc = SlowSort(arr, 0, len(arr)-1)\n \n tmp_dir = \"../images/tmp\"\n img_buf = []\n if os.path.isdir(tmp_dir):\n shutil.rmtree(tmp_dir)\n os.mkdir(tmp_dir)\n for i in range(len(proc)):\n plt.cla()\n plt.bar(list(range(len(proc[i]))),\n height=proc[i],\n width=0.5)\n plt.xlim([-1, len(arr)])\n plt.ylim([-0.01, 1.01])\n plt.savefig(os.path.join(tmp_dir, (\"%d.jpg\" % i)))\n img_buf.append(imageio.imread(os.path.join(tmp_dir, (\"%d.jpg\" % i))))\n print (\"\\r%d / %d\" % (i+1, len(proc)), end=\"\")\n print(\"\\ndone!\")\n plt.cla()\n init = [imageio.imread(os.path.join(tmp_dir, \"0.jpg\")) for i in range(10)]\n final = [imageio.imread(os.path.join(tmp_dir, (\"%d.jpg\" % (len(proc)-1)))) for i in range(10)]\n img_buf = init + img_buf + final\n shutil.rmtree(tmp_dir)\n imageio.mimsave(\"../images/SlowSort.gif\", img_buf)"
] |
[
[
"matplotlib.pyplot.ylim",
"numpy.random.uniform",
"matplotlib.pyplot.cla",
"numpy.floor"
]
] |
XC-Li/FiscalNote_Project
|
[
"a8343f22156f619f2c8fe9102e6df684d1b4c97f"
] |
[
"deployment/util_code/doc2vec_vectorizer.py"
] |
[
"\"\"\"By: Xiaochi (George) Li: github.com/XC-Li\"\"\"\nfrom gensim.models.doc2vec import Doc2Vec\nimport numpy as np\nfrom scipy.sparse import hstack as sparse_hstack\n\n\nclass D2V(object):\n def __init__(self, file):\n self.model = Doc2Vec.load(file)\n\n def fit(self, X):\n pass\n\n def transform(self, X):\n temp = []\n for speech in X:\n temp.append(self.model.infer_vector(speech))\n return np.vstack(temp)\n\n\nclass StackedD2V(object):\n def __init__(self, file, vectorizer):\n self.d2v = Doc2Vec.load(file)\n self.vectorizer = vectorizer\n\n def fit(self, X):\n self.vectorizer.fit(X)\n\n def d2v_transform(self, X):\n temp = []\n for speech in X:\n temp.append(self.d2v.infer_vector(speech))\n return np.vstack(temp)\n\n def transform(self, X):\n bow = self.vectorizer.transform(X)\n d2v_emb = self.d2v_transform(X)\n combined_emb = sparse_hstack((bow, d2v_emb))\n return combined_emb\n"
] |
[
[
"scipy.sparse.hstack",
"numpy.vstack"
]
] |
cpratim/DALI-Data-Challenge
|
[
"ad0d6d048abb240dd2316ff70590606bd2d8c44f"
] |
[
"modeling/net.py"
] |
[
"import os\nfrom math import floor  # floor() is used in train() below to compute the number of batches\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nimport numpy as np\nfrom sklearn.preprocessing import (\n    StandardScaler,\n    MinMaxScaler,\n)\nfrom sklearn.pipeline import Pipeline\nfrom gplearn.genetic import SymbolicTransformer\nimport pickle\n\nsk_pipeline = Pipeline(\n    [\n        ('scaler', StandardScaler()),\n        #('transformer', SymbolicTransformer(n_jobs=-1))\n    ]\n)\n\ndef correlation(x, y):\n    return np.corrcoef(x, y)[0, 1]\n\nclass LinearNet(nn.Module):\n\n    def __init__(self, n_feat, n_out= 1):\n\n        super().__init__()\n\n        self.conv1 = nn.Conv1d(n_feat, 15, 1)\n        self.pool1 = nn.MaxPool1d(1)\n        self.flatten = nn.Flatten()\n        self.relu1 = nn.ReLU()\n        self.linear1 = nn.Linear(15, 5)\n        self.out = nn.Linear(5, n_out)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.pool1(x)\n        x = self.flatten(x)\n        x = self.relu1(x)\n        x = self.linear1(x)\n        x = self.out(x)\n        return x\n\n\ndef train(\n    model,\n    X, y,\n    epochs=50000,\n    batch_size=None,\n    learning_rate=1e-5,\n    score_func=correlation,\n    optimizer=optim.Adam,\n    loss_func=nn.MSELoss(),\n):\n    \n    y_comp = y.detach().cpu().numpy().reshape(-1,)\n    optimizer = optimizer(model.parameters(), lr=learning_rate)\n    splits = floor(len(X) / batch_size) if batch_size != None else 1\n\n    X_batches = torch.tensor_split(X, splits)\n    y_batches = torch.tensor_split(y, splits)\n    bar = tqdm(range(epochs))\n    for epoch in bar:\n        for x, y in zip(X_batches, y_batches):\n            y_pred = model(x)\n            loss = loss_func(y_pred, y)\n\n            optimizer.zero_grad()\n            loss.backward()\n            optimizer.step()\n        \n        y_pred_comp = model(X).detach().cpu().numpy().reshape(-1,)\n        score = score_func(y_pred_comp, y_comp)\n        bar.set_description(f\"Score: {round(score, 5)}\")\n    \n    return model\n\n\nclass ModelWrapper(object):\n\n    def __init__(self, model = LinearNet, train_func = train, feature_pipeline = sk_pipeline, **kwargs):\n\n        self.model = model(**kwargs)\n        self.train_func = train_func\n        self.feature_pipeline = feature_pipeline\n        self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n    def fit(self, X, y, **kwargs):\n        \n        X = self.feature_pipeline.fit_transform(X, y)\n        y = y.reshape(-1, 1)\n        X = X.reshape(-1, X.shape[1], 1)\n        X = torch.Tensor(X)\n        y = torch.Tensor(y)\n        X = X.to(self.device)\n        y = y.to(self.device)\n        self.model = self.model.to(self.device)\n        self.model = self.train_func(self.model, X, y, **kwargs)\n\n    def detach(self, y):\n        return y.detach().cpu().numpy().reshape(-1,)\n\n    def predict(self, X):\n        \n        X = self.feature_pipeline.transform(X)\n        X = X.reshape(-1, X.shape[1], 1)\n        X = torch.Tensor(X)\n        X = X.to(self.device)\n        y = self.model(X)\n        return self.detach(y) \n\n    def save(self, dir):\n        with open(dir, 'wb') as f:\n            pickle.dump(self, f)\n"
] |
[
[
"torch.Tensor",
"torch.nn.Flatten",
"torch.nn.MaxPool1d",
"torch.nn.Linear",
"sklearn.preprocessing.StandardScaler",
"torch.cuda.is_available",
"torch.nn.Conv1d",
"numpy.corrcoef",
"torch.nn.ReLU",
"torch.tensor_split",
"torch.nn.MSELoss"
]
] |
nbergam2021/ICESat_data_analysis_tools
|
[
"46f4132d2b34efe9a21470cdbaddf195301cfcd3"
] |
[
"crossover_finder.py"
] |
[
"#crossover_finder.py\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nimport glob\nimport os\nimport sys\n\ndef solve(line_list, box):\n # input: [slope, intersect, other_stuff]\n # output: [ [], [], [], [] ]\n\n # Group slopes, compare lines within each group\n # box is list of top bottom left right lat/lon lines\n # 0 is left, 1 is right, 2 is top, 3 is bottom\n\n # [a1, a2] where each is a point [x1, y1]\n intersects = []\n\n for i in line_list:\n for j in line_list:\n # if lines are different and have opposing slopes\n if i != j and i[0]*j[0] < 0: #assuming slope is index 0\n intersects.append(get_intersect(i, j))\n line_list.remove(i)\n\n for i in intersects:\n if i[0] > box[0] or i[1] > box[2] or i[0] < box[3] or i[1] < box[1]: #checks if point is out of range\n #intersects.remove(i)\n pass\n\n\n return set(intersects)\n\n\ndef get_intersect(point1, point2):\n # slope/intercept form => [slope, intercept]\n x = round( (point2[1]-point1[1]) / (point1[0]-point2[0]) , 3)\n y = round( point1[0]*x + point1[1] , 3)\n return ( x,y)\n\n\ndef get_error(alt):\n df = pd.DataFrame(columns=['Number of Points', 'Range',\n 'Standard Deviation', 'Variance']) #initializes dataframe\n for point in alt:\n #appends each set of # of points, range, stdvar, and variance\n if len(point) > 0:\n df = df.append(dict(zip(df.columns,\n [len(point), (np.max(np.array(point))-np.min(np.array(point))),\n np.std(point), np.var(point)])), ignore_index=True)\n return df\n\ndef magn(vector):\n for i in range(len(vector)):\n if vector[i] is None or vector[i] == np.inf or vector[i] == np.NINF:\n vector[i] = 0\n mag = np.sqrt(vector.dot(vector))\n return mag\n\ndef lerp(lower, higher, intr):\n diff = np.subtract(higher, lower)\n if np.prod(diff[0:2]) == 0:\n return None\n magdiff = magn(diff[0:2])\n intr_diff = np.subtract(intr, lower[0:2])\n magintr = magn(intr_diff)\n mag = magintr/magdiff\n if mag>1:\n return None\n lerped = np.add(lower, mag * diff)\n veri_diff = np.absolute(np.subtract(intr, lerped[0:2]))\n veri = np.sqrt(veri_diff.dot(veri_diff))\n #print(\"lower: ({0},{1}), higher: ({2},{3}), intr: ({4},{5}), veri: {6}\".format(lower[0], lower[1], higher[0], higher[1], intr[0], intr[1], veri))\n if veri < 50:\n return lerped[2]\n return None\n\ndef xovers(sort_list, line_list, intersections):\n xovers = [] #initializes an empty array to store crossovers\n for intr in intersections:\n points = [] #initializes an array to store points at a crossover\n for df in sort_list:\n index = df.iloc[:,0].searchsorted(intr[0]) #gets ideal index\n if index == 0: #accounts for out of bounding box point placement\n lower = df.iloc[index,:] #sets lower bound of possible point\n else:\n lower = df.iloc[index-1,:] #sets lower bound of possible point\n if index >= len(df.iloc[:,0]):\n higher = df.iloc[index-1,:] #sets upper bound of possible point\n else:\n higher = df.iloc[index,:] #sets upper bound of possible point\n #print(\"ideal x: {0}, low x: {1}, high x: {2}\".format(intr[0], lower[0], higher[0]))\n l = lerp(lower, higher, intr)\n if l is not None:\n points.append(l)\n xovers.append(points) #appends points array to xovers\n return xovers\n\ndef xover_error(file):\n df_total = pd.read_csv(file, header=None) #saves complete dataframe O(n)\n df_list = [group for _, group in df_total.groupby(3)] #separates dataframe by ground track O(n)\n line_list = [stats.linregress(df.iloc[:,0],df.iloc[:,1])[0:2] for df in df_list] #creates a list of regression lines\n intersections = solve(line_list, 
[np.min(np.array(df_total.iloc[:,0])), \\\n np.max(np.array(df_total.iloc[:,0])), np.max(np.array(df_total.iloc[:,1])), \\\n np.min(np.array(df_total.iloc[:,1]))]) #finds potential intersections\n sorted_list = [df.sort_values(by=df.columns[0], kind='mergesort') for df in df_list] #sorts dataframes for binary search O(nlog(n))\n xover_list = xovers(sorted_list, line_list, intersections) #creates a list of crossovers\n error_data = get_error(xover_list) #creates a datframe of error\n new_name = os.path.splitext(file)[0] + \"_crossover_error.csv\" #modifies original filename\n error_data.to_csv(new_name, index=False) #saves csv to file of name new_name\n return error_data, new_name #returns saved dataframe and new filename\n\ndef main():\n input_length = len(sys.argv) #saves length of command line input\n if input_length <= 1:\n print (\"please input a filepath\") #gives error message for lack of input\n else:\n regex = sys.argv[1] #saves filename regex\n file_list = glob.glob(regex) #saves list of filenames\n\n i = 1 #variable for saving current position in list\n for file in file_list:\n output = xover_error(file) #saves new csv file and saves method output\n print (\"Saved new csv file with path: \" + output[1])\n print (\"Output {0} of {1}\".format(i, len(file_list)))\n i+=1 #increases i to new index\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"pandas.read_csv",
"numpy.subtract",
"pandas.DataFrame",
"scipy.stats.linregress",
"numpy.std",
"numpy.prod",
"numpy.var",
"numpy.add",
"numpy.array"
]
] |
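An illustrative aside on the crossover_finder.py record above (not part of the dataset row): a minimal sketch of how the listed scipy.stats.linregress call pairs with the file's get_intersect() logic to locate a crossover between two ground tracks. The track data and variable names below are invented for illustration.

import numpy as np
import scipy.stats as stats

x = np.linspace(0.0, 10.0, 50)
track_a = 2.0 * x + 1.0          # synthetic ascending track, y = 2x + 1
track_b = -1.5 * x + 20.0        # synthetic descending track, y = -1.5x + 20

slope_a, intercept_a = stats.linregress(x, track_a)[0:2]
slope_b, intercept_b = stats.linregress(x, track_b)[0:2]

# Crossover point: solve slope_a*x + intercept_a == slope_b*x + intercept_b,
# which is what get_intersect() computes from two [slope, intercept] pairs.
cross_x = round((intercept_b - intercept_a) / (slope_a - slope_b), 3)
cross_y = round(slope_a * cross_x + intercept_a, 3)
print(cross_x, cross_y)          # crossover near (5.43, 11.86) for these lines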
terrytangyuan/katib
|
[
"5a7a144a1b33e05466174edd721803349622aabc"
] |
[
"pkg/suggestion/v1alpha1/NAS_Reinforcement_Learning/Operation.py"
] |
[
"import itertools\nimport numpy as np\nfrom pkg.api.v1alpha1.python import api_pb2\n\n\nclass Operation(object):\n def __init__(self, opt_id, opt_type, opt_params):\n self.opt_id = opt_id\n self.opt_type = opt_type\n self.opt_params = opt_params\n \n def get_dict(self):\n opt_dict = dict()\n opt_dict['opt_id'] = self.opt_id\n opt_dict['opt_type'] = self.opt_type\n opt_dict['opt_params'] = self.opt_params\n return opt_dict\n\n def print_op(self, logger):\n logger.info(\"Operation ID: \\n\\t{}\".format(self.opt_id))\n logger.info(\"Operation Type: \\n\\t{}\".format(self.opt_type))\n logger.info(\"Operations Parameters:\")\n for ikey in self.opt_params:\n logger.info(\"\\t{}: {}\".format(ikey, self.opt_params[ikey]))\n logger.info(\"\")\n\n\nclass SearchSpace(object):\n def __init__(self, operations):\n self.operation_list = list(operations.operation)\n self.search_space = list()\n self._parse_operations()\n print()\n self.num_operations = len(self.search_space)\n \n def _parse_operations(self):\n # search_sapce is a list of Operation class\n\n operation_id = 0\n\n for operation_dict in self.operation_list:\n opt_type = operation_dict.operationType\n opt_spec = list(operation_dict.parameter_configs.configs)\n # avail_space is dict with the format {\"spec_nam\": [spec feasible values]}\n avail_space = dict()\n num_spec = len(opt_spec)\n\n for ispec in opt_spec:\n spec_name = ispec.name\n if ispec.parameter_type == api_pb2.CATEGORICAL:\n avail_space[spec_name] = list(ispec.feasible.list)\n elif ispec.parameter_type == api_pb2.INT:\n spec_min = int(ispec.feasible.min)\n spec_max = int(ispec.feasible.max)\n spec_step = int(ispec.feasible.step)\n avail_space[spec_name] = range(spec_min, spec_max+1, spec_step)\n elif ispec.parameter_type == api_pb2.DOUBLE:\n spec_min = float(ispec.feasible.min)\n spec_max = float(ispec.feasible.max)\n spec_step = float(ispec.feasible.step)\n double_list = np.arange(spec_min, spec_max+spec_step, spec_step)\n if double_list[-1] > spec_max:\n del double_list[-1]\n avail_space[spec_name] = double_list\n\n # generate all the combinations of possible operations\n key_avail_space = list(avail_space.keys())\n val_avail_space = list(avail_space.values())\n\n for this_opt_vector in itertools.product(*val_avail_space):\n opt_params = dict()\n for i in range(num_spec):\n opt_params[key_avail_space[i]] = this_opt_vector[i]\n this_opt_class = Operation(operation_id, opt_type, opt_params)\n self.search_space.append(this_opt_class)\n operation_id += 1\n"
] |
[
[
"numpy.arange"
]
] |
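An illustrative aside on the Operation.py record above (not part of the dataset row): a minimal sketch of the numpy.arange pattern that SearchSpace._parse_operations uses to expand a DOUBLE parameter's (min, max, step) spec into feasible values. The spec numbers are made up, and the overshoot trim here uses slicing, since del cannot remove elements from a NumPy array.

import numpy as np

spec_min, spec_max, spec_step = 0.1, 0.5, 0.1
double_list = np.arange(spec_min, spec_max + spec_step, spec_step)

# Floating-point accumulation can push the final element past spec_max,
# so drop it when that happens before using the values as a search space.
if double_list[-1] > spec_max:
    double_list = double_list[:-1]

print(double_list)               # roughly [0.1 0.2 0.3 0.4 0.5]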
mhrah7495/View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition
|
[
"b2113aa6295d7292516d5a74582b619d775a5b3d"
] |
[
"data/ntu/get_raw_denoised_data.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\nimport os\nimport os.path as osp\nimport numpy as np\nimport pickle\nimport logging\n\nroot_path = './'\nraw_data_file = osp.join(root_path, 'raw_data', 'raw_skes_data.pkl')\nsave_path = osp.join(root_path, 'denoised_data')\n\nif not osp.exists(save_path):\n os.mkdir(save_path)\n\nrgb_ske_path = osp.join(save_path, 'rgb+ske')\nif not osp.exists(rgb_ske_path):\n os.mkdir(rgb_ske_path)\n\nactors_info_dir = osp.join(save_path, 'actors_info')\nif not osp.exists(actors_info_dir):\n os.mkdir(actors_info_dir)\n\nmissing_count = 0\nnoise_len_thres = 11\nnoise_spr_thres1 = 0.8\nnoise_spr_thres2 = 0.69754\nnoise_mot_thres_lo = 0.089925\nnoise_mot_thres_hi = 2\n\nnoise_len_logger = logging.getLogger('noise_length')\nnoise_len_logger.setLevel(logging.INFO)\nnoise_len_logger.addHandler(logging.FileHandler(osp.join(save_path, 'noise_length.log')))\nnoise_len_logger.info('{:^20}\\t{:^17}\\t{:^8}\\t{}'.format('Skeleton', 'bodyID', 'Motion', 'Length'))\n\nnoise_spr_logger = logging.getLogger('noise_spread')\nnoise_spr_logger.setLevel(logging.INFO)\nnoise_spr_logger.addHandler(logging.FileHandler(osp.join(save_path, 'noise_spread.log')))\nnoise_spr_logger.info('{:^20}\\t{:^17}\\t{:^8}\\t{:^8}'.format('Skeleton', 'bodyID', 'Motion', 'Rate'))\n\nnoise_mot_logger = logging.getLogger('noise_motion')\nnoise_mot_logger.setLevel(logging.INFO)\nnoise_mot_logger.addHandler(logging.FileHandler(osp.join(save_path, 'noise_motion.log')))\nnoise_mot_logger.info('{:^20}\\t{:^17}\\t{:^8}'.format('Skeleton', 'bodyID', 'Motion'))\n\nfail_logger_1 = logging.getLogger('noise_outliers_1')\nfail_logger_1.setLevel(logging.INFO)\nfail_logger_1.addHandler(logging.FileHandler(osp.join(save_path, 'denoised_failed_1.log')))\n\nfail_logger_2 = logging.getLogger('noise_outliers_2')\nfail_logger_2.setLevel(logging.INFO)\nfail_logger_2.addHandler(logging.FileHandler(osp.join(save_path, 'denoised_failed_2.log')))\n\nmissing_skes_logger = logging.getLogger('missing_frames')\nmissing_skes_logger.setLevel(logging.INFO)\nmissing_skes_logger.addHandler(logging.FileHandler(osp.join(save_path, 'missing_skes.log')))\nmissing_skes_logger.info('{:^20}\\t{}\\t{}'.format('Skeleton', 'num_frames', 'num_missing'))\n\nmissing_skes_logger1 = logging.getLogger('missing_frames_1')\nmissing_skes_logger1.setLevel(logging.INFO)\nmissing_skes_logger1.addHandler(logging.FileHandler(osp.join(save_path, 'missing_skes_1.log')))\nmissing_skes_logger1.info('{:^20}\\t{}\\t{}\\t{}\\t{}\\t{}'.format('Skeleton', 'num_frames', 'Actor1',\n 'Actor2', 'Start', 'End'))\n\nmissing_skes_logger2 = logging.getLogger('missing_frames_2')\nmissing_skes_logger2.setLevel(logging.INFO)\nmissing_skes_logger2.addHandler(logging.FileHandler(osp.join(save_path, 'missing_skes_2.log')))\nmissing_skes_logger2.info('{:^20}\\t{}\\t{}\\t{}'.format('Skeleton', 'num_frames', 'Actor1', 'Actor2'))\n\n\ndef denoising_by_length(ske_name, bodies_data):\n \"\"\"\n Denoising data based on the frame length for each bodyID.\n Filter out the bodyID which length is less or equal than the predefined threshold.\n\n \"\"\"\n noise_info = str()\n new_bodies_data = bodies_data.copy()\n for (bodyID, body_data) in new_bodies_data.items():\n length = len(body_data['interval'])\n if length <= noise_len_thres:\n noise_info += 'Filter out: %s, %d (length).\\n' % (bodyID, length)\n noise_len_logger.info('{}\\t{}\\t{:.6f}\\t{:^6d}'.format(ske_name, bodyID,\n body_data['motion'], length))\n del bodies_data[bodyID]\n if 
noise_info != '':\n noise_info += '\\n'\n\n return bodies_data, noise_info\n\n\ndef get_valid_frames_by_spread(points):\n \"\"\"\n Find the valid (or reasonable) frames (index) based on the spread of X and Y.\n\n :param points: joints or colors\n \"\"\"\n num_frames = points.shape[0]\n valid_frames = []\n for i in range(num_frames):\n x = points[i, :, 0]\n y = points[i, :, 1]\n if (x.max() - x.min()) <= noise_spr_thres1 * (y.max() - y.min()): # 0.8\n valid_frames.append(i)\n return valid_frames\n\n\ndef denoising_by_spread(ske_name, bodies_data):\n \"\"\"\n Denoising data based on the spread of Y value and X value.\n Filter out the bodyID which the ratio of noisy frames is higher than the predefined\n threshold.\n\n bodies_data: contains at least 2 bodyIDs\n \"\"\"\n noise_info = str()\n denoised_by_spr = False # mark if this sequence has been processed by spread.\n\n new_bodies_data = bodies_data.copy()\n # for (bodyID, body_data) in bodies_data.items():\n for (bodyID, body_data) in new_bodies_data.items():\n if len(bodies_data) == 1:\n break\n valid_frames = get_valid_frames_by_spread(body_data['joints'].reshape(-1, 25, 3))\n num_frames = len(body_data['interval'])\n num_noise = num_frames - len(valid_frames)\n if num_noise == 0:\n continue\n\n ratio = num_noise / float(num_frames)\n motion = body_data['motion']\n if ratio >= noise_spr_thres2: # 0.69754\n del bodies_data[bodyID]\n denoised_by_spr = True\n noise_info += 'Filter out: %s (spread rate >= %.2f).\\n' % (bodyID, noise_spr_thres2)\n noise_spr_logger.info('%s\\t%s\\t%.6f\\t%.6f' % (ske_name, bodyID, motion, ratio))\n else: # Update motion\n joints = body_data['joints'].reshape(-1, 25, 3)[valid_frames]\n body_data['motion'] = min(motion, np.sum(np.var(joints.reshape(-1, 3), axis=0)))\n noise_info += '%s: motion %.6f -> %.6f\\n' % (bodyID, motion, body_data['motion'])\n # TODO: Consider removing noisy frames for each bodyID\n\n if noise_info != '':\n noise_info += '\\n'\n\n return bodies_data, noise_info, denoised_by_spr\n\n\ndef denoising_by_motion(ske_name, bodies_data, bodies_motion):\n \"\"\"\n Filter out the bodyID which motion is out of the range of predefined interval\n\n \"\"\"\n # Sort bodies based on the motion, return a list of tuples\n # bodies_motion = sorted(bodies_motion.items(), key=lambda x, y: cmp(x[1], y[1]), reverse=True)\n bodies_motion = sorted(bodies_motion.items(), key=lambda x: x[1], reverse=True)\n\n # Reserve the body data with the largest motion\n denoised_bodies_data = [(bodies_motion[0][0], bodies_data[bodies_motion[0][0]])]\n noise_info = str()\n\n for (bodyID, motion) in bodies_motion[1:]:\n if (motion < noise_mot_thres_lo) or (motion > noise_mot_thres_hi):\n noise_info += 'Filter out: %s, %.6f (motion).\\n' % (bodyID, motion)\n noise_mot_logger.info('{}\\t{}\\t{:.6f}'.format(ske_name, bodyID, motion))\n else:\n denoised_bodies_data.append((bodyID, bodies_data[bodyID]))\n if noise_info != '':\n noise_info += '\\n'\n\n return denoised_bodies_data, noise_info\n\n\ndef denoising_bodies_data(bodies_data):\n \"\"\"\n Denoising data based on some heuristic methods, not necessarily correct for all samples.\n\n Return:\n denoised_bodies_data (list): tuple: (bodyID, body_data).\n \"\"\"\n ske_name = bodies_data['name']\n bodies_data = bodies_data['data']\n\n # Step 1: Denoising based on frame length.\n bodies_data, noise_info_len = denoising_by_length(ske_name, bodies_data)\n\n if len(bodies_data) == 1: # only has one bodyID left after step 1\n return bodies_data.items(), noise_info_len\n\n # Step 2: 
Denoising based on spread.\n bodies_data, noise_info_spr, denoised_by_spr = denoising_by_spread(ske_name, bodies_data)\n\n if len(bodies_data) == 1:\n return bodies_data.items(), noise_info_len + noise_info_spr\n\n bodies_motion = dict() # get body motion\n for (bodyID, body_data) in bodies_data.items():\n bodies_motion[bodyID] = body_data['motion']\n # Sort bodies based on the motion\n # bodies_motion = sorted(bodies_motion.items(), key=lambda x, y: cmp(x[1], y[1]), reverse=True)\n bodies_motion = sorted(bodies_motion.items(), key=lambda x: x[1], reverse=True)\n denoised_bodies_data = list()\n for (bodyID, _) in bodies_motion:\n denoised_bodies_data.append((bodyID, bodies_data[bodyID]))\n\n return denoised_bodies_data, noise_info_len + noise_info_spr\n\n # TODO: Consider denoising further by integrating motion method\n\n # if denoised_by_spr: # this sequence has been denoised by spread\n # bodies_motion = sorted(bodies_motion.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)\n # denoised_bodies_data = list()\n # for (bodyID, _) in bodies_motion:\n # denoised_bodies_data.append((bodyID, bodies_data[bodyID]))\n # return denoised_bodies_data, noise_info\n\n # Step 3: Denoising based on motion\n # bodies_data, noise_info = denoising_by_motion(ske_name, bodies_data, bodies_motion)\n\n # return bodies_data, noise_info\n\n\ndef get_one_actor_points(body_data, num_frames):\n \"\"\"\n Get joints and colors for only one actor.\n For joints, each frame contains 75 X-Y-Z coordinates.\n For colors, each frame contains 25 x 2 (X, Y) coordinates.\n \"\"\"\n joints = np.zeros((num_frames, 75), dtype=np.float32)\n colors = np.ones((num_frames, 1, 25, 2), dtype=np.float32) * np.nan\n start, end = body_data['interval'][0], body_data['interval'][-1]\n joints[start:end + 1] = body_data['joints'].reshape(-1, 75)\n colors[start:end + 1, 0] = body_data['colors']\n\n return joints, colors\n\n\ndef remove_missing_frames(ske_name, joints, colors):\n \"\"\"\n Cut off missing frames which all joints positions are 0s\n\n For the sequence with 2 actors' data, also record the number of missing frames for\n actor1 and actor2, respectively (for debug).\n \"\"\"\n num_frames = joints.shape[0]\n num_bodies = colors.shape[1] # 1 or 2\n\n if num_bodies == 2: # DEBUG\n missing_indices_1 = np.where(joints[:, :75].sum(axis=1) == 0)[0]\n missing_indices_2 = np.where(joints[:, 75:].sum(axis=1) == 0)[0]\n cnt1 = len(missing_indices_1)\n cnt2 = len(missing_indices_2)\n\n start = 1 if 0 in missing_indices_1 else 0\n end = 1 if num_frames - 1 in missing_indices_1 else 0\n if max(cnt1, cnt2) > 0:\n if cnt1 > cnt2:\n info = '{}\\t{:^10d}\\t{:^6d}\\t{:^6d}\\t{:^5d}\\t{:^3d}'.format(ske_name, num_frames,\n cnt1, cnt2, start, end)\n missing_skes_logger1.info(info)\n else:\n info = '{}\\t{:^10d}\\t{:^6d}\\t{:^6d}'.format(ske_name, num_frames, cnt1, cnt2)\n missing_skes_logger2.info(info)\n\n # Find valid frame indices that the data is not missing or lost\n # For two-subjects action, this means both data of actor1 and actor2 is missing.\n valid_indices = np.where(joints.sum(axis=1) != 0)[0] # 0-based index\n missing_indices = np.where(joints.sum(axis=1) == 0)[0]\n num_missing = len(missing_indices)\n\n if num_missing > 0: # Update joints and colors\n joints = joints[valid_indices]\n colors[missing_indices] = np.nan\n global missing_count\n missing_count += 1\n missing_skes_logger.info('{}\\t{:^10d}\\t{:^11d}'.format(ske_name, num_frames, num_missing))\n\n return joints, colors\n\n\ndef get_bodies_info(bodies_data):\n bodies_info = 
'{:^17}\\t{}\\t{:^8}\\n'.format('bodyID', 'Interval', 'Motion')\n for (bodyID, body_data) in bodies_data.items():\n start, end = body_data['interval'][0], body_data['interval'][-1]\n bodies_info += '{}\\t{:^8}\\t{:f}\\n'.format(bodyID, str([start, end]), body_data['motion'])\n\n return bodies_info + '\\n'\n\n\ndef get_two_actors_points(bodies_data):\n \"\"\"\n Get the first and second actor's joints positions and colors locations.\n\n # Arguments:\n bodies_data (dict): 3 key-value pairs: 'name', 'data', 'num_frames'.\n bodies_data['data'] is also a dict, while the key is bodyID, the value is\n the corresponding body_data which is also a dict with 4 keys:\n - joints: raw 3D joints positions. Shape: (num_frames x 25, 3)\n - colors: raw 2D color locations. Shape: (num_frames, 25, 2)\n - interval: a list which records the frame indices.\n - motion: motion amount\n\n # Return:\n joints, colors.\n \"\"\"\n ske_name = bodies_data['name']\n label = int(ske_name[-2:])\n num_frames = bodies_data['num_frames']\n bodies_info = get_bodies_info(bodies_data['data'])\n\n bodies_data, noise_info = denoising_bodies_data(bodies_data) # Denoising data\n bodies_info += noise_info\n\n bodies_data = list(bodies_data)\n if len(bodies_data) == 1: # Only left one actor after denoising\n if label >= 50: # DEBUG: Denoising failed for two-subjects action\n fail_logger_2.info(ske_name)\n\n bodyID, body_data = bodies_data[0]\n joints, colors = get_one_actor_points(body_data, num_frames)\n bodies_info += 'Main actor: %s' % bodyID\n else:\n if label < 50: # DEBUG: Denoising failed for one-subject action\n fail_logger_1.info(ske_name)\n\n joints = np.zeros((num_frames, 150), dtype=np.float32)\n colors = np.ones((num_frames, 2, 25, 2), dtype=np.float32) * np.nan\n\n bodyID, actor1 = bodies_data[0] # the 1st actor with largest motion\n start1, end1 = actor1['interval'][0], actor1['interval'][-1]\n joints[start1:end1 + 1, :75] = actor1['joints'].reshape(-1, 75)\n colors[start1:end1 + 1, 0] = actor1['colors']\n actor1_info = '{:^17}\\t{}\\t{:^8}\\n'.format('Actor1', 'Interval', 'Motion') + \\\n '{}\\t{:^8}\\t{:f}\\n'.format(bodyID, str([start1, end1]), actor1['motion'])\n del bodies_data[0]\n\n actor2_info = '{:^17}\\t{}\\t{:^8}\\n'.format('Actor2', 'Interval', 'Motion')\n start2, end2 = [0, 0] # initial interval for actor2 (virtual)\n\n while len(bodies_data) > 0:\n bodyID, actor = bodies_data[0]\n start, end = actor['interval'][0], actor['interval'][-1]\n if min(end1, end) - max(start1, start) <= 0: # no overlap with actor1\n joints[start:end + 1, :75] = actor['joints'].reshape(-1, 75)\n colors[start:end + 1, 0] = actor['colors']\n actor1_info += '{}\\t{:^8}\\t{:f}\\n'.format(bodyID, str([start, end]), actor['motion'])\n # Update the interval of actor1\n start1 = min(start, start1)\n end1 = max(end, end1)\n elif min(end2, end) - max(start2, start) <= 0: # no overlap with actor2\n joints[start:end + 1, 75:] = actor['joints'].reshape(-1, 75)\n colors[start:end + 1, 1] = actor['colors']\n actor2_info += '{}\\t{:^8}\\t{:f}\\n'.format(bodyID, str([start, end]), actor['motion'])\n # Update the interval of actor2\n start2 = min(start, start2)\n end2 = max(end, end2)\n del bodies_data[0]\n\n bodies_info += ('\\n' + actor1_info + '\\n' + actor2_info)\n\n with open(osp.join(actors_info_dir, ske_name + '.txt'), 'w') as fw:\n fw.write(bodies_info + '\\n')\n\n return joints, colors\n\n\ndef get_raw_denoised_data():\n \"\"\"\n Get denoised data (joints positions and color locations) from raw skeleton sequences.\n\n For each frame of a 
skeleton sequence, an actor's 3D positions of 25 joints represented\n by an 2D array (shape: 25 x 3) is reshaped into a 75-dim vector by concatenating each\n 3-dim (x, y, z) coordinates along the row dimension in joint order. Each frame contains\n two actor's joints positions constituting a 150-dim vector. If there is only one actor,\n then the last 75 values are filled with zeros. Otherwise, select the main actor and the\n second actor based on the motion amount. Each 150-dim vector as a row vector is put into\n a 2D numpy array where the number of rows equals the number of valid frames. All such\n 2D arrays are put into a list and finally the list is serialized into a cPickle file.\n\n For the skeleton sequence which contains two or more actors (mostly corresponds to the\n last 11 classes), the filename and actors' information are recorded into log files.\n For better understanding, also generate RGB+skeleton videos for visualization.\n \"\"\"\n\n with open(raw_data_file, 'rb') as fr: # load raw skeletons data\n raw_skes_data = pickle.load(fr)\n\n num_skes = len(raw_skes_data)\n print('Found %d available skeleton sequences.' % num_skes)\n\n raw_denoised_joints = []\n raw_denoised_colors = []\n frames_cnt = []\n\n for (idx, bodies_data) in enumerate(raw_skes_data):\n ske_name = bodies_data['name']\n print('Processing %s' % ske_name)\n num_bodies = len(bodies_data['data'])\n\n if num_bodies == 1: # only 1 actor\n num_frames = bodies_data['num_frames']\n body_data = list(bodies_data['data'].values())[0]\n joints, colors = get_one_actor_points(body_data, num_frames)\n else: # more than 1 actor, select two main actors\n joints, colors = get_two_actors_points(bodies_data)\n # Remove missing frames\n joints, colors = remove_missing_frames(ske_name, joints, colors)\n num_frames = joints.shape[0] # Update\n # Visualize selected actors' skeletons on RGB videos.\n\n raw_denoised_joints.append(joints)\n raw_denoised_colors.append(colors)\n frames_cnt.append(num_frames)\n\n if (idx + 1) % 1000 == 0:\n print('Processed: %.2f%% (%d / %d), ' % \\\n (100.0 * (idx + 1) / num_skes, idx + 1, num_skes) + \\\n 'Missing count: %d' % missing_count)\n\n raw_skes_joints_pkl = osp.join(save_path, 'raw_denoised_joints.pkl')\n with open(raw_skes_joints_pkl, 'wb') as f:\n pickle.dump(raw_denoised_joints, f, pickle.HIGHEST_PROTOCOL)\n\n raw_skes_colors_pkl = osp.join(save_path, 'raw_denoised_colors.pkl')\n with open(raw_skes_colors_pkl, 'wb') as f:\n pickle.dump(raw_denoised_colors, f, pickle.HIGHEST_PROTOCOL)\n\n frames_cnt = np.array(frames_cnt, dtype=np.int)\n np.savetxt(osp.join(save_path, 'frames_cnt.txt'), frames_cnt, fmt='%d')\n\n print('Saved raw denoised positions of {} frames into {}'.format(np.sum(frames_cnt),\n raw_skes_joints_pkl))\n print('Found %d files that have missing data' % missing_count)\n\nif __name__ == '__main__':\n\n get_raw_denoised_data()\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.ones"
]
] |
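An illustrative aside on the get_raw_denoised_data.py record above (not part of the dataset row): a minimal sketch of the X/Y spread test used by get_valid_frames_by_spread, run on a small synthetic joints array of shape (num_frames, 25, 3). The 0.8 factor is the file's noise_spr_thres1; the random data is only for illustration.

import numpy as np

noise_spr_thres1 = 0.8
points = np.random.rand(4, 25, 3)          # synthetic (num_frames, 25 joints, xyz)

valid_frames = []
for i in range(points.shape[0]):
    x = points[i, :, 0]
    y = points[i, :, 1]
    # Keep the frame only if the horizontal spread is small relative to the
    # vertical spread, i.e. the skeleton is taller than it is wide.
    if (x.max() - x.min()) <= noise_spr_thres1 * (y.max() - y.min()):
        valid_frames.append(i)

print(valid_frames)                        # indices of frames that pass the test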
louisponet/aiida-core
|
[
"3214236df66a3792ee57fe38a06c0c3bb65861ab"
] |
[
"tests/backends/aiida_django/migrations/test_migrations_many.py"
] |
[
"# -*- coding: utf-8 -*-\n###########################################################################\n# Copyright (c), The AiiDA team. All rights reserved. #\n# This file is part of the AiiDA code. #\n# #\n# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #\n# For further information on the license, see the LICENSE.txt file #\n# For further information please visit http://www.aiida.net #\n###########################################################################\n# pylint: disable=invalid-name, import-error, no-name-in-module\n\"\"\"\nThis file contains the majority of the migration tests that are too short to\ngo to a separate file.\n\"\"\"\nimport numpy\n\nfrom aiida.backends.testbase import AiidaTestCase\nfrom aiida.backends.djsite.db.migrations import ModelModifierV0025\nfrom aiida.backends.general.migrations import utils\nfrom aiida.common.exceptions import IntegrityError\nfrom .test_migrations_common import TestMigrations\n\n\nclass TestMigrationsModelModifierV0025(TestMigrations):\n \"\"\"Sub class of `TestMigrations` that need to work on node attributes using the `ModelModifierV0025`.\"\"\"\n\n def set_attribute(self, node, key, value):\n DbAttribute = self.apps.get_model('db', 'DbAttribute')\n modifier = ModelModifierV0025(self.apps, DbAttribute)\n modifier.set_value_for_node(node.pk, key, value)\n\n def get_attribute(self, node, key, default=None): # pylint: disable=missing-docstring\n DbAttribute = self.apps.get_model('db', 'DbAttribute')\n modifier = ModelModifierV0025(self.apps, DbAttribute)\n try:\n return modifier.get_value_for_node(node.pk, key)\n except AttributeError:\n return default\n\n @staticmethod\n def get_node_array(node, name):\n return utils.load_numpy_array_from_repository(node.uuid, name)\n\n def set_node_array(self, node, name, array):\n \"\"\"Store a new numpy array inside a node. 
Possibly overwrite the array if it already existed.\n\n Internally, it stores a name.npy file in numpy format.\n\n :param name: The name of the array.\n :param array: The numpy array to store.\n \"\"\"\n utils.store_numpy_array_in_repository(node.uuid, name, array)\n self.set_attribute(node, f'array|{name}', list(array.shape))\n\n\nclass TestNoMigrations(AiidaTestCase):\n \"\"\"Verify that no django migrations remain.\"\"\"\n\n def test_no_remaining_migrations(self): # pylint: disable=no-self-use\n \"\"\"\n Verify that no django migrations remain.\n Equivalent to python manage.py makemigrations --check\n \"\"\"\n\n from django.core.management import call_command\n\n # Raises SystemExit, if migrations remain\n call_command('makemigrations', '--check', verbosity=0)\n\n\nclass TestDuplicateNodeUuidMigration(TestMigrations):\n \"\"\"Test the migration that verifies that there are no duplicate UUIDs\"\"\"\n\n migrate_from = '0013_django_1_8'\n migrate_to = '0014_add_node_uuid_unique_constraint'\n\n def setUpBeforeMigration(self):\n from aiida.common.utils import get_new_uuid\n from aiida.backends.general.migrations.utils import deduplicate_uuids, verify_uuid_uniqueness\n self.file_name = 'test.temp'\n self.file_content = '#!/bin/bash\\n\\necho test run\\n'\n\n self.nodes_boolean = []\n self.nodes_integer = []\n self.n_bool_duplicates = 2\n self.n_int_duplicates = 4\n\n node_bool = self.DbNode(type='data.bool.Bool.', user_id=self.default_user.id, uuid=get_new_uuid())\n node_bool.save()\n\n node_int = self.DbNode(type='data.int.Int.', user_id=self.default_user.id, uuid=get_new_uuid())\n node_int.save()\n\n self.nodes_boolean.append(node_bool)\n self.nodes_integer.append(node_int)\n\n for _ in range(self.n_bool_duplicates):\n node = self.DbNode(type='data.bool.Bool.', user_id=self.default_user.id, uuid=node_bool.uuid)\n node.save()\n utils.put_object_from_string(node.uuid, self.file_name, self.file_content)\n self.nodes_boolean.append(node)\n\n for _ in range(self.n_int_duplicates):\n node = self.DbNode(type='data.int.Int.', user_id=self.default_user.id, uuid=node_int.uuid)\n node.save()\n utils.put_object_from_string(node.uuid, self.file_name, self.file_content)\n self.nodes_integer.append(node)\n\n # Verify that there are duplicate UUIDs by checking that the following function raises\n with self.assertRaises(IntegrityError):\n verify_uuid_uniqueness(table='db_dbnode')\n\n # Now run the function responsible for solving duplicate UUIDs which would also be called by the user\n # through the `verdi database integrity detect-duplicate-uuid` command\n deduplicate_uuids(table='db_dbnode')\n\n def test_deduplicated_uuids(self):\n \"\"\"Verify that after the migration, all expected nodes are still there with unique UUIDs.\"\"\"\n # If the duplicate UUIDs were successfully fixed, the following should not raise.\n from aiida.backends.general.migrations.utils import verify_uuid_uniqueness\n\n verify_uuid_uniqueness(table='db_dbnode')\n\n # Reload the nodes by PK and check that all UUIDs are now unique\n nodes_boolean = [self.load_node(node.pk) for node in self.nodes_boolean]\n uuids_boolean = [node.uuid for node in nodes_boolean]\n self.assertEqual(len(set(uuids_boolean)), len(nodes_boolean))\n\n nodes_integer = [self.load_node(node.pk) for node in self.nodes_integer]\n uuids_integer = [node.uuid for node in nodes_integer]\n self.assertEqual(len(set(uuids_integer)), len(nodes_integer))\n\n for node in nodes_boolean:\n self.assertEqual(utils.get_object_from_repository(node.uuid, self.file_name), 
self.file_content)\n\n\nclass TestUuidMigration(TestMigrations):\n \"\"\"\n This test class checks the migration 0018_django_1_11 which switches from the django_extensions\n UUID field to the native UUIDField of django 1.11. It also introduces unique constraints\n on all uuid columns (previously existed only on dbnode).\n \"\"\"\n\n migrate_from = '0017_drop_dbcalcstate'\n migrate_to = '0018_django_1_11'\n\n def setUpBeforeMigration(self):\n node = self.DbNode(type='node.process.calculation.calcjob.CalcJobNode.', user_id=self.default_user.id)\n node.save()\n\n self.node_uuid = str(node.uuid)\n self.node_id = node.id\n\n def test_uuid_untouched(self):\n \"\"\"Verify that Node uuids remain unchanged.\"\"\"\n node = self.load_node(self.node_id)\n self.assertEqual(self.node_uuid, str(node.uuid))\n\n\nclass TestGroupRenamingMigration(TestMigrations):\n \"\"\"\n This test class checks the migration 0022_dbgroup_type_string_change_content which updates the\n type_string column of the groups.\n \"\"\"\n\n migrate_from = '0021_dbgroup_name_to_label_type_to_type_string'\n migrate_to = '0022_dbgroup_type_string_change_content'\n\n def setUpBeforeMigration(self):\n # Create group\n DbGroup = self.apps.get_model('db', 'DbGroup')\n\n # test user group type_string: '' -> 'user'\n group_user = DbGroup(label='test_user_group', user_id=self.default_user.id, type_string='')\n group_user.save()\n self.group_user_pk = group_user.pk\n\n # test data.upf group type_string: 'data.upf.family' -> 'data.upf'\n group_data_upf = DbGroup(\n label='test_data_upf_group', user_id=self.default_user.id, type_string='data.upf.family'\n )\n group_data_upf.save()\n self.group_data_upf_pk = group_data_upf.pk\n\n # test auto.import group type_string: 'aiida.import' -> 'auto.import'\n group_autoimport = DbGroup(label='test_import_group', user_id=self.default_user.id, type_string='aiida.import')\n group_autoimport.save()\n self.group_autoimport_pk = group_autoimport.pk\n\n # test auto.run group type_string: 'autogroup.run' -> 'auto.run'\n group_autorun = DbGroup(label='test_autorun_group', user_id=self.default_user.id, type_string='autogroup.run')\n group_autorun.save()\n self.group_autorun_pk = group_autorun.pk\n\n def test_group_string_update(self):\n \"\"\" Test that the type_string were updated correctly \"\"\"\n DbGroup = self.apps.get_model('db', 'DbGroup')\n\n # test user group type_string: '' -> 'user'\n group_user = DbGroup.objects.get(pk=self.group_user_pk)\n self.assertEqual(group_user.type_string, 'user')\n\n # test data.upf group type_string: 'data.upf.family' -> 'data.upf'\n group_data_upf = DbGroup.objects.get(pk=self.group_data_upf_pk)\n self.assertEqual(group_data_upf.type_string, 'data.upf')\n\n # test auto.import group type_string: 'aiida.import' -> 'auto.import'\n group_autoimport = DbGroup.objects.get(pk=self.group_autoimport_pk)\n self.assertEqual(group_autoimport.type_string, 'auto.import')\n\n # test auto.run group type_string: 'autogroup.run' -> 'auto.run'\n group_autorun = DbGroup.objects.get(pk=self.group_autorun_pk)\n self.assertEqual(group_autorun.type_string, 'auto.run')\n\n\nclass TestCalcAttributeKeysMigration(TestMigrationsModelModifierV0025):\n \"\"\"\n This test class checks that the migration 0023_calc_job_option_attribute_keys works as expected\n which migrates CalcJobNode attributes for metadata options whose key changed.\n \"\"\"\n\n migrate_from = '0022_dbgroup_type_string_change_content'\n migrate_to = '0023_calc_job_option_attribute_keys'\n\n KEY_RESOURCES_OLD = 'jobresource_params'\n 
KEY_RESOURCES_NEW = 'resources'\n KEY_PARSER_NAME_OLD = 'parser'\n KEY_PARSER_NAME_NEW = 'parser_name'\n KEY_PROCESS_LABEL_OLD = '_process_label'\n KEY_PROCESS_LABEL_NEW = 'process_label'\n KEY_ENVIRONMENT_VARIABLES_OLD = 'custom_environment_variables'\n KEY_ENVIRONMENT_VARIABLES_NEW = 'environment_variables'\n\n def setUpBeforeMigration(self):\n self.process_label = 'TestLabel'\n self.resources = {'number_machines': 1}\n self.environment_variables = {}\n self.parser_name = 'aiida.parsers:parser'\n\n self.node_work = self.DbNode(type='node.process.workflow.WorkflowNode.', user_id=self.default_user.id)\n self.node_work.save()\n self.set_attribute(self.node_work, self.KEY_PROCESS_LABEL_OLD, self.process_label)\n\n self.node_calc = self.DbNode(type='node.process.calculation.calcjob.CalcJobNode.', user_id=self.default_user.id)\n self.node_calc.save()\n self.set_attribute(self.node_calc, self.KEY_PROCESS_LABEL_OLD, self.process_label)\n self.set_attribute(self.node_calc, self.KEY_RESOURCES_OLD, self.resources)\n self.set_attribute(self.node_calc, self.KEY_ENVIRONMENT_VARIABLES_OLD, self.environment_variables)\n self.set_attribute(self.node_calc, self.KEY_PARSER_NAME_OLD, self.parser_name)\n\n # Create a node of a different type to ensure that its attributes are not updated\n self.node_other = self.DbNode(type='node.othernode.', user_id=self.default_user.id)\n self.node_other.save()\n self.set_attribute(self.node_other, self.KEY_PROCESS_LABEL_OLD, self.process_label)\n self.set_attribute(self.node_other, self.KEY_RESOURCES_OLD, self.resources)\n self.set_attribute(self.node_other, self.KEY_ENVIRONMENT_VARIABLES_OLD, self.environment_variables)\n self.set_attribute(self.node_other, self.KEY_PARSER_NAME_OLD, self.parser_name)\n\n def test_attribute_key_changes(self):\n \"\"\"Verify that the keys are successfully changed of the affected attributes.\"\"\"\n NOT_FOUND = tuple([0])\n\n self.assertEqual(self.get_attribute(self.node_work, self.KEY_PROCESS_LABEL_NEW), self.process_label)\n self.assertEqual(self.get_attribute(self.node_work, self.KEY_PROCESS_LABEL_OLD, default=NOT_FOUND), NOT_FOUND)\n\n self.assertEqual(self.get_attribute(self.node_calc, self.KEY_PROCESS_LABEL_NEW), self.process_label)\n self.assertEqual(self.get_attribute(self.node_calc, self.KEY_RESOURCES_NEW), self.resources)\n self.assertEqual(\n self.get_attribute(self.node_calc, self.KEY_ENVIRONMENT_VARIABLES_NEW), self.environment_variables\n )\n self.assertEqual(self.get_attribute(self.node_calc, self.KEY_PARSER_NAME_NEW), self.parser_name)\n self.assertEqual(self.get_attribute(self.node_calc, self.KEY_PROCESS_LABEL_OLD, default=NOT_FOUND), NOT_FOUND)\n self.assertEqual(self.get_attribute(self.node_calc, self.KEY_RESOURCES_OLD, default=NOT_FOUND), NOT_FOUND)\n self.assertEqual(\n self.get_attribute(self.node_calc, self.KEY_ENVIRONMENT_VARIABLES_OLD, default=NOT_FOUND), NOT_FOUND\n )\n self.assertEqual(self.get_attribute(self.node_calc, self.KEY_PARSER_NAME_OLD, default=NOT_FOUND), NOT_FOUND)\n\n # The following node should not be migrated even if its attributes have the matching keys because\n # the node is not a ProcessNode\n self.assertEqual(self.get_attribute(self.node_other, self.KEY_PROCESS_LABEL_OLD), self.process_label)\n self.assertEqual(self.get_attribute(self.node_other, self.KEY_RESOURCES_OLD), self.resources)\n self.assertEqual(\n self.get_attribute(self.node_other, self.KEY_ENVIRONMENT_VARIABLES_OLD), self.environment_variables\n )\n self.assertEqual(self.get_attribute(self.node_other, self.KEY_PARSER_NAME_OLD), 
self.parser_name)\n self.assertEqual(self.get_attribute(self.node_other, self.KEY_PROCESS_LABEL_NEW, default=NOT_FOUND), NOT_FOUND)\n self.assertEqual(self.get_attribute(self.node_other, self.KEY_RESOURCES_NEW, default=NOT_FOUND), NOT_FOUND)\n self.assertEqual(\n self.get_attribute(self.node_other, self.KEY_ENVIRONMENT_VARIABLES_NEW, default=NOT_FOUND), NOT_FOUND\n )\n self.assertEqual(self.get_attribute(self.node_other, self.KEY_PARSER_NAME_NEW, default=NOT_FOUND), NOT_FOUND)\n\n\nclass TestDbLogMigrationRecordCleaning(TestMigrations):\n \"\"\"\n This test class checks that the migration 0024_dblog_update works as expected.\n That migration updates of the DbLog table and adds uuids\n \"\"\"\n\n migrate_from = '0023_calc_job_option_attribute_keys'\n migrate_to = '0024_dblog_update'\n\n def setUpBeforeMigration(self): # pylint: disable=too-many-locals\n import json\n import importlib\n from aiida.backends.general.migrations.utils import dumps_json\n\n update_024 = importlib.import_module('aiida.backends.djsite.db.migrations.0024_dblog_update')\n\n DbNode = self.apps.get_model('db', 'DbNode')\n DbWorkflow = self.apps.get_model('db', 'DbWorkflow')\n DbLog = self.apps.get_model('db', 'DbLog')\n\n # Creating the needed nodes & workflows\n calc_1 = DbNode(type='node.process.calculation.CalculationNode.', user_id=self.default_user.id)\n param = DbNode(type='data.dict.Dict.', user_id=self.default_user.id)\n leg_workf = DbWorkflow(label='Legacy WorkflowNode', user_id=self.default_user.id)\n calc_2 = DbNode(type='node.process.calculation.CalculationNode.', user_id=self.default_user.id)\n\n # Storing them\n calc_1.save()\n param.save()\n leg_workf.save()\n calc_2.save()\n\n # Creating the corresponding log records and storing them\n log_1 = DbLog(\n loggername='CalculationNode logger',\n objpk=calc_1.pk,\n objname='node.calculation.job.quantumespresso.pw.',\n message='calculation node 1',\n metadata=json.dumps({\n 'msecs': 719.0849781036377,\n 'objpk': calc_1.pk,\n 'lineno': 350,\n 'thread': 140011612940032,\n 'asctime': '10/21/2018 12:39:51 PM',\n 'created': 1540118391.719085,\n 'levelno': 23,\n 'message': 'calculation node 1',\n 'objname': 'node.calculation.job.quantumespresso.pw.',\n })\n )\n log_2 = DbLog(\n loggername='something.else logger',\n objpk=param.pk,\n objname='something.else.',\n message='parameter data with log message'\n )\n log_3 = DbLog(\n loggername='TopologicalWorkflow logger',\n objpk=leg_workf.pk,\n objname='aiida.workflows.user.topologicalworkflows.topo.TopologicalWorkflow',\n message='parameter data with log message'\n )\n log_4 = DbLog(\n loggername='CalculationNode logger',\n objpk=calc_2.pk,\n objname='node.calculation.job.quantumespresso.pw.',\n message='calculation node 2',\n metadata=json.dumps({\n 'msecs': 719.0849781036377,\n 'objpk': calc_2.pk,\n 'lineno': 360,\n 'levelno': 23,\n 'message': 'calculation node 1',\n 'objname': 'node.calculation.job.quantumespresso.pw.',\n })\n )\n # Creating two more log records that don't correspond to a node\n log_5 = DbLog(\n loggername='CalculationNode logger',\n objpk=(calc_2.pk + 1000),\n objname='node.calculation.job.quantumespresso.pw.',\n message='calculation node 1000',\n metadata=json.dumps({\n 'msecs': 718,\n 'objpk': (calc_2.pk + 1000),\n 'lineno': 361,\n 'levelno': 25,\n 'message': 'calculation node 1000',\n 'objname': 'node.calculation.job.quantumespresso.pw.',\n })\n )\n log_6 = DbLog(\n loggername='CalculationNode logger',\n objpk=(calc_2.pk + 1001),\n objname='node.calculation.job.quantumespresso.pw.',\n 
message='calculation node 10001',\n metadata=json.dumps({\n 'msecs': 722,\n 'objpk': (calc_2.pk + 1001),\n 'lineno': 362,\n 'levelno': 24,\n 'message': 'calculation node 1001',\n 'objname': 'node.calculation.job.quantumespresso.pw.',\n })\n )\n\n # Storing the log records\n log_1.save()\n log_2.save()\n log_3.save()\n log_4.save()\n log_5.save()\n log_6.save()\n\n # Storing temporarily information needed for the check at the test\n self.to_check = dict()\n\n # Keeping calculation & calculation log ids\n self.to_check['CalculationNode'] = (\n calc_1.pk,\n log_1.pk,\n calc_2.pk,\n log_4.pk,\n )\n\n # Getting the serialized Dict logs\n param_data = DbLog.objects.filter(objpk=param.pk).filter(objname='something.else.'\n ).values(*update_024.values_to_export)[:1]\n serialized_param_data = dumps_json(list(param_data))\n # Getting the serialized logs for the unknown entity logs (as the export migration fuction\n # provides them) - this should coincide to the above\n serialized_unknown_exp_logs = update_024.get_serialized_unknown_entity_logs(self.schema_editor)\n # Getting their number\n unknown_exp_logs_number = update_024.get_unknown_entity_log_number(self.schema_editor)\n self.to_check['Dict'] = (serialized_param_data, serialized_unknown_exp_logs, unknown_exp_logs_number)\n\n # Getting the serialized legacy workflow logs\n leg_wf = DbLog.objects.filter(objpk=leg_workf.pk).filter(\n objname='aiida.workflows.user.topologicalworkflows.topo.TopologicalWorkflow'\n ).values(*update_024.values_to_export)[:1]\n serialized_leg_wf_logs = dumps_json(list(leg_wf))\n # Getting the serialized logs for the legacy workflow logs (as the export migration function\n # provides them) - this should coincide to the above\n serialized_leg_wf_exp_logs = update_024.get_serialized_legacy_workflow_logs(self.schema_editor)\n eg_wf_exp_logs_number = update_024.get_legacy_workflow_log_number(self.schema_editor)\n self.to_check['WorkflowNode'] = (serialized_leg_wf_logs, serialized_leg_wf_exp_logs, eg_wf_exp_logs_number)\n\n # Getting the serialized logs that don't correspond to a DbNode record\n logs_no_node = DbLog.objects.filter(id__in=[log_5.id, log_6.id]).values(*update_024.values_to_export)\n serialized_logs_no_node = dumps_json(list(logs_no_node))\n # Getting the serialized logs that don't correspond to a node (as the export migration function\n # provides them) - this should coincide to the above\n serialized_logs_exp_no_node = update_024.get_serialized_logs_with_no_nodes(self.schema_editor)\n logs_no_node_number = update_024.get_logs_with_no_nodes_number(self.schema_editor)\n self.to_check['NoNode'] = (serialized_logs_no_node, serialized_logs_exp_no_node, logs_no_node_number)\n\n def tearDown(self):\n \"\"\"Cleaning the DbLog, DbUser, DbWorkflow and DbNode records\"\"\"\n DbUser = self.apps.get_model('db', 'DbUser')\n DbNode = self.apps.get_model('db', 'DbNode')\n DbWorkflow = self.apps.get_model('db', 'DbWorkflow')\n DbLog = self.apps.get_model('db', 'DbLog')\n\n DbLog.objects.all().delete()\n DbNode.objects.all().delete() # pylint: disable=no-member\n DbWorkflow.objects.all().delete() # pylint: disable=no-member\n DbUser.objects.all().delete() # pylint: disable=no-member\n super().tearDown()\n\n def test_dblog_calculation_node(self):\n \"\"\"\n Verify that after the migration there is only two log records left and verify that they corresponds to\n the CalculationNodes.\n \"\"\"\n DbLog = self.apps.get_model('db', 'DbLog')\n\n # Check that only two log records exist\n self.assertEqual(DbLog.objects.count(), 2, 
'There should be two log records left')\n\n # Get the node id of the log record referencing the node and verify that it is the correct one\n dbnode_id_1 = DbLog.objects.filter(pk=self.to_check['CalculationNode'][1]\n ).values('dbnode_id')[:1].get()['dbnode_id']\n self.assertEqual(dbnode_id_1, self.to_check['CalculationNode'][0], 'referenced node is not the expected one')\n dbnode_id_2 = DbLog.objects.filter(pk=self.to_check['CalculationNode'][3]\n ).values('dbnode_id')[:1].get()['dbnode_id']\n self.assertEqual(dbnode_id_2, self.to_check['CalculationNode'][2], 'referenced node is not the expected one')\n\n def test_dblog_correct_export_of_logs(self):\n \"\"\"\n Verify that export log methods for legacy workflows, unknown entities and log records that\n don't correspond to nodes, work as expected\n \"\"\"\n import json\n\n self.assertEqual(self.to_check['Dict'][0], self.to_check['Dict'][1])\n self.assertEqual(self.to_check['Dict'][2], 1)\n\n self.assertEqual(self.to_check['WorkflowNode'][0], self.to_check['WorkflowNode'][1])\n self.assertEqual(self.to_check['WorkflowNode'][2], 1)\n\n self.assertEqual(\n sorted(list(json.loads(self.to_check['NoNode'][0])), key=lambda k: k['id']),\n sorted(list(json.loads(self.to_check['NoNode'][1])), key=lambda k: k['id'])\n )\n self.assertEqual(self.to_check['NoNode'][2], 2)\n\n def test_dblog_unique_uuids(self):\n \"\"\"\n Verify that the UUIDs of the log records are unique\n \"\"\"\n DbLog = self.apps.get_model('db', 'DbLog')\n\n l_uuids = list(_['uuid'] for _ in DbLog.objects.values('uuid'))\n s_uuids = set(l_uuids)\n self.assertEqual(len(l_uuids), len(s_uuids), 'The UUIDs are not all unique.')\n\n def test_metadata_correctness(self):\n \"\"\"\n Verify that the metadata of the remaining records don't have an objpk and objmetadata values.\n \"\"\"\n import json\n\n DbLog = self.apps.get_model('db', 'DbLog')\n\n metadata = list(json.loads(_['metadata']) for _ in DbLog.objects.values('metadata'))\n # Verify that the objpk and objname are no longer part of the metadata\n for m_res in metadata:\n self.assertNotIn('objpk', m_res.keys(), 'objpk should not exist any more in metadata')\n self.assertNotIn('objname', m_res.keys(), 'objname should not exist any more in metadata')\n\n\nclass TestDbLogMigrationBackward(TestMigrations):\n \"\"\"\n Check that backward migrations work also for the DbLog migration(s).\n \"\"\"\n\n migrate_from = '0024_dblog_update'\n migrate_to = '0023_calc_job_option_attribute_keys'\n\n def setUpBeforeMigration(self):\n import json\n\n DbNode = self.apps.get_model('db', 'DbNode')\n DbLog = self.apps.get_model('db', 'DbLog')\n\n # Creating the needed nodes & workflows\n calc_1 = DbNode(type='node.process.calculation.CalculationNode.1', user_id=self.default_user.id)\n calc_2 = DbNode(type='node.process.calculation.CalculationNode.2', user_id=self.default_user.id)\n\n # Storing them\n calc_1.save()\n calc_2.save()\n\n # Creating the corresponding log records and storing them\n log_1 = DbLog(\n loggername='CalculationNode logger',\n dbnode_id=calc_1.pk,\n message='calculation node 1',\n metadata=json.dumps({\n 'msecs': 719.0849781036377,\n 'lineno': 350,\n 'thread': 140011612940032,\n 'asctime': '10/21/2018 12:39:51 PM',\n 'created': 1540118391.719085,\n 'levelno': 23,\n 'message': 'calculation node 1',\n })\n )\n log_2 = DbLog(\n loggername='CalculationNode logger',\n dbnode_id=calc_2.pk,\n message='calculation node 2',\n metadata=json.dumps({\n 'msecs': 719.0849781036377,\n 'lineno': 360,\n 'levelno': 23,\n 'message': 'calculation node 
1',\n })\n )\n\n # Storing the log records\n log_1.save()\n log_2.save()\n\n # Keeping what is needed to be verified at the test\n self.to_check = dict()\n self.to_check[log_1.pk] = (log_1.dbnode_id, calc_1.type)\n self.to_check[log_2.pk] = (log_2.dbnode_id, calc_2.type)\n\n def test_objpk_objname(self):\n \"\"\"\n This test verifies that the objpk and objname have the right values\n after a forward and a backward migration.\n \"\"\"\n import json\n DbLog = self.apps.get_model('db', 'DbLog')\n\n # Check that only two log records exist with the correct objpk objname\n for log_pk, to_check_value in self.to_check.items():\n log_entry = DbLog.objects.filter(pk=log_pk)[:1].get()\n log_dbnode_id, node_type = to_check_value\n self.assertEqual(\n log_dbnode_id, log_entry.objpk,\n 'The dbnode_id ({}) of the 0024 schema version should be identical to the objpk ({}) of '\n 'the 0023 schema version.'.format(log_dbnode_id, log_entry.objpk)\n )\n self.assertEqual(\n node_type, log_entry.objname,\n 'The type ({}) of the linked node of the 0024 schema version should be identical to the '\n 'objname ({}) of the 0023 schema version.'.format(node_type, log_entry.objname)\n )\n self.assertEqual(\n log_dbnode_id,\n json.loads(log_entry.metadata)['objpk'],\n 'The dbnode_id ({}) of the 0024 schema version should be identical to the objpk ({}) of '\n 'the 0023 schema version stored in the metadata.'.format(\n log_dbnode_id,\n json.loads(log_entry.metadata)['objpk']\n )\n )\n self.assertEqual(\n node_type,\n json.loads(log_entry.metadata)['objname'],\n 'The type ({}) of the linked node of the 0024 schema version should be identical to the '\n 'objname ({}) of the 0023 schema version stored in the metadata.'.format(\n node_type,\n json.loads(log_entry.metadata)['objname']\n )\n )\n\n\nclass TestDataMoveWithinNodeMigration(TestMigrations):\n \"\"\"\n Check that backward migrations work also for the DbLog migration(s).\n \"\"\"\n\n migrate_from = '0024_dblog_update'\n migrate_to = '0025_move_data_within_node_module'\n\n def setUpBeforeMigration(self):\n self.node_calc = self.DbNode(type='node.process.calculation.calcjob.CalcJobNode.', user_id=self.default_user.id)\n self.node_data = self.DbNode(type='data.int.Int.', user_id=self.default_user.id)\n self.node_calc.save()\n self.node_data.save()\n\n def test_data_type_string(self):\n \"\"\"Verify that type string of the Data node was successfully adapted.\"\"\"\n node_calc = self.load_node(self.node_calc.id)\n node_data = self.load_node(self.node_data.id)\n self.assertEqual(node_data.type, 'node.data.int.Int.')\n self.assertEqual(node_calc.type, 'node.process.calculation.calcjob.CalcJobNode.')\n\n\nclass TestTrajectoryDataMigration(TestMigrationsModelModifierV0025):\n \"\"\"\n This test class checks that the migrations 0026_trajectory_symbols_to_attribute and\n 0027_delete_trajectory_symbols_array work as expected.\n These are data migrations for `TrajectoryData` nodes where symbol lists are moved\n from repository array to attributes.\n \"\"\"\n\n migrate_from = '0025_move_data_within_node_module'\n migrate_to = '0027_delete_trajectory_symbols_array'\n\n stepids = numpy.array([60, 70])\n times = stepids * 0.01\n positions = numpy.array([[[0., 0., 0.], [0.5, 0.5, 0.5], [1.5, 1.5, 1.5]],\n [[0., 0., 0.], [0.5, 0.5, 0.5], [1.5, 1.5, 1.5]]])\n velocities = numpy.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],\n [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5], [-0.5, -0.5, -0.5]]])\n cells = numpy.array([[[2., 0., 0.], [0., 2., 0.], [0., 0., 2.]], [[3., 0., 0.], [0., 3., 0.], [0., 0., 
3.]]])\n\n def setUpBeforeMigration(self):\n symbols = numpy.array(['H', 'O', 'C'])\n self.node = self.DbNode(type='node.data.array.trajectory.TrajectoryData.', user_id=self.default_user.id)\n self.node.save()\n\n self.set_node_array(self.node, 'steps', self.stepids)\n self.set_node_array(self.node, 'cells', self.cells)\n self.set_node_array(self.node, 'symbols', symbols)\n self.set_node_array(self.node, 'positions', self.positions)\n self.set_node_array(self.node, 'times', self.times)\n self.set_node_array(self.node, 'velocities', self.velocities)\n\n def test_trajectory_symbols(self):\n \"\"\" Check that the trajectories are migrated correctly \"\"\"\n node = self.load_node(self.node.id)\n self.assertSequenceEqual(self.get_attribute(node, 'symbols'), ['H', 'O', 'C'])\n self.assertSequenceEqual(self.get_node_array(node, 'velocities').tolist(), self.velocities.tolist())\n self.assertSequenceEqual(self.get_node_array(node, 'positions').tolist(), self.positions.tolist())\n with self.assertRaises(IOError):\n self.get_node_array(node, 'symbols')\n\n\nclass TestNodePrefixRemovalMigration(TestMigrations):\n \"\"\"\n This test class checks that the migration 0028_remove_node_prefix works as expected.\n\n That is the final data migration for `Nodes` after `aiida.orm.nodes` reorganization\n was finalized to remove the `node.` prefix\n \"\"\"\n\n migrate_from = '0027_delete_trajectory_symbols_array'\n migrate_to = '0028_remove_node_prefix'\n\n def setUpBeforeMigration(self):\n self.node_calc = self.DbNode(type='node.process.calculation.calcjob.CalcJobNode.', user_id=self.default_user.id)\n self.node_data = self.DbNode(type='node.data.int.Int.', user_id=self.default_user.id)\n self.node_calc.save()\n self.node_data.save()\n\n def test_data_node_type_string(self):\n \"\"\"Verify that type string of the nodes was successfully adapted.\"\"\"\n node_calc = self.load_node(self.node_calc.id)\n node_data = self.load_node(self.node_data.id)\n\n self.assertEqual(node_data.type, 'data.int.Int.')\n self.assertEqual(node_calc.type, 'process.calculation.calcjob.CalcJobNode.')\n\n\nclass TestParameterDataToDictMigration(TestMigrations):\n \"\"\"\n This test class checks that the migration 0029_rename_parameter_data_to_dict works as expected.\n\n This is a data migration for the renaming of `ParameterData` to `Dict`.\n \"\"\"\n\n migrate_from = '0028_remove_node_prefix'\n migrate_to = '0029_rename_parameter_data_to_dict'\n\n def setUpBeforeMigration(self):\n self.node = self.DbNode(type='data.parameter.ParameterData.', user_id=self.default_user.id)\n self.node.save()\n\n def test_data_node_type_string(self):\n \"\"\"Verify that type string of the nodes was successfully adapted.\"\"\"\n node = self.load_node(self.node.id)\n self.assertEqual(node.type, 'data.dict.Dict.')\n\n\nclass TestTextFieldToJSONFieldMigration(TestMigrations): # pylint: disable=too-many-instance-attributes\n \"\"\"\n This test class checks that the migration 0033_replace_text_field_with_json_field works as expected.\n\n That migration replaces the use of text fields to store JSON data with builtin JSONFields.\n \"\"\"\n\n migrate_from = '0032_remove_legacy_workflows'\n migrate_to = '0033_replace_text_field_with_json_field'\n\n def setUpBeforeMigration(self):\n from aiida.common import json\n\n self.DbNode = self.apps.get_model('db', 'DbNode')\n self.DbComputer = self.apps.get_model('db', 'DbComputer')\n self.DbAuthInfo = self.apps.get_model('db', 'DbAuthInfo')\n self.DbLog = self.apps.get_model('db', 'DbLog')\n\n self.node = 
self.DbNode(node_type='node.process.calculation.CalculationNode.', user_id=self.default_user.id)\n self.node.save()\n\n self.computer_metadata = {\n 'shebang': '#!/bin/bash',\n 'workdir': '/scratch/',\n 'append_text': '',\n 'prepend_text': '',\n 'mpirun_command': ['mpirun', '-np', '{tot_num_mpiprocs}'],\n 'default_mpiprocs_per_machine': 1\n }\n self.computer_kwargs = {\n 'name': 'localhost_testing',\n 'hostname': 'localhost',\n 'transport_type': 'local',\n 'scheduler_type': 'direct',\n 'metadata': json.dumps(self.computer_metadata),\n }\n self.computer = self.DbComputer(**self.computer_kwargs)\n self.computer.save()\n\n self.auth_info_auth_params = {'safe_interval': 2}\n self.auth_info_metadata = {'safe_interval': 2}\n self.auth_info_kwargs = {\n 'aiidauser_id': self.default_user.pk,\n 'dbcomputer': self.computer,\n 'auth_params': json.dumps(self.auth_info_auth_params),\n 'metadata': json.dumps(self.auth_info_metadata),\n }\n self.auth_info = self.DbAuthInfo(**self.auth_info_kwargs)\n self.auth_info.save()\n\n self.log_metadata = {\n 'msecs': 719.0849781036377,\n 'lineno': 350,\n 'thread': 140011612940032,\n 'asctime': '10/21/2018 12:39:51 PM',\n 'created': 1540118391.719085,\n 'levelno': 23,\n 'message': 'calculation node 1',\n }\n self.log_kwargs = {\n 'loggername': 'localhost',\n 'levelname': 'localhost',\n 'dbnode_id': self.node.id,\n 'metadata': json.dumps(self.log_metadata)\n }\n self.log = self.DbLog(**self.log_kwargs)\n self.log.save()\n\n def test_text_field_to_json_field_migration(self):\n \"\"\"Verify that the values in the text fields were maintained after migrating the field to JSONField.\"\"\"\n # Reload the objects to make sure the new data is loaded\n computer = self.DbComputer.objects.get(pk=self.computer.id)\n auth_info = self.DbAuthInfo.objects.get(pk=self.auth_info.id)\n log = self.DbLog.objects.get(pk=self.log.id)\n\n # Make sure that the migrated data matches the original\n self.assertDictEqual(computer.metadata, self.computer_metadata)\n self.assertDictEqual(auth_info.metadata, self.auth_info_metadata)\n self.assertDictEqual(auth_info.auth_params, self.auth_info_auth_params)\n self.assertDictEqual(log.metadata, self.log_metadata)\n\n\nclass TestResetHash(TestMigrations):\n \"\"\"\n This test class checks that only the hash extra is removed.\n \"\"\"\n\n migrate_from = '0038_data_migration_legacy_job_calculations'\n migrate_to = '0039_reset_hash'\n\n def setUpBeforeMigration(self):\n self.node = self.DbNode(\n node_type='process.calculation.calcjob.CalcJobNode.',\n user_id=self.default_user.id,\n extras={\n 'something': 123,\n '_aiida_hash': 'abcd'\n }\n )\n self.node.save()\n\n def test_data_migrated(self):\n \"\"\"Verify that type string of the nodes was successfully adapted.\"\"\"\n node = self.load_node(self.node.id)\n extras = node.extras\n self.assertEqual(extras.get('something'), 123) # Other extras should be untouched\n self.assertNotIn('_aiida_hash', extras) # The hash extra should have been removed\n"
] |
[
[
"numpy.array"
]
] |
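An illustrative aside on the test_migrations_many.py record above (not part of the dataset row): the only NumPy API flagged for this file is numpy.array, used to build the TrajectoryData fixtures. The short sketch below reproduces that fixture construction with the same shapes, simply to show what the migration test feeds into set_node_array.

import numpy

stepids = numpy.array([60, 70])
times = stepids * 0.01                     # elementwise scaling of the step ids
positions = numpy.array([[[0., 0., 0.], [0.5, 0.5, 0.5], [1.5, 1.5, 1.5]],
                         [[0., 0., 0.], [0.5, 0.5, 0.5], [1.5, 1.5, 1.5]]])
symbols = numpy.array(['H', 'O', 'C'])

print(positions.shape, symbols.tolist())   # (2, 3, 3) ['H', 'O', 'C']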
michaelhalim168/GiftFinder
|
[
"7fda99be827a06e5ef2d112174ded93ad1d3b7b1"
] |
[
"scripts/classifier.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import LabelEncoder\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\nfrom data_cleaning import *\n\nclass OVR_SVC:\n\n def __init__(self, data):\n self.X = data['clean-text']\n self.y = data['category']\n\n le = LabelEncoder()\n self.y = le.fit_transform(self.y)\n\n self.reference = dict(zip(data['category'].to_numpy()), y)\n self.reference = {k:v for k,v in sorted(self.reference.items(), key=lambda item: item[1])}\n\n self.model = OneVsRestClassifier(LinearSVC(random_state=0))\n\n self.x_train, self.x_test, self.y_train, self.y_test = train_test_split(self.X, self.y, test_size=0.3)\n\n def vectorizer(self, type='tfidf'):\n if type == 'tfidf':\n vectorizer = TfidfVectorizer()\n elif type == 'count':\n vectorizer = CountVectorizer()\n\n self.x_train = vectorizer.fit_transform(self.x_train)\n self.x_test = vectorizer.transform(self.x_test)\n \n def train_model(self):\n self.model.fit(self.x_train, self.y_train)\n \n def evaluate_model(self):\n y_predicted = self.model.predict(self.x_test)\n accuracy = self.model.score(self.x_test, self.y_test)\n return y_predicted, accuracy\n\n\nclass TweetCategory:\n\n def __init__(self, model, vectorizer, tweet_data, reference):\n self.data = tweet_data\n self.model = model\n self.vectorizer = vectorizer\n self.ref = reference\n self.analyzer = SentimentIntensityAnalyzer()\n\n def process_user_tweets(self):\n self.data['clean-tweet'] = self.data['Tweet Content'].map(tweet_preprocess)\n self.data = self.data[['Tweet Content', 'clean-tweet']].rename(columns={'Tweet Content': 'tweet'})\n\n self.data['vader-sentiment'] = self.data['tweet'].apply(lambda x: self.analyzer.polarity_scores(x))\n self.data['vader-pos'] = self.data['vader-sentiment'].apply(lambda x: x['pos'])\n self.data['vader-neu'] = self.data['vader-sentiment'].apply(lambda x: x['neu'])\n self.data['vader-neg'] = self.data['vader-sentiment'].apply(lambda x: x['neg'])\n self.data['vader-compound'] = self.data['vader-sentiment'].apply(lambda x: x['compound'])\n\n\n def predict_topics(self, sentiment_thresh, confidence_thresh):\n self.predict_df = self.data[(self.data['vader-compound'] >= sentiment_thresh) & (self.data['clean-tweet'] != '')]\n \n tweets_transformed = self.vectorizer.transform(self.predict_df['clean-tweet'])\n predicted_category = self.model.predict(tweets_transformed)\n\n p = np.array(self.model.decision_function(tweets_transformed))\n probability = np.exp(p)/np.sum(np.exp(p), axis=1, keepdims=True)\n probability_list = [max(prob) for prob in probability]\n\n self.predict_df['predicted_label'] = predicted_category\n self.predict_df['probability'] = probability_list\n self.predict_df['predicted'] = self.predict_df['predicted_label'].apply(lambda x: self.ref[x])\n\n top_categories = self.predict_df[self.predict_df['probability'] >= confidence_thresh]['predicted'].value_counts()[:3] \n\n return top_categories\n \ndef user_tweet_df(tweets):\n all_tweets = []\n username = tweets[0]._json['user']['screen_name']\n for tweet in tweets:\n all_tweets.append(tweet._json['full_text'])\n \n df = pd.DataFrame({'user': username, 'Tweet Content': all_tweets})\n return df"
] |
[
[
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.feature_extraction.text.CountVectorizer",
"sklearn.svm.LinearSVC",
"numpy.exp",
"sklearn.preprocessing.LabelEncoder",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
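
A minimal, self-contained sketch of the TF-IDF + one-vs-rest LinearSVC pattern that scripts/classifier.py above follows, including the softmax-over-decision-margins confidence estimate used in predict_topics(). The toy corpus, category names, and variable names are illustrative stand-ins, not code or data from the GiftFinder repository.

```python
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import LinearSVC

# Toy labelled corpus standing in for the cleaned tweet text.
data = pd.DataFrame({
    'clean-text': ['new gpu benchmark', 'budget laptop review', 'mechanical keyboard deals',
                   'marathon training plan', 'best running shoes', 'trail race recap',
                   'sourdough starter tips', 'weeknight pasta recipe', 'cast iron care'],
    'category': ['tech', 'tech', 'tech', 'sports', 'sports', 'sports',
                 'cooking', 'cooking', 'cooking'],
})

le = LabelEncoder()
y = le.fit_transform(data['category'])           # category names -> integer labels
reference = dict(zip(y, data['category']))       # integer label -> category name lookup

x_train, x_test, y_train, y_test = train_test_split(
    data['clean-text'], y, test_size=0.3, stratify=y, random_state=0)

vectorizer = TfidfVectorizer()
x_train_vec = vectorizer.fit_transform(x_train)  # fit the vocabulary on the training split only
x_test_vec = vectorizer.transform(x_test)

model = OneVsRestClassifier(LinearSVC(random_state=0))
model.fit(x_train_vec, y_train)

# Softmax over the per-class decision margins, mirroring the confidence step in predict_topics().
scores = np.array(model.decision_function(x_test_vec))
probs = np.exp(scores) / np.sum(np.exp(scores), axis=1, keepdims=True)

print([reference[label] for label in model.predict(x_test_vec)])
print(probs.max(axis=1))
```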
scott-trinkle/fiberorient
|
[
"306cf2741008eb46a97cfccdcf81e9ec33189a8d"
] |
[
"tests/test_vis.py"
] |
[
"import pytest\nimport numpy as np\nfrom numpy.testing import assert_array_equal, assert_array_almost_equal\n\nfrom context import fiberorient as fo\n\n\ndef test_img_to_dec(img, vectors):\n true_dec = np.zeros_like(vectors)\n true_dec[..., 0] = fo.util.rescale(img, scale=255).astype(np.uint8)\n test_dec = fo.vis.img_to_dec(img, vectors)\n assert_array_equal(true_dec, test_dec)\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.zeros_like"
]
] |
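
A short, self-contained sketch of the numpy.testing assertions imported by tests/test_vis.py above: build the expected array explicitly, then compare it element-wise against the computed one. The arrays and tolerances here are toy values, not fiberorient fixtures.

```python
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal

expected = np.zeros_like(np.empty((2, 3)))       # zeros_like copies shape and dtype
computed = expected + 1e-9                       # tiny float noise from a computation

assert_array_equal(expected, np.zeros((2, 3)))            # exact element-wise equality
assert_array_almost_equal(expected, computed, decimal=6)  # equal up to 6 decimal places

try:
    assert_array_equal(expected, computed)       # exact comparison rejects the noise
except AssertionError as err:
    print("exact comparison failed as expected:", type(err).__name__)
```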
daithimarkham/pands-project
|
[
"f3d6dcb82fda1db851a3d78571a9d4a48f908eba"
] |
[
"scatterplot.py"
] |
[
"# David Markham\n# Fisher Iris Data set\n\n# Use a Multivariate scatter-plot to distinguish the relationship between the flowers.\n\n\n# Import Libraries \nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sn\nfrom pandas.plotting import scatter_matrix\n\n\n# Load dataset\ndata = (\"iris.csv\")\nnames = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width', 'species']\ndataset = pd.read_csv(data, header = 0)\n\n\nscatter_matrix(dataset)\nplt.show()"
] |
[
[
"pandas.read_csv",
"pandas.plotting.scatter_matrix",
"matplotlib.pyplot.show"
]
] |
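
A small, self-contained sketch of the pandas.plotting.scatter_matrix usage in scatterplot.py above, run on a synthetic frame so it does not depend on the repository's iris.csv; the column names mirror the snapshot, but the values are generated.

```python
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")                      # render off-screen; drop this line to open a window
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'sepal-length': rng.normal(5.8, 0.8, 150),
    'sepal-width':  rng.normal(3.0, 0.4, 150),
    'petal-length': rng.normal(3.7, 1.8, 150),
    'petal-width':  rng.normal(1.2, 0.8, 150),
})

# One pairwise scatter plot per pair of numeric columns, histograms on the diagonal.
scatter_matrix(df, figsize=(8, 8), diagonal='hist')
plt.savefig('scatter_matrix.png')          # use plt.show() instead in an interactive session
```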
bopopescu/wso2-spark
|
[
"6982456ded39a8fef0ad26600218f8f575aac2a5"
] |
[
"python/pyspark/tests.py"
] |
[
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nUnit tests for PySpark; additional tests are implemented as doctests in\nindividual modules.\n\"\"\"\n\nfrom array import array\nfrom glob import glob\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport zipfile\nimport random\nimport threading\nimport hashlib\n\nfrom py4j.protocol import Py4JJavaError\n\nif sys.version_info[:2] <= (2, 6):\n try:\n import unittest2 as unittest\n except ImportError:\n sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')\n sys.exit(1)\nelse:\n import unittest\n if sys.version_info[0] >= 3:\n xrange = range\n basestring = str\n\nif sys.version >= \"3\":\n from io import StringIO\nelse:\n from StringIO import StringIO\n\n\nfrom pyspark.conf import SparkConf\nfrom pyspark.context import SparkContext\nfrom pyspark.rdd import RDD\nfrom pyspark.files import SparkFiles\nfrom pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \\\n CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \\\n PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \\\n FlattenedValuesSerializer\nfrom pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, ExternalSorter\nfrom pyspark import shuffle\nfrom pyspark.profiler import BasicProfiler\n\n_have_scipy = False\n_have_numpy = False\ntry:\n import scipy.sparse\n _have_scipy = True\nexcept:\n # No SciPy, but that's okay, we'll skip those tests\n pass\ntry:\n import numpy as np\n _have_numpy = True\nexcept:\n # No NumPy, but that's okay, we'll skip those tests\n pass\n\n\nSPARK_HOME = os.environ[\"SPARK_HOME\"]\n\n\nclass MergerTests(unittest.TestCase):\n\n def setUp(self):\n self.N = 1 << 12\n self.l = [i for i in xrange(self.N)]\n self.data = list(zip(self.l, self.l))\n self.agg = Aggregator(lambda x: [x],\n lambda x, y: x.append(y) or x,\n lambda x, y: x.extend(y) or x)\n\n def test_in_memory(self):\n m = InMemoryMerger(self.agg)\n m.mergeValues(self.data)\n self.assertEqual(sum(sum(v) for k, v in m.items()),\n sum(xrange(self.N)))\n\n m = InMemoryMerger(self.agg)\n m.mergeCombiners(map(lambda x_y: (x_y[0], [x_y[1]]), self.data))\n self.assertEqual(sum(sum(v) for k, v in m.items()),\n sum(xrange(self.N)))\n\n def test_small_dataset(self):\n m = ExternalMerger(self.agg, 1000)\n m.mergeValues(self.data)\n self.assertEqual(m.spills, 0)\n self.assertEqual(sum(sum(v) for k, v in m.items()),\n sum(xrange(self.N)))\n\n m = ExternalMerger(self.agg, 1000)\n m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))\n self.assertEqual(m.spills, 0)\n self.assertEqual(sum(sum(v) for k, v in m.items()),\n sum(xrange(self.N)))\n\n def test_medium_dataset(self):\n m 
= ExternalMerger(self.agg, 20)\n m.mergeValues(self.data)\n self.assertTrue(m.spills >= 1)\n self.assertEqual(sum(sum(v) for k, v in m.items()),\n sum(xrange(self.N)))\n\n m = ExternalMerger(self.agg, 10)\n m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))\n self.assertTrue(m.spills >= 1)\n self.assertEqual(sum(sum(v) for k, v in m.items()),\n sum(xrange(self.N)) * 3)\n\n def test_huge_dataset(self):\n m = ExternalMerger(self.agg, 5, partitions=3)\n m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))\n self.assertTrue(m.spills >= 1)\n self.assertEqual(sum(len(v) for k, v in m.items()),\n self.N * 10)\n m._cleanup()\n\n def test_group_by_key(self):\n\n def gen_data(N, step):\n for i in range(1, N + 1, step):\n for j in range(i):\n yield (i, [j])\n\n def gen_gs(N, step=1):\n return shuffle.GroupByKey(gen_data(N, step))\n\n self.assertEqual(1, len(list(gen_gs(1))))\n self.assertEqual(2, len(list(gen_gs(2))))\n self.assertEqual(100, len(list(gen_gs(100))))\n self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])\n self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))\n\n for k, vs in gen_gs(50002, 10000):\n self.assertEqual(k, len(vs))\n self.assertEqual(list(range(k)), list(vs))\n\n ser = PickleSerializer()\n l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))\n for k, vs in l:\n self.assertEqual(k, len(vs))\n self.assertEqual(list(range(k)), list(vs))\n\n\nclass SorterTests(unittest.TestCase):\n def test_in_memory_sort(self):\n l = list(range(1024))\n random.shuffle(l)\n sorter = ExternalSorter(1024)\n self.assertEqual(sorted(l), list(sorter.sorted(l)))\n self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))\n self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))\n self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),\n list(sorter.sorted(l, key=lambda x: -x, reverse=True)))\n\n def test_external_sort(self):\n class CustomizedSorter(ExternalSorter):\n def _next_limit(self):\n return self.memory_limit\n l = list(range(1024))\n random.shuffle(l)\n sorter = CustomizedSorter(1)\n self.assertEqual(sorted(l), list(sorter.sorted(l)))\n self.assertGreater(shuffle.DiskBytesSpilled, 0)\n last = shuffle.DiskBytesSpilled\n self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))\n self.assertGreater(shuffle.DiskBytesSpilled, last)\n last = shuffle.DiskBytesSpilled\n self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))\n self.assertGreater(shuffle.DiskBytesSpilled, last)\n last = shuffle.DiskBytesSpilled\n self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),\n list(sorter.sorted(l, key=lambda x: -x, reverse=True)))\n self.assertGreater(shuffle.DiskBytesSpilled, last)\n\n def test_external_sort_in_rdd(self):\n conf = SparkConf().set(\"spark.python.worker.memory\", \"1m\")\n sc = SparkContext(conf=conf)\n l = list(range(10240))\n random.shuffle(l)\n rdd = sc.parallelize(l, 4)\n self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())\n sc.stop()\n\n\nclass SerializationTestCase(unittest.TestCase):\n\n def test_namedtuple(self):\n from collections import namedtuple\n from pickle import dumps, loads\n P = namedtuple(\"P\", \"x y\")\n p1 = P(1, 3)\n p2 = loads(dumps(p1, 2))\n self.assertEqual(p1, p2)\n\n def test_itemgetter(self):\n from operator import itemgetter\n ser = CloudPickleSerializer()\n d = range(10)\n getter = itemgetter(1)\n getter2 = ser.loads(ser.dumps(getter))\n 
self.assertEqual(getter(d), getter2(d))\n\n getter = itemgetter(0, 3)\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n\n def test_function_module_name(self):\n ser = CloudPickleSerializer()\n func = lambda x: x\n func2 = ser.loads(ser.dumps(func))\n self.assertEqual(func.__module__, func2.__module__)\n\n def test_attrgetter(self):\n from operator import attrgetter\n ser = CloudPickleSerializer()\n\n class C(object):\n def __getattr__(self, item):\n return item\n d = C()\n getter = attrgetter(\"a\")\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n getter = attrgetter(\"a\", \"b\")\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n\n d.e = C()\n getter = attrgetter(\"e.a\")\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n getter = attrgetter(\"e.a\", \"e.b\")\n getter2 = ser.loads(ser.dumps(getter))\n self.assertEqual(getter(d), getter2(d))\n\n # Regression test for SPARK-3415\n def test_pickling_file_handles(self):\n ser = CloudPickleSerializer()\n out1 = sys.stderr\n out2 = ser.loads(ser.dumps(out1))\n self.assertEqual(out1, out2)\n\n def test_func_globals(self):\n\n class Unpicklable(object):\n def __reduce__(self):\n raise Exception(\"not picklable\")\n\n global exit\n exit = Unpicklable()\n\n ser = CloudPickleSerializer()\n self.assertRaises(Exception, lambda: ser.dumps(exit))\n\n def foo():\n sys.exit(0)\n\n self.assertTrue(\"exit\" in foo.__code__.co_names)\n ser.dumps(foo)\n\n def test_compressed_serializer(self):\n ser = CompressedSerializer(PickleSerializer())\n try:\n from StringIO import StringIO\n except ImportError:\n from io import BytesIO as StringIO\n io = StringIO()\n ser.dump_stream([\"abc\", u\"123\", range(5)], io)\n io.seek(0)\n self.assertEqual([\"abc\", u\"123\", range(5)], list(ser.load_stream(io)))\n ser.dump_stream(range(1000), io)\n io.seek(0)\n self.assertEqual([\"abc\", u\"123\", range(5)] + list(range(1000)), list(ser.load_stream(io)))\n io.close()\n\n def test_hash_serializer(self):\n hash(NoOpSerializer())\n hash(UTF8Deserializer())\n hash(PickleSerializer())\n hash(MarshalSerializer())\n hash(AutoSerializer())\n hash(BatchedSerializer(PickleSerializer()))\n hash(AutoBatchedSerializer(MarshalSerializer()))\n hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))\n hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))\n hash(CompressedSerializer(PickleSerializer()))\n hash(FlattenedValuesSerializer(PickleSerializer()))\n\n\nclass QuietTest(object):\n def __init__(self, sc):\n self.log4j = sc._jvm.org.apache.log4j\n\n def __enter__(self):\n self.old_level = self.log4j.LogManager.getRootLogger().getLevel()\n self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.log4j.LogManager.getRootLogger().setLevel(self.old_level)\n\n\nclass PySparkTestCase(unittest.TestCase):\n\n def setUp(self):\n self._old_sys_path = list(sys.path)\n class_name = self.__class__.__name__\n self.sc = SparkContext('local[4]', class_name)\n\n def tearDown(self):\n self.sc.stop()\n sys.path = self._old_sys_path\n\n\nclass ReusedPySparkTestCase(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.sc = SparkContext('local[4]', cls.__name__)\n\n @classmethod\n def tearDownClass(cls):\n cls.sc.stop()\n\n\nclass CheckpointTests(ReusedPySparkTestCase):\n\n def setUp(self):\n self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)\n 
os.unlink(self.checkpointDir.name)\n self.sc.setCheckpointDir(self.checkpointDir.name)\n\n def tearDown(self):\n shutil.rmtree(self.checkpointDir.name)\n\n def test_basic_checkpointing(self):\n parCollection = self.sc.parallelize([1, 2, 3, 4])\n flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))\n\n self.assertFalse(flatMappedRDD.isCheckpointed())\n self.assertTrue(flatMappedRDD.getCheckpointFile() is None)\n\n flatMappedRDD.checkpoint()\n result = flatMappedRDD.collect()\n time.sleep(1) # 1 second\n self.assertTrue(flatMappedRDD.isCheckpointed())\n self.assertEqual(flatMappedRDD.collect(), result)\n self.assertEqual(\"file:\" + self.checkpointDir.name,\n os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))\n\n def test_checkpoint_and_restore(self):\n parCollection = self.sc.parallelize([1, 2, 3, 4])\n flatMappedRDD = parCollection.flatMap(lambda x: [x])\n\n self.assertFalse(flatMappedRDD.isCheckpointed())\n self.assertTrue(flatMappedRDD.getCheckpointFile() is None)\n\n flatMappedRDD.checkpoint()\n flatMappedRDD.count() # forces a checkpoint to be computed\n time.sleep(1) # 1 second\n\n self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)\n recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),\n flatMappedRDD._jrdd_deserializer)\n self.assertEqual([1, 2, 3, 4], recovered.collect())\n\n\nclass AddFileTests(PySparkTestCase):\n\n def test_add_py_file(self):\n # To ensure that we're actually testing addPyFile's effects, check that\n # this job fails due to `userlibrary` not being on the Python path:\n # disable logging in log4j temporarily\n def func(x):\n from userlibrary import UserClass\n return UserClass().hello()\n with QuietTest(self.sc):\n self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)\n\n # Add the file, so the job should now succeed:\n path = os.path.join(SPARK_HOME, \"python/test_support/userlibrary.py\")\n self.sc.addPyFile(path)\n res = self.sc.parallelize(range(2)).map(func).first()\n self.assertEqual(\"Hello World!\", res)\n\n def test_add_file_locally(self):\n path = os.path.join(SPARK_HOME, \"python/test_support/hello.txt\")\n self.sc.addFile(path)\n download_path = SparkFiles.get(\"hello.txt\")\n self.assertNotEqual(path, download_path)\n with open(download_path) as test_file:\n self.assertEqual(\"Hello World!\\n\", test_file.readline())\n\n def test_add_py_file_locally(self):\n # To ensure that we're actually testing addPyFile's effects, check that\n # this fails due to `userlibrary` not being on the Python path:\n def func():\n from userlibrary import UserClass\n self.assertRaises(ImportError, func)\n path = os.path.join(SPARK_HOME, \"python/test_support/userlibrary.py\")\n self.sc.addPyFile(path)\n from userlibrary import UserClass\n self.assertEqual(\"Hello World!\", UserClass().hello())\n\n def test_add_egg_file_locally(self):\n # To ensure that we're actually testing addPyFile's effects, check that\n # this fails due to `userlibrary` not being on the Python path:\n def func():\n from userlib import UserClass\n self.assertRaises(ImportError, func)\n path = os.path.join(SPARK_HOME, \"python/test_support/userlib-0.1.zip\")\n self.sc.addPyFile(path)\n from userlib import UserClass\n self.assertEqual(\"Hello World from inside a package!\", UserClass().hello())\n\n def test_overwrite_system_module(self):\n self.sc.addPyFile(os.path.join(SPARK_HOME, \"python/test_support/SimpleHTTPServer.py\"))\n\n import SimpleHTTPServer\n self.assertEqual(\"My Server\", SimpleHTTPServer.__name__)\n\n def 
func(x):\n import SimpleHTTPServer\n return SimpleHTTPServer.__name__\n\n self.assertEqual([\"My Server\"], self.sc.parallelize(range(1)).map(func).collect())\n\n\nclass RDDTests(ReusedPySparkTestCase):\n\n def test_range(self):\n self.assertEqual(self.sc.range(1, 1).count(), 0)\n self.assertEqual(self.sc.range(1, 0, -1).count(), 1)\n self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)\n\n def test_id(self):\n rdd = self.sc.parallelize(range(10))\n id = rdd.id()\n self.assertEqual(id, rdd.id())\n rdd2 = rdd.map(str).filter(bool)\n id2 = rdd2.id()\n self.assertEqual(id + 1, id2)\n self.assertEqual(id2, rdd2.id())\n\n def test_empty_rdd(self):\n rdd = self.sc.emptyRDD()\n self.assertTrue(rdd.isEmpty())\n\n def test_sum(self):\n self.assertEqual(0, self.sc.emptyRDD().sum())\n self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())\n\n def test_save_as_textfile_with_unicode(self):\n # Regression test for SPARK-970\n x = u\"\\u00A1Hola, mundo!\"\n data = self.sc.parallelize([x])\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n data.saveAsTextFile(tempFile.name)\n raw_contents = b''.join(open(p, 'rb').read()\n for p in glob(tempFile.name + \"/part-0000*\"))\n self.assertEqual(x, raw_contents.strip().decode(\"utf-8\"))\n\n def test_save_as_textfile_with_utf8(self):\n x = u\"\\u00A1Hola, mundo!\"\n data = self.sc.parallelize([x.encode(\"utf-8\")])\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n data.saveAsTextFile(tempFile.name)\n raw_contents = b''.join(open(p, 'rb').read()\n for p in glob(tempFile.name + \"/part-0000*\"))\n self.assertEqual(x, raw_contents.strip().decode('utf8'))\n\n def test_transforming_cartesian_result(self):\n # Regression test for SPARK-1034\n rdd1 = self.sc.parallelize([1, 2])\n rdd2 = self.sc.parallelize([3, 4])\n cart = rdd1.cartesian(rdd2)\n result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()\n\n def test_transforming_pickle_file(self):\n # Regression test for SPARK-2601\n data = self.sc.parallelize([u\"Hello\", u\"World!\"])\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n data.saveAsPickleFile(tempFile.name)\n pickled_file = self.sc.pickleFile(tempFile.name)\n pickled_file.map(lambda x: x).collect()\n\n def test_cartesian_on_textfile(self):\n # Regression test for\n path = os.path.join(SPARK_HOME, \"python/test_support/hello.txt\")\n a = self.sc.textFile(path)\n result = a.cartesian(a).collect()\n (x, y) = result[0]\n self.assertEqual(u\"Hello World!\", x.strip())\n self.assertEqual(u\"Hello World!\", y.strip())\n\n def test_deleting_input_files(self):\n # Regression test for SPARK-1025\n tempFile = tempfile.NamedTemporaryFile(delete=False)\n tempFile.write(b\"Hello World!\")\n tempFile.close()\n data = self.sc.textFile(tempFile.name)\n filtered_data = data.filter(lambda x: True)\n self.assertEqual(1, filtered_data.count())\n os.unlink(tempFile.name)\n with QuietTest(self.sc):\n self.assertRaises(Exception, lambda: filtered_data.count())\n\n def test_sampling_default_seed(self):\n # Test for SPARK-3995 (default seed setting)\n data = self.sc.parallelize(xrange(1000), 1)\n subset = data.takeSample(False, 10)\n self.assertEqual(len(subset), 10)\n\n def test_aggregate_mutable_zero_value(self):\n # Test for SPARK-9021; uses aggregate and treeAggregate to build dict\n # representing a counter of ints\n # NOTE: dict is used instead of collections.Counter for Python 2.6\n # compatibility\n from collections import defaultdict\n\n # Show that single or multiple partitions 
work\n data1 = self.sc.range(10, numSlices=1)\n data2 = self.sc.range(10, numSlices=2)\n\n def seqOp(x, y):\n x[y] += 1\n return x\n\n def comboOp(x, y):\n for key, val in y.items():\n x[key] += val\n return x\n\n counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)\n counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)\n counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)\n counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)\n\n ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))\n self.assertEqual(counts1, ground_truth)\n self.assertEqual(counts2, ground_truth)\n self.assertEqual(counts3, ground_truth)\n self.assertEqual(counts4, ground_truth)\n\n def test_aggregate_by_key_mutable_zero_value(self):\n # Test for SPARK-9021; uses aggregateByKey to make a pair RDD that\n # contains lists of all values for each key in the original RDD\n\n # list(range(...)) for Python 3.x compatibility (can't use * operator\n # on a range object)\n # list(zip(...)) for Python 3.x compatibility (want to parallelize a\n # collection, not a zip object)\n tuples = list(zip(list(range(10))*2, [1]*20))\n # Show that single or multiple partitions work\n data1 = self.sc.parallelize(tuples, 1)\n data2 = self.sc.parallelize(tuples, 2)\n\n def seqOp(x, y):\n x.append(y)\n return x\n\n def comboOp(x, y):\n x.extend(y)\n return x\n\n values1 = data1.aggregateByKey([], seqOp, comboOp).collect()\n values2 = data2.aggregateByKey([], seqOp, comboOp).collect()\n # Sort lists to ensure clean comparison with ground_truth\n values1.sort()\n values2.sort()\n\n ground_truth = [(i, [1]*2) for i in range(10)]\n self.assertEqual(values1, ground_truth)\n self.assertEqual(values2, ground_truth)\n\n def test_fold_mutable_zero_value(self):\n # Test for SPARK-9021; uses fold to merge an RDD of dict counters into\n # a single dict\n # NOTE: dict is used instead of collections.Counter for Python 2.6\n # compatibility\n from collections import defaultdict\n\n counts1 = defaultdict(int, dict((i, 1) for i in range(10)))\n counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))\n counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))\n counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))\n all_counts = [counts1, counts2, counts3, counts4]\n # Show that single or multiple partitions work\n data1 = self.sc.parallelize(all_counts, 1)\n data2 = self.sc.parallelize(all_counts, 2)\n\n def comboOp(x, y):\n for key, val in y.items():\n x[key] += val\n return x\n\n fold1 = data1.fold(defaultdict(int), comboOp)\n fold2 = data2.fold(defaultdict(int), comboOp)\n\n ground_truth = defaultdict(int)\n for counts in all_counts:\n for key, val in counts.items():\n ground_truth[key] += val\n self.assertEqual(fold1, ground_truth)\n self.assertEqual(fold2, ground_truth)\n\n def test_fold_by_key_mutable_zero_value(self):\n # Test for SPARK-9021; uses foldByKey to make a pair RDD that contains\n # lists of all values for each key in the original RDD\n\n tuples = [(i, range(i)) for i in range(10)]*2\n # Show that single or multiple partitions work\n data1 = self.sc.parallelize(tuples, 1)\n data2 = self.sc.parallelize(tuples, 2)\n\n def comboOp(x, y):\n x.extend(y)\n return x\n\n values1 = data1.foldByKey([], comboOp).collect()\n values2 = data2.foldByKey([], comboOp).collect()\n # Sort lists to ensure clean comparison with ground_truth\n values1.sort()\n values2.sort()\n\n # list(range(...)) for Python 3.x compatibility\n ground_truth = [(i, list(range(i))*2) for i in range(10)]\n 
self.assertEqual(values1, ground_truth)\n self.assertEqual(values2, ground_truth)\n\n def test_aggregate_by_key(self):\n data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)\n\n def seqOp(x, y):\n x.add(y)\n return x\n\n def combOp(x, y):\n x |= y\n return x\n\n sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())\n self.assertEqual(3, len(sets))\n self.assertEqual(set([1]), sets[1])\n self.assertEqual(set([2]), sets[3])\n self.assertEqual(set([1, 3]), sets[5])\n\n def test_itemgetter(self):\n rdd = self.sc.parallelize([range(10)])\n from operator import itemgetter\n self.assertEqual([1], rdd.map(itemgetter(1)).collect())\n self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())\n\n def test_namedtuple_in_rdd(self):\n from collections import namedtuple\n Person = namedtuple(\"Person\", \"id firstName lastName\")\n jon = Person(1, \"Jon\", \"Doe\")\n jane = Person(2, \"Jane\", \"Doe\")\n theDoes = self.sc.parallelize([jon, jane])\n self.assertEqual([jon, jane], theDoes.collect())\n\n def test_large_broadcast(self):\n N = 10000\n data = [[float(i) for i in range(300)] for i in range(N)]\n bdata = self.sc.broadcast(data) # 27MB\n m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()\n self.assertEqual(N, m)\n\n def test_multiple_broadcasts(self):\n N = 1 << 21\n b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM\n r = list(range(1 << 15))\n random.shuffle(r)\n s = str(r).encode()\n checksum = hashlib.md5(s).hexdigest()\n b2 = self.sc.broadcast(s)\n r = list(set(self.sc.parallelize(range(10), 10).map(\n lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))\n self.assertEqual(1, len(r))\n size, csum = r[0]\n self.assertEqual(N, size)\n self.assertEqual(checksum, csum)\n\n random.shuffle(r)\n s = str(r).encode()\n checksum = hashlib.md5(s).hexdigest()\n b2 = self.sc.broadcast(s)\n r = list(set(self.sc.parallelize(range(10), 10).map(\n lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))\n self.assertEqual(1, len(r))\n size, csum = r[0]\n self.assertEqual(N, size)\n self.assertEqual(checksum, csum)\n\n def test_large_closure(self):\n N = 200000\n data = [float(i) for i in xrange(N)]\n rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))\n self.assertEqual(N, rdd.first())\n # regression test for SPARK-6886\n self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())\n\n def test_zip_with_different_serializers(self):\n a = self.sc.parallelize(range(5))\n b = self.sc.parallelize(range(100, 105))\n self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])\n a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))\n b = b._reserialize(MarshalSerializer())\n self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])\n # regression test for SPARK-4841\n path = os.path.join(SPARK_HOME, \"python/test_support/hello.txt\")\n t = self.sc.textFile(path)\n cnt = t.count()\n self.assertEqual(cnt, t.zip(t).count())\n rdd = t.map(str)\n self.assertEqual(cnt, t.zip(rdd).count())\n # regression test for bug in _reserializer()\n self.assertEqual(cnt, t.zip(rdd).count())\n\n def test_zip_with_different_object_sizes(self):\n # regress test for SPARK-5973\n a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)\n b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)\n self.assertEqual(10000, a.zip(b).count())\n\n def test_zip_with_different_number_of_items(self):\n a = self.sc.parallelize(range(5), 2)\n 
# different number of partitions\n b = self.sc.parallelize(range(100, 106), 3)\n self.assertRaises(ValueError, lambda: a.zip(b))\n with QuietTest(self.sc):\n # different number of batched items in JVM\n b = self.sc.parallelize(range(100, 104), 2)\n self.assertRaises(Exception, lambda: a.zip(b).count())\n # different number of items in one pair\n b = self.sc.parallelize(range(100, 106), 2)\n self.assertRaises(Exception, lambda: a.zip(b).count())\n # same total number of items, but different distributions\n a = self.sc.parallelize([2, 3], 2).flatMap(range)\n b = self.sc.parallelize([3, 2], 2).flatMap(range)\n self.assertEqual(a.count(), b.count())\n self.assertRaises(Exception, lambda: a.zip(b).count())\n\n def test_count_approx_distinct(self):\n rdd = self.sc.parallelize(xrange(1000))\n self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)\n self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)\n self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)\n self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)\n\n rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)\n self.assertTrue(18 < rdd.countApproxDistinct() < 22)\n self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)\n self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)\n self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)\n\n self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))\n\n def test_histogram(self):\n # empty\n rdd = self.sc.parallelize([])\n self.assertEqual([0], rdd.histogram([0, 10])[1])\n self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])\n self.assertRaises(ValueError, lambda: rdd.histogram(1))\n\n # out of range\n rdd = self.sc.parallelize([10.01, -0.01])\n self.assertEqual([0], rdd.histogram([0, 10])[1])\n self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])\n\n # in range with one bucket\n rdd = self.sc.parallelize(range(1, 5))\n self.assertEqual([4], rdd.histogram([0, 10])[1])\n self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])\n\n # in range with one bucket exact match\n self.assertEqual([4], rdd.histogram([1, 4])[1])\n\n # out of range with two buckets\n rdd = self.sc.parallelize([10.01, -0.01])\n self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])\n\n # out of range with two uneven buckets\n rdd = self.sc.parallelize([10.01, -0.01])\n self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])\n\n # in range with two buckets\n rdd = self.sc.parallelize([1, 2, 3, 5, 6])\n self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])\n\n # in range with two bucket and None\n rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])\n self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])\n\n # in range with two uneven buckets\n rdd = self.sc.parallelize([1, 2, 3, 5, 6])\n self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])\n\n # mixed range with two uneven buckets\n rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])\n self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])\n\n # mixed range with four uneven buckets\n rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])\n self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])\n\n # mixed range with uneven buckets and NaN\n rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,\n 199.0, 200.0, 200.1, None, float('nan')])\n self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])\n\n # out of range with infinite 
buckets\n rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float(\"inf\")])\n self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])\n\n # invalid buckets\n self.assertRaises(ValueError, lambda: rdd.histogram([]))\n self.assertRaises(ValueError, lambda: rdd.histogram([1]))\n self.assertRaises(ValueError, lambda: rdd.histogram(0))\n self.assertRaises(TypeError, lambda: rdd.histogram({}))\n\n # without buckets\n rdd = self.sc.parallelize(range(1, 5))\n self.assertEqual(([1, 4], [4]), rdd.histogram(1))\n\n # without buckets single element\n rdd = self.sc.parallelize([1])\n self.assertEqual(([1, 1], [1]), rdd.histogram(1))\n\n # without bucket no range\n rdd = self.sc.parallelize([1] * 4)\n self.assertEqual(([1, 1], [4]), rdd.histogram(1))\n\n # without buckets basic two\n rdd = self.sc.parallelize(range(1, 5))\n self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))\n\n # without buckets with more requested than elements\n rdd = self.sc.parallelize([1, 2])\n buckets = [1 + 0.2 * i for i in range(6)]\n hist = [1, 0, 0, 0, 1]\n self.assertEqual((buckets, hist), rdd.histogram(5))\n\n # invalid RDDs\n rdd = self.sc.parallelize([1, float('inf')])\n self.assertRaises(ValueError, lambda: rdd.histogram(2))\n rdd = self.sc.parallelize([float('nan')])\n self.assertRaises(ValueError, lambda: rdd.histogram(2))\n\n # string\n rdd = self.sc.parallelize([\"ab\", \"ac\", \"b\", \"bd\", \"ef\"], 2)\n self.assertEqual([2, 2], rdd.histogram([\"a\", \"b\", \"c\"])[1])\n self.assertEqual(([\"ab\", \"ef\"], [5]), rdd.histogram(1))\n self.assertRaises(TypeError, lambda: rdd.histogram(2))\n\n def test_repartitionAndSortWithinPartitions(self):\n rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)\n\n repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)\n partitions = repartitioned.glom().collect()\n self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])\n self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])\n\n def test_distinct(self):\n rdd = self.sc.parallelize((1, 2, 3)*10, 10)\n self.assertEqual(rdd.getNumPartitions(), 10)\n self.assertEqual(rdd.distinct().count(), 3)\n result = rdd.distinct(5)\n self.assertEqual(result.getNumPartitions(), 5)\n self.assertEqual(result.count(), 3)\n\n def test_external_group_by_key(self):\n self.sc._conf.set(\"spark.python.worker.memory\", \"1m\")\n N = 200001\n kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))\n gkv = kv.groupByKey().cache()\n self.assertEqual(3, gkv.count())\n filtered = gkv.filter(lambda kv: kv[0] == 1)\n self.assertEqual(1, filtered.count())\n self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())\n self.assertEqual([(N // 3, N // 3)],\n filtered.values().map(lambda x: (len(x), len(list(x)))).collect())\n result = filtered.collect()[0][1]\n self.assertEqual(N // 3, len(result))\n self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))\n\n def test_sort_on_empty_rdd(self):\n self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())\n\n def test_sample(self):\n rdd = self.sc.parallelize(range(0, 100), 4)\n wo = rdd.sample(False, 0.1, 2).collect()\n wo_dup = rdd.sample(False, 0.1, 2).collect()\n self.assertSetEqual(set(wo), set(wo_dup))\n wr = rdd.sample(True, 0.2, 5).collect()\n wr_dup = rdd.sample(True, 0.2, 5).collect()\n self.assertSetEqual(set(wr), set(wr_dup))\n wo_s10 = rdd.sample(False, 0.3, 10).collect()\n wo_s20 = rdd.sample(False, 0.3, 20).collect()\n self.assertNotEqual(set(wo_s10), set(wo_s20))\n wr_s11 = 
rdd.sample(True, 0.4, 11).collect()\n wr_s21 = rdd.sample(True, 0.4, 21).collect()\n self.assertNotEqual(set(wr_s11), set(wr_s21))\n\n def test_null_in_rdd(self):\n jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)\n rdd = RDD(jrdd, self.sc, UTF8Deserializer())\n self.assertEqual([u\"a\", None, u\"b\"], rdd.collect())\n rdd = RDD(jrdd, self.sc, NoOpSerializer())\n self.assertEqual([b\"a\", None, b\"b\"], rdd.collect())\n\n def test_multiple_python_java_RDD_conversions(self):\n # Regression test for SPARK-5361\n data = [\n (u'1', {u'director': u'David Lean'}),\n (u'2', {u'director': u'Andrew Dominik'})\n ]\n data_rdd = self.sc.parallelize(data)\n data_java_rdd = data_rdd._to_java_object_rdd()\n data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)\n converted_rdd = RDD(data_python_rdd, self.sc)\n self.assertEqual(2, converted_rdd.count())\n\n # conversion between python and java RDD threw exceptions\n data_java_rdd = converted_rdd._to_java_object_rdd()\n data_python_rdd = self.sc._jvm.SerDe.javaToPython(data_java_rdd)\n converted_rdd = RDD(data_python_rdd, self.sc)\n self.assertEqual(2, converted_rdd.count())\n\n def test_narrow_dependency_in_join(self):\n rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))\n parted = rdd.partitionBy(2)\n self.assertEqual(2, parted.union(parted).getNumPartitions())\n self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())\n self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())\n\n tracker = self.sc.statusTracker()\n\n self.sc.setJobGroup(\"test1\", \"test\", True)\n d = sorted(parted.join(parted).collect())\n self.assertEqual(10, len(d))\n self.assertEqual((0, (0, 0)), d[0])\n jobId = tracker.getJobIdsForGroup(\"test1\")[0]\n self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))\n\n self.sc.setJobGroup(\"test2\", \"test\", True)\n d = sorted(parted.join(rdd).collect())\n self.assertEqual(10, len(d))\n self.assertEqual((0, (0, 0)), d[0])\n jobId = tracker.getJobIdsForGroup(\"test2\")[0]\n self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))\n\n self.sc.setJobGroup(\"test3\", \"test\", True)\n d = sorted(parted.cogroup(parted).collect())\n self.assertEqual(10, len(d))\n self.assertEqual([[0], [0]], list(map(list, d[0][1])))\n jobId = tracker.getJobIdsForGroup(\"test3\")[0]\n self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))\n\n self.sc.setJobGroup(\"test4\", \"test\", True)\n d = sorted(parted.cogroup(rdd).collect())\n self.assertEqual(10, len(d))\n self.assertEqual([[0], [0]], list(map(list, d[0][1])))\n jobId = tracker.getJobIdsForGroup(\"test4\")[0]\n self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))\n\n # Regression test for SPARK-6294\n def test_take_on_jrdd(self):\n rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))\n rdd._jrdd.first()\n\n def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):\n # Regression test for SPARK-5969\n seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence\n rdd = self.sc.parallelize(seq)\n for ascending in [True, False]:\n sort = rdd.sortByKey(ascending=ascending, numPartitions=5)\n self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))\n sizes = sort.glom().map(len).collect()\n for size in sizes:\n self.assertGreater(size, 0)\n\n\nclass ProfilerTests(PySparkTestCase):\n\n def setUp(self):\n self._old_sys_path = list(sys.path)\n class_name = self.__class__.__name__\n conf = SparkConf().set(\"spark.python.profile\", \"true\")\n self.sc = 
SparkContext('local[4]', class_name, conf=conf)\n\n def test_profiler(self):\n self.do_computation()\n\n profilers = self.sc.profiler_collector.profilers\n self.assertEqual(1, len(profilers))\n id, profiler, _ = profilers[0]\n stats = profiler.stats()\n self.assertTrue(stats is not None)\n width, stat_list = stats.get_print_list([])\n func_names = [func_name for fname, n, func_name in stat_list]\n self.assertTrue(\"heavy_foo\" in func_names)\n\n old_stdout = sys.stdout\n sys.stdout = io = StringIO()\n self.sc.show_profiles()\n self.assertTrue(\"heavy_foo\" in io.getvalue())\n sys.stdout = old_stdout\n\n d = tempfile.gettempdir()\n self.sc.dump_profiles(d)\n self.assertTrue(\"rdd_%d.pstats\" % id in os.listdir(d))\n\n def test_custom_profiler(self):\n class TestCustomProfiler(BasicProfiler):\n def show(self, id):\n self.result = \"Custom formatting\"\n\n self.sc.profiler_collector.profiler_cls = TestCustomProfiler\n\n self.do_computation()\n\n profilers = self.sc.profiler_collector.profilers\n self.assertEqual(1, len(profilers))\n _, profiler, _ = profilers[0]\n self.assertTrue(isinstance(profiler, TestCustomProfiler))\n\n self.sc.show_profiles()\n self.assertEqual(\"Custom formatting\", profiler.result)\n\n def do_computation(self):\n def heavy_foo(x):\n for i in range(1 << 18):\n x = 1\n\n rdd = self.sc.parallelize(range(100))\n rdd.foreach(heavy_foo)\n\n\nclass InputFormatTests(ReusedPySparkTestCase):\n\n @classmethod\n def setUpClass(cls):\n ReusedPySparkTestCase.setUpClass()\n cls.tempdir = tempfile.NamedTemporaryFile(delete=False)\n os.unlink(cls.tempdir.name)\n cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)\n\n @classmethod\n def tearDownClass(cls):\n ReusedPySparkTestCase.tearDownClass()\n shutil.rmtree(cls.tempdir.name)\n\n @unittest.skipIf(sys.version >= \"3\", \"serialize array of byte\")\n def test_sequencefiles(self):\n basepath = self.tempdir.name\n ints = sorted(self.sc.sequenceFile(basepath + \"/sftestdata/sfint/\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\").collect())\n ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]\n self.assertEqual(ints, ei)\n\n doubles = sorted(self.sc.sequenceFile(basepath + \"/sftestdata/sfdouble/\",\n \"org.apache.hadoop.io.DoubleWritable\",\n \"org.apache.hadoop.io.Text\").collect())\n ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]\n self.assertEqual(doubles, ed)\n\n bytes = sorted(self.sc.sequenceFile(basepath + \"/sftestdata/sfbytes/\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.BytesWritable\").collect())\n ebs = [(1, bytearray('aa', 'utf-8')),\n (1, bytearray('aa', 'utf-8')),\n (2, bytearray('aa', 'utf-8')),\n (2, bytearray('bb', 'utf-8')),\n (2, bytearray('bb', 'utf-8')),\n (3, bytearray('cc', 'utf-8'))]\n self.assertEqual(bytes, ebs)\n\n text = sorted(self.sc.sequenceFile(basepath + \"/sftestdata/sftext/\",\n \"org.apache.hadoop.io.Text\",\n \"org.apache.hadoop.io.Text\").collect())\n et = [(u'1', u'aa'),\n (u'1', u'aa'),\n (u'2', u'aa'),\n (u'2', u'bb'),\n (u'2', u'bb'),\n (u'3', u'cc')]\n self.assertEqual(text, et)\n\n bools = sorted(self.sc.sequenceFile(basepath + \"/sftestdata/sfbool/\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.BooleanWritable\").collect())\n eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]\n self.assertEqual(bools, eb)\n\n nulls = sorted(self.sc.sequenceFile(basepath + \"/sftestdata/sfnull/\",\n 
\"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.BooleanWritable\").collect())\n en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]\n self.assertEqual(nulls, en)\n\n maps = self.sc.sequenceFile(basepath + \"/sftestdata/sfmap/\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.MapWritable\").collect()\n em = [(1, {}),\n (1, {3.0: u'bb'}),\n (2, {1.0: u'aa'}),\n (2, {1.0: u'cc'}),\n (3, {2.0: u'dd'})]\n for v in maps:\n self.assertTrue(v in em)\n\n # arrays get pickled to tuples by default\n tuples = sorted(self.sc.sequenceFile(\n basepath + \"/sftestdata/sfarray/\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.spark.api.python.DoubleArrayWritable\").collect())\n et = [(1, ()),\n (2, (3.0, 4.0, 5.0)),\n (3, (4.0, 5.0, 6.0))]\n self.assertEqual(tuples, et)\n\n # with custom converters, primitive arrays can stay as arrays\n arrays = sorted(self.sc.sequenceFile(\n basepath + \"/sftestdata/sfarray/\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.spark.api.python.DoubleArrayWritable\",\n valueConverter=\"org.apache.spark.api.python.WritableToDoubleArrayConverter\").collect())\n ea = [(1, array('d')),\n (2, array('d', [3.0, 4.0, 5.0])),\n (3, array('d', [4.0, 5.0, 6.0]))]\n self.assertEqual(arrays, ea)\n\n clazz = sorted(self.sc.sequenceFile(basepath + \"/sftestdata/sfclass/\",\n \"org.apache.hadoop.io.Text\",\n \"org.apache.spark.api.python.TestWritable\").collect())\n cname = u'org.apache.spark.api.python.TestWritable'\n ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),\n (u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),\n (u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),\n (u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),\n (u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]\n self.assertEqual(clazz, ec)\n\n unbatched_clazz = sorted(self.sc.sequenceFile(basepath + \"/sftestdata/sfclass/\",\n \"org.apache.hadoop.io.Text\",\n \"org.apache.spark.api.python.TestWritable\",\n ).collect())\n self.assertEqual(unbatched_clazz, ec)\n\n def test_oldhadoop(self):\n basepath = self.tempdir.name\n ints = sorted(self.sc.hadoopFile(basepath + \"/sftestdata/sfint/\",\n \"org.apache.hadoop.mapred.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\").collect())\n ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]\n self.assertEqual(ints, ei)\n\n hellopath = os.path.join(SPARK_HOME, \"python/test_support/hello.txt\")\n oldconf = {\"mapred.input.dir\": hellopath}\n hello = self.sc.hadoopRDD(\"org.apache.hadoop.mapred.TextInputFormat\",\n \"org.apache.hadoop.io.LongWritable\",\n \"org.apache.hadoop.io.Text\",\n conf=oldconf).collect()\n result = [(0, u'Hello World!')]\n self.assertEqual(hello, result)\n\n def test_newhadoop(self):\n basepath = self.tempdir.name\n ints = sorted(self.sc.newAPIHadoopFile(\n basepath + \"/sftestdata/sfint/\",\n \"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\").collect())\n ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]\n self.assertEqual(ints, ei)\n\n hellopath = os.path.join(SPARK_HOME, \"python/test_support/hello.txt\")\n newconf = {\"mapred.input.dir\": hellopath}\n hello = self.sc.newAPIHadoopRDD(\"org.apache.hadoop.mapreduce.lib.input.TextInputFormat\",\n 
\"org.apache.hadoop.io.LongWritable\",\n \"org.apache.hadoop.io.Text\",\n conf=newconf).collect()\n result = [(0, u'Hello World!')]\n self.assertEqual(hello, result)\n\n def test_newolderror(self):\n basepath = self.tempdir.name\n self.assertRaises(Exception, lambda: self.sc.hadoopFile(\n basepath + \"/sftestdata/sfint/\",\n \"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\"))\n\n self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(\n basepath + \"/sftestdata/sfint/\",\n \"org.apache.hadoop.mapred.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\"))\n\n def test_bad_inputs(self):\n basepath = self.tempdir.name\n self.assertRaises(Exception, lambda: self.sc.sequenceFile(\n basepath + \"/sftestdata/sfint/\",\n \"org.apache.hadoop.io.NotValidWritable\",\n \"org.apache.hadoop.io.Text\"))\n self.assertRaises(Exception, lambda: self.sc.hadoopFile(\n basepath + \"/sftestdata/sfint/\",\n \"org.apache.hadoop.mapred.NotValidInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\"))\n self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(\n basepath + \"/sftestdata/sfint/\",\n \"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\"))\n\n def test_converters(self):\n # use of custom converters\n basepath = self.tempdir.name\n maps = sorted(self.sc.sequenceFile(\n basepath + \"/sftestdata/sfmap/\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.MapWritable\",\n keyConverter=\"org.apache.spark.api.python.TestInputKeyConverter\",\n valueConverter=\"org.apache.spark.api.python.TestInputValueConverter\").collect())\n em = [(u'\\x01', []),\n (u'\\x01', [3.0]),\n (u'\\x02', [1.0]),\n (u'\\x02', [1.0]),\n (u'\\x03', [2.0])]\n self.assertEqual(maps, em)\n\n def test_binary_files(self):\n path = os.path.join(self.tempdir.name, \"binaryfiles\")\n os.mkdir(path)\n data = b\"short binary data\"\n with open(os.path.join(path, \"part-0000\"), 'wb') as f:\n f.write(data)\n [(p, d)] = self.sc.binaryFiles(path).collect()\n self.assertTrue(p.endswith(\"part-0000\"))\n self.assertEqual(d, data)\n\n def test_binary_records(self):\n path = os.path.join(self.tempdir.name, \"binaryrecords\")\n os.mkdir(path)\n with open(os.path.join(path, \"part-0000\"), 'w') as f:\n for i in range(100):\n f.write('%04d' % i)\n result = self.sc.binaryRecords(path, 4).map(int).collect()\n self.assertEqual(list(range(100)), result)\n\n\nclass OutputFormatTests(ReusedPySparkTestCase):\n\n def setUp(self):\n self.tempdir = tempfile.NamedTemporaryFile(delete=False)\n os.unlink(self.tempdir.name)\n\n def tearDown(self):\n shutil.rmtree(self.tempdir.name, ignore_errors=True)\n\n @unittest.skipIf(sys.version >= \"3\", \"serialize array of byte\")\n def test_sequencefiles(self):\n basepath = self.tempdir.name\n ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]\n self.sc.parallelize(ei).saveAsSequenceFile(basepath + \"/sfint/\")\n ints = sorted(self.sc.sequenceFile(basepath + \"/sfint/\").collect())\n self.assertEqual(ints, ei)\n\n ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]\n self.sc.parallelize(ed).saveAsSequenceFile(basepath + \"/sfdouble/\")\n doubles = sorted(self.sc.sequenceFile(basepath + \"/sfdouble/\").collect())\n self.assertEqual(doubles, ed)\n\n ebs = [(1, bytearray(b'\\x00\\x07spam\\x08')), 
(2, bytearray(b'\\x00\\x07spam\\x08'))]\n self.sc.parallelize(ebs).saveAsSequenceFile(basepath + \"/sfbytes/\")\n bytes = sorted(self.sc.sequenceFile(basepath + \"/sfbytes/\").collect())\n self.assertEqual(bytes, ebs)\n\n et = [(u'1', u'aa'),\n (u'2', u'bb'),\n (u'3', u'cc')]\n self.sc.parallelize(et).saveAsSequenceFile(basepath + \"/sftext/\")\n text = sorted(self.sc.sequenceFile(basepath + \"/sftext/\").collect())\n self.assertEqual(text, et)\n\n eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]\n self.sc.parallelize(eb).saveAsSequenceFile(basepath + \"/sfbool/\")\n bools = sorted(self.sc.sequenceFile(basepath + \"/sfbool/\").collect())\n self.assertEqual(bools, eb)\n\n en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]\n self.sc.parallelize(en).saveAsSequenceFile(basepath + \"/sfnull/\")\n nulls = sorted(self.sc.sequenceFile(basepath + \"/sfnull/\").collect())\n self.assertEqual(nulls, en)\n\n em = [(1, {}),\n (1, {3.0: u'bb'}),\n (2, {1.0: u'aa'}),\n (2, {1.0: u'cc'}),\n (3, {2.0: u'dd'})]\n self.sc.parallelize(em).saveAsSequenceFile(basepath + \"/sfmap/\")\n maps = self.sc.sequenceFile(basepath + \"/sfmap/\").collect()\n for v in maps:\n self.assertTrue(v, em)\n\n def test_oldhadoop(self):\n basepath = self.tempdir.name\n dict_data = [(1, {}),\n (1, {\"row1\": 1.0}),\n (2, {\"row2\": 2.0})]\n self.sc.parallelize(dict_data).saveAsHadoopFile(\n basepath + \"/oldhadoop/\",\n \"org.apache.hadoop.mapred.SequenceFileOutputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.MapWritable\")\n result = self.sc.hadoopFile(\n basepath + \"/oldhadoop/\",\n \"org.apache.hadoop.mapred.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.MapWritable\").collect()\n for v in result:\n self.assertTrue(v, dict_data)\n\n conf = {\n \"mapred.output.format.class\": \"org.apache.hadoop.mapred.SequenceFileOutputFormat\",\n \"mapred.output.key.class\": \"org.apache.hadoop.io.IntWritable\",\n \"mapred.output.value.class\": \"org.apache.hadoop.io.MapWritable\",\n \"mapred.output.dir\": basepath + \"/olddataset/\"\n }\n self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)\n input_conf = {\"mapred.input.dir\": basepath + \"/olddataset/\"}\n result = self.sc.hadoopRDD(\n \"org.apache.hadoop.mapred.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.MapWritable\",\n conf=input_conf).collect()\n for v in result:\n self.assertTrue(v, dict_data)\n\n def test_newhadoop(self):\n basepath = self.tempdir.name\n data = [(1, \"\"),\n (1, \"a\"),\n (2, \"bcdf\")]\n self.sc.parallelize(data).saveAsNewAPIHadoopFile(\n basepath + \"/newhadoop/\",\n \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\")\n result = sorted(self.sc.newAPIHadoopFile(\n basepath + \"/newhadoop/\",\n \"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\").collect())\n self.assertEqual(result, data)\n\n conf = {\n \"mapreduce.outputformat.class\":\n \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\",\n \"mapred.output.key.class\": \"org.apache.hadoop.io.IntWritable\",\n \"mapred.output.value.class\": \"org.apache.hadoop.io.Text\",\n \"mapred.output.dir\": basepath + \"/newdataset/\"\n }\n self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)\n input_conf = {\"mapred.input.dir\": basepath + \"/newdataset/\"}\n 
new_dataset = sorted(self.sc.newAPIHadoopRDD(\n \"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.hadoop.io.Text\",\n conf=input_conf).collect())\n self.assertEqual(new_dataset, data)\n\n @unittest.skipIf(sys.version >= \"3\", \"serialize of array\")\n def test_newhadoop_with_array(self):\n basepath = self.tempdir.name\n # use custom ArrayWritable types and converters to handle arrays\n array_data = [(1, array('d')),\n (1, array('d', [1.0, 2.0, 3.0])),\n (2, array('d', [3.0, 4.0, 5.0]))]\n self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(\n basepath + \"/newhadoop/\",\n \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.spark.api.python.DoubleArrayWritable\",\n valueConverter=\"org.apache.spark.api.python.DoubleArrayToWritableConverter\")\n result = sorted(self.sc.newAPIHadoopFile(\n basepath + \"/newhadoop/\",\n \"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.spark.api.python.DoubleArrayWritable\",\n valueConverter=\"org.apache.spark.api.python.WritableToDoubleArrayConverter\").collect())\n self.assertEqual(result, array_data)\n\n conf = {\n \"mapreduce.outputformat.class\":\n \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\",\n \"mapred.output.key.class\": \"org.apache.hadoop.io.IntWritable\",\n \"mapred.output.value.class\": \"org.apache.spark.api.python.DoubleArrayWritable\",\n \"mapred.output.dir\": basepath + \"/newdataset/\"\n }\n self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(\n conf,\n valueConverter=\"org.apache.spark.api.python.DoubleArrayToWritableConverter\")\n input_conf = {\"mapred.input.dir\": basepath + \"/newdataset/\"}\n new_dataset = sorted(self.sc.newAPIHadoopRDD(\n \"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat\",\n \"org.apache.hadoop.io.IntWritable\",\n \"org.apache.spark.api.python.DoubleArrayWritable\",\n valueConverter=\"org.apache.spark.api.python.WritableToDoubleArrayConverter\",\n conf=input_conf).collect())\n self.assertEqual(new_dataset, array_data)\n\n def test_newolderror(self):\n basepath = self.tempdir.name\n rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, \"a\" * x))\n self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(\n basepath + \"/newolderror/saveAsHadoopFile/\",\n \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\"))\n self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(\n basepath + \"/newolderror/saveAsNewAPIHadoopFile/\",\n \"org.apache.hadoop.mapred.SequenceFileOutputFormat\"))\n\n def test_bad_inputs(self):\n basepath = self.tempdir.name\n rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, \"a\" * x))\n self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(\n basepath + \"/badinputs/saveAsHadoopFile/\",\n \"org.apache.hadoop.mapred.NotValidOutputFormat\"))\n self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(\n basepath + \"/badinputs/saveAsNewAPIHadoopFile/\",\n \"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat\"))\n\n def test_converters(self):\n # use of custom converters\n basepath = self.tempdir.name\n data = [(1, {3.0: u'bb'}),\n (2, {1.0: u'aa'}),\n (3, {2.0: u'dd'})]\n self.sc.parallelize(data).saveAsNewAPIHadoopFile(\n basepath + \"/converters/\",\n \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\",\n keyConverter=\"org.apache.spark.api.python.TestOutputKeyConverter\",\n 
valueConverter=\"org.apache.spark.api.python.TestOutputValueConverter\")\n converted = sorted(self.sc.sequenceFile(basepath + \"/converters/\").collect())\n expected = [(u'1', 3.0),\n (u'2', 1.0),\n (u'3', 2.0)]\n self.assertEqual(converted, expected)\n\n def test_reserialization(self):\n basepath = self.tempdir.name\n x = range(1, 5)\n y = range(1001, 1005)\n data = list(zip(x, y))\n rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))\n rdd.saveAsSequenceFile(basepath + \"/reserialize/sequence\")\n result1 = sorted(self.sc.sequenceFile(basepath + \"/reserialize/sequence\").collect())\n self.assertEqual(result1, data)\n\n rdd.saveAsHadoopFile(\n basepath + \"/reserialize/hadoop\",\n \"org.apache.hadoop.mapred.SequenceFileOutputFormat\")\n result2 = sorted(self.sc.sequenceFile(basepath + \"/reserialize/hadoop\").collect())\n self.assertEqual(result2, data)\n\n rdd.saveAsNewAPIHadoopFile(\n basepath + \"/reserialize/newhadoop\",\n \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\")\n result3 = sorted(self.sc.sequenceFile(basepath + \"/reserialize/newhadoop\").collect())\n self.assertEqual(result3, data)\n\n conf4 = {\n \"mapred.output.format.class\": \"org.apache.hadoop.mapred.SequenceFileOutputFormat\",\n \"mapred.output.key.class\": \"org.apache.hadoop.io.IntWritable\",\n \"mapred.output.value.class\": \"org.apache.hadoop.io.IntWritable\",\n \"mapred.output.dir\": basepath + \"/reserialize/dataset\"}\n rdd.saveAsHadoopDataset(conf4)\n result4 = sorted(self.sc.sequenceFile(basepath + \"/reserialize/dataset\").collect())\n self.assertEqual(result4, data)\n\n conf5 = {\"mapreduce.outputformat.class\":\n \"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat\",\n \"mapred.output.key.class\": \"org.apache.hadoop.io.IntWritable\",\n \"mapred.output.value.class\": \"org.apache.hadoop.io.IntWritable\",\n \"mapred.output.dir\": basepath + \"/reserialize/newdataset\"}\n rdd.saveAsNewAPIHadoopDataset(conf5)\n result5 = sorted(self.sc.sequenceFile(basepath + \"/reserialize/newdataset\").collect())\n self.assertEqual(result5, data)\n\n def test_malformed_RDD(self):\n basepath = self.tempdir.name\n # non-batch-serialized RDD[[(K, V)]] should be rejected\n data = [[(1, \"a\")], [(2, \"aa\")], [(3, \"aaa\")]]\n rdd = self.sc.parallelize(data, len(data))\n self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(\n basepath + \"/malformed/sequence\"))\n\n\nclass DaemonTests(unittest.TestCase):\n def connect(self, port):\n from socket import socket, AF_INET, SOCK_STREAM\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect(('127.0.0.1', port))\n # send a split index of -1 to shutdown the worker\n sock.send(b\"\\xFF\\xFF\\xFF\\xFF\")\n sock.close()\n return True\n\n def do_termination_test(self, terminator):\n from subprocess import Popen, PIPE\n from errno import ECONNREFUSED\n\n # start daemon\n daemon_path = os.path.join(os.path.dirname(__file__), \"daemon.py\")\n daemon = Popen([sys.executable, daemon_path], stdin=PIPE, stdout=PIPE)\n\n # read the port number\n port = read_int(daemon.stdout)\n\n # daemon should accept connections\n self.assertTrue(self.connect(port))\n\n # request shutdown\n terminator(daemon)\n time.sleep(1)\n\n # daemon should no longer accept connections\n try:\n self.connect(port)\n except EnvironmentError as exception:\n self.assertEqual(exception.errno, ECONNREFUSED)\n else:\n self.fail(\"Expected EnvironmentError to be raised\")\n\n def test_termination_stdin(self):\n \"\"\"Ensure that daemon and workers terminate when stdin is closed.\"\"\"\n 
self.do_termination_test(lambda daemon: daemon.stdin.close())\n\n def test_termination_sigterm(self):\n \"\"\"Ensure that daemon and workers terminate on SIGTERM.\"\"\"\n from signal import SIGTERM\n self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))\n\n\nclass WorkerTests(ReusedPySparkTestCase):\n def test_cancel_task(self):\n temp = tempfile.NamedTemporaryFile(delete=True)\n temp.close()\n path = temp.name\n\n def sleep(x):\n import os\n import time\n with open(path, 'w') as f:\n f.write(\"%d %d\" % (os.getppid(), os.getpid()))\n time.sleep(100)\n\n # start job in background thread\n def run():\n try:\n self.sc.parallelize(range(1), 1).foreach(sleep)\n except Exception:\n pass\n import threading\n t = threading.Thread(target=run)\n t.daemon = True\n t.start()\n\n daemon_pid, worker_pid = 0, 0\n while True:\n if os.path.exists(path):\n with open(path) as f:\n data = f.read().split(' ')\n daemon_pid, worker_pid = map(int, data)\n break\n time.sleep(0.1)\n\n # cancel jobs\n self.sc.cancelAllJobs()\n t.join()\n\n for i in range(50):\n try:\n os.kill(worker_pid, 0)\n time.sleep(0.1)\n except OSError:\n break # worker was killed\n else:\n self.fail(\"worker has not been killed after 5 seconds\")\n\n try:\n os.kill(daemon_pid, 0)\n except OSError:\n self.fail(\"daemon had been killed\")\n\n # run a normal job\n rdd = self.sc.parallelize(xrange(100), 1)\n self.assertEqual(100, rdd.map(str).count())\n\n def test_after_exception(self):\n def raise_exception(_):\n raise Exception()\n rdd = self.sc.parallelize(xrange(100), 1)\n with QuietTest(self.sc):\n self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))\n self.assertEqual(100, rdd.map(str).count())\n\n def test_after_jvm_exception(self):\n tempFile = tempfile.NamedTemporaryFile(delete=False)\n tempFile.write(b\"Hello World!\")\n tempFile.close()\n data = self.sc.textFile(tempFile.name, 1)\n filtered_data = data.filter(lambda x: True)\n self.assertEqual(1, filtered_data.count())\n os.unlink(tempFile.name)\n with QuietTest(self.sc):\n self.assertRaises(Exception, lambda: filtered_data.count())\n\n rdd = self.sc.parallelize(xrange(100), 1)\n self.assertEqual(100, rdd.map(str).count())\n\n def test_accumulator_when_reuse_worker(self):\n from pyspark.accumulators import INT_ACCUMULATOR_PARAM\n acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)\n self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))\n self.assertEqual(sum(range(100)), acc1.value)\n\n acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)\n self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))\n self.assertEqual(sum(range(100)), acc2.value)\n self.assertEqual(sum(range(100)), acc1.value)\n\n def test_reuse_worker_after_take(self):\n rdd = self.sc.parallelize(xrange(100000), 1)\n self.assertEqual(0, rdd.first())\n\n def count():\n try:\n rdd.count()\n except Exception:\n pass\n\n t = threading.Thread(target=count)\n t.daemon = True\n t.start()\n t.join(5)\n self.assertTrue(not t.isAlive())\n self.assertEqual(100000, rdd.count())\n\n def test_with_different_versions_of_python(self):\n rdd = self.sc.parallelize(range(10))\n rdd.count()\n version = self.sc.pythonVer\n self.sc.pythonVer = \"2.0\"\n try:\n with QuietTest(self.sc):\n self.assertRaises(Py4JJavaError, lambda: rdd.count())\n finally:\n self.sc.pythonVer = version\n\n\nclass SparkSubmitTests(unittest.TestCase):\n\n def setUp(self):\n self.programDir = tempfile.mkdtemp()\n self.sparkSubmit = os.path.join(os.environ.get(\"SPARK_HOME\"), \"bin\", \"spark-submit\")\n\n def 
tearDown(self):\n shutil.rmtree(self.programDir)\n\n def createTempFile(self, name, content, dir=None):\n \"\"\"\n Create a temp file with the given name and content and return its path.\n Strips leading spaces from content up to the first '|' in each line.\n \"\"\"\n pattern = re.compile(r'^ *\\|', re.MULTILINE)\n content = re.sub(pattern, '', content.strip())\n if dir is None:\n path = os.path.join(self.programDir, name)\n else:\n os.makedirs(os.path.join(self.programDir, dir))\n path = os.path.join(self.programDir, dir, name)\n with open(path, \"w\") as f:\n f.write(content)\n return path\n\n def createFileInZip(self, name, content, ext=\".zip\", dir=None, zip_name=None):\n \"\"\"\n Create a zip archive containing a file with the given content and return its path.\n Strips leading spaces from content up to the first '|' in each line.\n \"\"\"\n pattern = re.compile(r'^ *\\|', re.MULTILINE)\n content = re.sub(pattern, '', content.strip())\n if dir is None:\n path = os.path.join(self.programDir, name + ext)\n else:\n path = os.path.join(self.programDir, dir, zip_name + ext)\n zip = zipfile.ZipFile(path, 'w')\n zip.writestr(name, content)\n zip.close()\n return path\n\n def create_spark_package(self, artifact_name):\n group_id, artifact_id, version = artifact_name.split(\":\")\n self.createTempFile(\"%s-%s.pom\" % (artifact_id, version), (\"\"\"\n |<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n |<project xmlns=\"http://maven.apache.org/POM/4.0.0\"\n | xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n | xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0\n | http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n | <modelVersion>4.0.0</modelVersion>\n | <groupId>%s</groupId>\n | <artifactId>%s</artifactId>\n | <version>%s</version>\n |</project>\n \"\"\" % (group_id, artifact_id, version)).lstrip(),\n os.path.join(group_id, artifact_id, version))\n self.createFileInZip(\"%s.py\" % artifact_id, \"\"\"\n |def myfunc(x):\n | return x + 1\n \"\"\", \".jar\", os.path.join(group_id, artifact_id, version),\n \"%s-%s\" % (artifact_id, version))\n\n def test_single_script(self):\n \"\"\"Submit and test a single script file\"\"\"\n script = self.createTempFile(\"test.py\", \"\"\"\n |from pyspark import SparkContext\n |\n |sc = SparkContext()\n |print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())\n \"\"\")\n proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)\n out, err = proc.communicate()\n self.assertEqual(0, proc.returncode)\n self.assertIn(\"[2, 4, 6]\", out.decode('utf-8'))\n\n def test_script_with_local_functions(self):\n \"\"\"Submit and test a single script file calling a global function\"\"\"\n script = self.createTempFile(\"test.py\", \"\"\"\n |from pyspark import SparkContext\n |\n |def foo(x):\n | return x * 3\n |\n |sc = SparkContext()\n |print(sc.parallelize([1, 2, 3]).map(foo).collect())\n \"\"\")\n proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)\n out, err = proc.communicate()\n self.assertEqual(0, proc.returncode)\n self.assertIn(\"[3, 6, 9]\", out.decode('utf-8'))\n\n def test_module_dependency(self):\n \"\"\"Submit and test a script with a dependency on another module\"\"\"\n script = self.createTempFile(\"test.py\", \"\"\"\n |from pyspark import SparkContext\n |from mylib import myfunc\n |\n |sc = SparkContext()\n |print(sc.parallelize([1, 2, 3]).map(myfunc).collect())\n \"\"\")\n zip = self.createFileInZip(\"mylib.py\", \"\"\"\n |def myfunc(x):\n | return x + 1\n \"\"\")\n proc = subprocess.Popen([self.sparkSubmit, 
\"--py-files\", zip, script],\n stdout=subprocess.PIPE)\n out, err = proc.communicate()\n self.assertEqual(0, proc.returncode)\n self.assertIn(\"[2, 3, 4]\", out.decode('utf-8'))\n\n def test_module_dependency_on_cluster(self):\n \"\"\"Submit and test a script with a dependency on another module on a cluster\"\"\"\n script = self.createTempFile(\"test.py\", \"\"\"\n |from pyspark import SparkContext\n |from mylib import myfunc\n |\n |sc = SparkContext()\n |print(sc.parallelize([1, 2, 3]).map(myfunc).collect())\n \"\"\")\n zip = self.createFileInZip(\"mylib.py\", \"\"\"\n |def myfunc(x):\n | return x + 1\n \"\"\")\n proc = subprocess.Popen([self.sparkSubmit, \"--py-files\", zip, \"--master\",\n \"local-cluster[1,1,512]\", script],\n stdout=subprocess.PIPE)\n out, err = proc.communicate()\n self.assertEqual(0, proc.returncode)\n self.assertIn(\"[2, 3, 4]\", out.decode('utf-8'))\n\n def test_package_dependency(self):\n \"\"\"Submit and test a script with a dependency on a Spark Package\"\"\"\n script = self.createTempFile(\"test.py\", \"\"\"\n |from pyspark import SparkContext\n |from mylib import myfunc\n |\n |sc = SparkContext()\n |print(sc.parallelize([1, 2, 3]).map(myfunc).collect())\n \"\"\")\n self.create_spark_package(\"a:mylib:0.1\")\n proc = subprocess.Popen([self.sparkSubmit, \"--packages\", \"a:mylib:0.1\", \"--repositories\",\n \"file:\" + self.programDir, script], stdout=subprocess.PIPE)\n out, err = proc.communicate()\n self.assertEqual(0, proc.returncode)\n self.assertIn(\"[2, 3, 4]\", out.decode('utf-8'))\n\n def test_package_dependency_on_cluster(self):\n \"\"\"Submit and test a script with a dependency on a Spark Package on a cluster\"\"\"\n script = self.createTempFile(\"test.py\", \"\"\"\n |from pyspark import SparkContext\n |from mylib import myfunc\n |\n |sc = SparkContext()\n |print(sc.parallelize([1, 2, 3]).map(myfunc).collect())\n \"\"\")\n self.create_spark_package(\"a:mylib:0.1\")\n proc = subprocess.Popen([self.sparkSubmit, \"--packages\", \"a:mylib:0.1\", \"--repositories\",\n \"file:\" + self.programDir, \"--master\",\n \"local-cluster[1,1,512]\", script], stdout=subprocess.PIPE)\n out, err = proc.communicate()\n self.assertEqual(0, proc.returncode)\n self.assertIn(\"[2, 3, 4]\", out.decode('utf-8'))\n\n def test_single_script_on_cluster(self):\n \"\"\"Submit and test a single script on a cluster\"\"\"\n script = self.createTempFile(\"test.py\", \"\"\"\n |from pyspark import SparkContext\n |\n |def foo(x):\n | return x * 2\n |\n |sc = SparkContext()\n |print(sc.parallelize([1, 2, 3]).map(foo).collect())\n \"\"\")\n # this will fail if you have different spark.executor.memory\n # in conf/spark-defaults.conf\n proc = subprocess.Popen(\n [self.sparkSubmit, \"--master\", \"local-cluster[1,1,512]\", script],\n stdout=subprocess.PIPE)\n out, err = proc.communicate()\n self.assertEqual(0, proc.returncode)\n self.assertIn(\"[2, 4, 6]\", out.decode('utf-8'))\n\n\nclass ContextTests(unittest.TestCase):\n\n def test_failed_sparkcontext_creation(self):\n # Regression test for SPARK-1550\n self.assertRaises(Exception, lambda: SparkContext(\"an-invalid-master-name\"))\n\n def test_stop(self):\n sc = SparkContext()\n self.assertNotEqual(SparkContext._active_spark_context, None)\n sc.stop()\n self.assertEqual(SparkContext._active_spark_context, None)\n\n def test_with(self):\n with SparkContext() as sc:\n self.assertNotEqual(SparkContext._active_spark_context, None)\n self.assertEqual(SparkContext._active_spark_context, None)\n\n def test_with_exception(self):\n try:\n with 
SparkContext() as sc:\n                self.assertNotEqual(SparkContext._active_spark_context, None)\n                raise Exception()\n        except:\n            pass\n        self.assertEqual(SparkContext._active_spark_context, None)\n\n    def test_with_stop(self):\n        with SparkContext() as sc:\n            self.assertNotEqual(SparkContext._active_spark_context, None)\n            sc.stop()\n        self.assertEqual(SparkContext._active_spark_context, None)\n\n    def test_progress_api(self):\n        with SparkContext() as sc:\n            sc.setJobGroup('test_progress_api', '', True)\n            rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))\n\n            def run():\n                try:\n                    rdd.count()\n                except Exception:\n                    pass\n            t = threading.Thread(target=run)\n            t.daemon = True\n            t.start()\n            # wait for scheduler to start\n            time.sleep(1)\n\n            tracker = sc.statusTracker()\n            jobIds = tracker.getJobIdsForGroup('test_progress_api')\n            self.assertEqual(1, len(jobIds))\n            job = tracker.getJobInfo(jobIds[0])\n            self.assertEqual(1, len(job.stageIds))\n            stage = tracker.getStageInfo(job.stageIds[0])\n            self.assertEqual(rdd.getNumPartitions(), stage.numTasks)\n\n            sc.cancelAllJobs()\n            t.join()\n            # wait for event listener to update the status\n            time.sleep(1)\n\n            job = tracker.getJobInfo(jobIds[0])\n            self.assertEqual('FAILED', job.status)\n            self.assertEqual([], tracker.getActiveJobsIds())\n            self.assertEqual([], tracker.getActiveStageIds())\n\n            sc.stop()\n\n    def test_startTime(self):\n        with SparkContext() as sc:\n            self.assertGreater(sc.startTime, 0)\n\n\n@unittest.skipIf(not _have_scipy, \"SciPy not installed\")\nclass SciPyTests(PySparkTestCase):\n\n    \"\"\"General PySpark tests that depend on scipy \"\"\"\n\n    def test_serialize(self):\n        from scipy.special import gammaln\n        x = range(1, 5)\n        expected = list(map(gammaln, x))\n        observed = self.sc.parallelize(x).map(gammaln).collect()\n        self.assertEqual(expected, observed)\n\n\n@unittest.skipIf(not _have_numpy, \"NumPy not installed\")\nclass NumPyTests(PySparkTestCase):\n\n    \"\"\"General PySpark tests that depend on numpy \"\"\"\n\n    def test_statcounter_array(self):\n        x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])\n        s = x.stats()\n        self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())\n        self.assertSequenceEqual([1.0, 1.0], s.min().tolist())\n        self.assertSequenceEqual([3.0, 3.0], s.max().tolist())\n        self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())\n\n\nif __name__ == \"__main__\":\n    if not _have_scipy:\n        print(\"NOTE: Skipping SciPy tests as it does not seem to be installed\")\n    if not _have_numpy:\n        print(\"NOTE: Skipping NumPy tests as it does not seem to be installed\")\n    unittest.main()\n    if not _have_scipy:\n        print(\"NOTE: SciPy tests were skipped as it does not seem to be installed\")\n    if not _have_numpy:\n        print(\"NOTE: NumPy tests were skipped as it does not seem to be installed\")\n"
] |
[
[
"numpy.array"
]
] |
MarSaKi/NvEM
|
[
"a636245c96c07f3b507b69f2a9837a4ff127f4aa"
] |
[
"r2r_src/speaker.py"
] |
[
"import torch\nimport numpy as np\nfrom param import args\nimport os\nimport utils\nimport model\nimport torch.nn.functional as F\nimport time\n\n\nclass Speaker():\n env_actions = {\n 'left': (0,-1, 0), # left\n 'right': (0, 1, 0), # right\n 'up': (0, 0, 1), # up\n 'down': (0, 0,-1), # down\n 'forward': (1, 0, 0), # forward\n '<end>': (0, 0, 0), # <end>\n '<start>': (0, 0, 0), # <start>\n '<ignore>': (0, 0, 0) # <ignore>\n }\n\n def __init__(self, env, listener, tok):\n self.env = env\n self.feature_size = self.env.feature_size\n self.tok = tok\n self.tok.finalize()\n self.listener = listener\n\n # Model\n print(\"VOCAB_SIZE\", self.tok.vocab_size())\n self.encoder = model.SpeakerEncoder(self.feature_size+args.angle_feat_size, args.rnn_dim, args.dropout, bidirectional=args.bidir).cuda()\n self.decoder = model.SpeakerDecoder(self.tok.vocab_size(), args.wemb, self.tok.word_to_index['<PAD>'],\n args.rnn_dim, args.dropout).cuda()\n self.encoder_optimizer = args.optimizer(self.encoder.parameters(), lr=args.lr)\n self.decoder_optimizer = args.optimizer(self.decoder.parameters(), lr=args.lr)\n\n # Evaluation\n self.softmax_loss = torch.nn.CrossEntropyLoss(ignore_index=self.tok.word_to_index['<PAD>'])\n\n # Will be used in beam search\n self.nonreduced_softmax_loss = torch.nn.CrossEntropyLoss(\n ignore_index=self.tok.word_to_index['<PAD>'],\n size_average=False,\n reduce=False\n )\n\n def train(self, iters):\n for i in range(iters):\n self.env.reset()\n\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n\n # t0 = time.time()\n loss = self.teacher_forcing(train=True)\n # t1 = time.time()\n # print('iter: {:0>3d}, time: {:.4f}'.format(i, t1 - t0))\n\n loss.backward()\n torch.nn.utils.clip_grad_norm(self.encoder.parameters(), 40.)\n torch.nn.utils.clip_grad_norm(self.decoder.parameters(), 40.)\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n\n def get_insts(self, wrapper=(lambda x: x)):\n # Get the caption for all the data\n self.env.reset_epoch(shuffle=True)\n path2inst = {}\n total = self.env.size()\n for _ in wrapper(range(total // self.env.batch_size + 1)): # Guarantee that all the data are processed\n obs = self.env.reset()\n insts = self.infer_batch() # Get the insts of the result\n path_ids = [ob['path_id'] for ob in obs] # Gather the path ids\n for path_id, inst in zip(path_ids, insts):\n if path_id not in path2inst:\n path2inst[path_id] = self.tok.shrink(inst) # Shrink the words\n return path2inst\n\n def valid(self, *aargs, **kwargs):\n \"\"\"\n\n :param iters:\n :return: path2inst: path_id --> inst (the number from <bos> to <eos>)\n loss: The XE loss\n word_accu: per word accuracy\n sent_accu: per sent accuracy\n \"\"\"\n path2inst = self.get_insts(*aargs, **kwargs)\n\n # Calculate the teacher-forcing metrics\n self.env.reset_epoch(shuffle=True)\n N = 1 if args.fast_train else 3 # Set the iter to 1 if the fast_train (o.w. 
the problem occurs)\n metrics = np.zeros(3)\n for i in range(N):\n self.env.reset()\n metrics += np.array(self.teacher_forcing(train=False))\n metrics /= N\n\n return (path2inst, *metrics)\n\n def make_equiv_action(self, a_t, perm_obs, perm_idx=None, traj=None):\n if perm_idx is None:\n perm_idx = range(len(perm_obs))\n actions = [[]] * self.env.batch_size # batch * action_len\n max_len = 0 # for padding stop action\n for i, idx in enumerate(perm_idx):\n action = a_t[i]\n if action != -1: # -1 is the <stop> action\n select_candidate = perm_obs[i]['candidate'][action]\n src_point = perm_obs[i]['viewIndex']\n trg_point = select_candidate['pointId']\n src_level = (src_point) // 12 # The point idx started from 0\n trg_level = (trg_point) // 12\n src_heading = (src_point) % 12\n trg_heading = (trg_point) % 12\n # adjust elevation\n if trg_level > src_level:\n actions[idx] = actions[idx] + [self.env_actions['up']] * int(trg_level - src_level)\n elif trg_level < src_level:\n actions[idx] = actions[idx] + [self.env_actions['down']] * int(src_level - trg_level)\n # adjust heading\n if trg_heading > src_heading:\n dif = trg_heading - src_heading\n if dif >= 6: # turn left\n actions[idx] = actions[idx] + [self.env_actions['left']] * int(12 - dif)\n else: # turn right\n actions[idx] = actions[idx] + [self.env_actions['right']] * int(dif)\n elif trg_heading < src_heading:\n dif = src_heading - trg_heading\n if dif >=6: # turn right\n actions[idx] = actions[idx] + [self.env_actions['right']] * int(12 - dif)\n else: # turn left\n actions[idx] = actions[idx] + [self.env_actions['left']] * int(dif)\n\n actions[idx] = actions[idx] + [(select_candidate['idx'], 0, 0)]\n max_len = max(max_len, len(actions[idx]))\n\n for idx in perm_idx:\n if len(actions[idx]) < max_len:\n actions[idx] = actions[idx] + [self.env_actions['<end>']] * (max_len - len(actions[idx]))\n actions = np.array(actions, dtype = 'float32')\n \n for i in range(max_len):\n cur_actions = actions[:,i]\n cur_actions = list(cur_actions)\n cur_actions = [tuple(a) for a in cur_actions]\n self.env.env.makeActions(cur_actions)\n if traj is not None:\n state = self.env.env.sim.getState()\n for j, idx in enumerate(perm_idx):\n if cur_actions[idx] != self.env_actions['<end>']:\n traj[j]['path'].append((state[idx].location.viewpointId, state[idx].heading, state[idx].elevation))\n\n def _teacher_action(self, obs, ended, tracker=None):\n \"\"\"\n Extract teacher actions into variable.\n :param obs: The observation.\n :param ended: Whether the action seq is ended\n :return:\n \"\"\"\n a = np.zeros(len(obs), dtype=np.int64)\n for i, ob in enumerate(obs):\n if ended[i]: # Just ignore this index\n a[i] = args.ignoreid\n else:\n for k, candidate in enumerate(ob['candidate']):\n if candidate['viewpointId'] == ob['teacher']: # Next view point\n a[i] = k\n break\n else: # Stop here\n assert ob['teacher'] == ob['viewpoint'] # The teacher action should be \"STAY HERE\"\n a[i] = len(ob['candidate'])\n return torch.from_numpy(a).cuda()\n\n def _candidate_variable(self, obs, actions):\n candidate_feat = np.zeros((len(obs), self.feature_size + args.angle_feat_size), dtype=np.float32)\n for i, (ob, act) in enumerate(zip(obs, actions)):\n if act == -1: # Ignore or Stop --> Just use zero vector as the feature\n pass\n else:\n c = ob['candidate'][act]\n candidate_feat[i, :] = np.concatenate([c['visual_feat'],c['angle_feat']], -1)\n return torch.from_numpy(candidate_feat).cuda()\n\n def from_shortest_path(self, viewpoints=None, get_first_feat=False):\n \"\"\"\n :param 
viewpoints: [[], [], ....(batch_size)]. Only for dropout viewpoint\n :param get_first_feat: whether output the first feat\n :return:\n \"\"\"\n obs = self.env._get_obs()\n ended = np.array([False] * len(obs)) # Indices match permuation of the model, not env\n length = np.zeros(len(obs), np.int64)\n img_feats = []\n can_feats = []\n first_feat = np.zeros((len(obs), self.feature_size+args.angle_feat_size), np.float32)\n for i, ob in enumerate(obs):\n first_feat[i, -args.angle_feat_size:] = utils.angle_feature(ob['heading'], ob['elevation'])\n first_feat = torch.from_numpy(first_feat).cuda()\n while not ended.all():\n if viewpoints is not None:\n for i, ob in enumerate(obs):\n viewpoints[i].append(ob['viewpoint'])\n img_feats.append(self.listener._feature_variable(obs))\n teacher_action = self._teacher_action(obs, ended)\n teacher_action = teacher_action.cpu().numpy()\n for i, act in enumerate(teacher_action):\n if act < 0 or act == len(obs[i]['candidate']): # Ignore or Stop\n teacher_action[i] = -1 # Stop Action\n can_feats.append(self._candidate_variable(obs, teacher_action))\n self.make_equiv_action(teacher_action, obs)\n length += (1 - ended)\n ended[:] = np.logical_or(ended, (teacher_action == -1))\n obs = self.env._get_obs()\n img_feats = torch.stack(img_feats, 1).contiguous() # batch_size, max_len, 36, 2176\n can_feats = torch.stack(can_feats, 1).contiguous() # batch_size, max_len, 2176\n if get_first_feat:\n return (img_feats, can_feats, first_feat), length\n else:\n return (img_feats, can_feats), length\n\n def gt_words(self, obs):\n \"\"\"\n See \"utils.Tokenizer.encode_sentence(...)\" for \"instr_encoding\" details\n \"\"\"\n seq_tensor = np.array([ob['instr_encoding'] for ob in obs])\n return torch.from_numpy(seq_tensor).cuda()\n\n def teacher_forcing(self, train=True, features=None, insts=None, for_listener=False):\n if train:\n self.encoder.train()\n self.decoder.train()\n else:\n self.encoder.eval()\n self.decoder.eval()\n\n # Get Image Input & Encode\n if features is not None:\n # It is used in calulating the speaker score in beam-search\n assert insts is not None\n (img_feats, can_feats), lengths = features\n ctx = self.encoder(can_feats, img_feats, lengths)\n batch_size = len(lengths)\n else:\n obs = self.env._get_obs()\n batch_size = len(obs)\n (img_feats, can_feats), lengths = self.from_shortest_path() # Image Feature (from the shortest path)\n ctx = self.encoder(can_feats, img_feats, lengths)\n h_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()\n c_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()\n ctx_mask = utils.length2mask(lengths)\n\n # Get Language Input\n if insts is None:\n insts = self.gt_words(obs) # Language Feature\n \n # Decode\n logits, _, _ = self.decoder(insts, ctx, ctx_mask, h_t, c_t)\n\n # Because the softmax_loss only allow dim-1 to be logit,\n # So permute the output (batch_size, length, logit) --> (batch_size, logit, length)\n logits = logits.permute(0, 2, 1).contiguous()\n loss = self.softmax_loss(\n input = logits[:, :, :-1], # -1 for aligning\n target = insts[:, 1:] # \"1:\" to ignore the word <BOS>\n )\n\n if for_listener:\n return self.nonreduced_softmax_loss(\n input = logits[:, :, :-1], # -1 for aligning\n target = insts[:, 1:] # \"1:\" to ignore the word <BOS>\n )\n\n if train:\n return loss\n else:\n # Evaluation\n _, predict = logits.max(dim=1) # BATCH, LENGTH\n gt_mask = (insts != self.tok.word_to_index['<PAD>'])\n correct = (predict[:, :-1] == insts[:, 1:]) * gt_mask[:, 1:] # Not pad and equal to gt\n correct, gt_mask = 
correct.type(torch.LongTensor), gt_mask.type(torch.LongTensor)\n word_accu = correct.sum().item() / gt_mask[:, 1:].sum().item() # Exclude <BOS>\n sent_accu = (correct.sum(dim=1) == gt_mask[:, 1:].sum(dim=1)).sum().item() / batch_size # Exclude <BOS>\n return loss.item(), word_accu, sent_accu\n\n def infer_batch(self, sampling=False, train=False, featdropmask=None):\n \"\"\"\n\n :param sampling: if not, use argmax. else use softmax_multinomial\n :param train: Whether in the train mode\n :return: if sampling: return insts(np, [batch, max_len]),\n log_probs(torch, requires_grad, [batch,max_len])\n hiddens(torch, requires_grad, [batch, max_len, dim})\n And if train: the log_probs and hiddens are detached\n if not sampling: returns insts(np, [batch, max_len])\n \"\"\"\n if train:\n self.encoder.train()\n self.decoder.train()\n else:\n self.encoder.eval()\n self.decoder.eval()\n\n # Image Input for the Encoder\n obs = self.env._get_obs()\n batch_size = len(obs)\n viewpoints_list = [list() for _ in range(batch_size)]\n\n # Get feature\n (img_feats, can_feats), lengths = self.from_shortest_path(viewpoints=viewpoints_list) # Image Feature (from the shortest path)\n\n # This code block is only used for the featdrop.\n if featdropmask is not None:\n img_feats[..., :-args.angle_feat_size] *= featdropmask\n can_feats[..., :-args.angle_feat_size] *= featdropmask\n\n # Encoder\n ctx = self.encoder(can_feats, img_feats, lengths,\n already_dropfeat=(featdropmask is not None))\n ctx_mask = utils.length2mask(lengths)\n\n # Decoder\n words = []\n log_probs = []\n hidden_states = []\n entropies = []\n h_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()\n c_t = torch.zeros(1, batch_size, args.rnn_dim).cuda()\n ended = np.zeros(len(obs), np.bool)\n word = np.ones(len(obs), np.int64) * self.tok.word_to_index['<BOS>'] # First word is <BOS>\n word = torch.from_numpy(word).view(-1, 1).cuda()\n for i in range(args.maxDecode):\n # Decode Step\n logits, h_t, c_t = self.decoder(word, ctx, ctx_mask, h_t, c_t) # Decode, logits: (b, 1, vocab_size)\n\n # Select the word\n logits = logits.squeeze() # logits: (b, vocab_size)\n logits[:, self.tok.word_to_index['<UNK>']] = -float(\"inf\") # No <UNK> in infer\n if sampling:\n probs = F.softmax(logits, -1)\n m = torch.distributions.Categorical(probs)\n word = m.sample()\n log_prob = m.log_prob(word)\n if train:\n log_probs.append(log_prob)\n hidden_states.append(h_t.squeeze())\n entropies.append(m.entropy())\n else:\n log_probs.append(log_prob.detach())\n hidden_states.append(h_t.squeeze().detach())\n entropies.append(m.entropy().detach())\n else:\n values, word = logits.max(1)\n\n # Append the word\n cpu_word = word.cpu().numpy()\n cpu_word[ended] = self.tok.word_to_index['<PAD>']\n words.append(cpu_word)\n\n # Prepare the shape for next step\n word = word.view(-1, 1)\n\n # End?\n ended = np.logical_or(ended, cpu_word == self.tok.word_to_index['<EOS>'])\n if ended.all():\n break\n\n if train and sampling:\n return np.stack(words, 1), torch.stack(log_probs, 1), torch.stack(hidden_states, 1), torch.stack(entropies, 1)\n else:\n return np.stack(words, 1) # [(b), (b), (b), ...] 
--> [b, l]\n\n def save(self, epoch, path):\n ''' Snapshot models '''\n the_dir, _ = os.path.split(path)\n os.makedirs(the_dir, exist_ok=True)\n states = {}\n def create_state(name, model, optimizer):\n states[name] = {\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n all_tuple = [(\"encoder\", self.encoder, self.encoder_optimizer),\n (\"decoder\", self.decoder, self.decoder_optimizer)]\n for param in all_tuple:\n create_state(*param)\n torch.save(states, path)\n\n def load(self, path):\n ''' Loads parameters (but not training state) '''\n print(\"Load the speaker's state dict from %s\" % path)\n states = torch.load(path)\n def recover_state(name, model, optimizer):\n # print(name)\n # print(list(model.state_dict().keys()))\n # for key in list(model.state_dict().keys()):\n # print(key, model.state_dict()[key].size())\n state = model.state_dict()\n state.update(states[name]['state_dict'])\n model.load_state_dict(state)\n if args.loadOptim:\n optimizer.load_state_dict(states[name]['optimizer'])\n all_tuple = [(\"encoder\", self.encoder, self.encoder_optimizer),\n (\"decoder\", self.decoder, self.decoder_optimizer)]\n for param in all_tuple:\n recover_state(*param)\n return states['encoder']['epoch'] - 1\n\n"
] |
[
[
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torch.load",
"torch.zeros",
"torch.from_numpy",
"numpy.stack",
"numpy.logical_or",
"numpy.concatenate",
"torch.distributions.Categorical",
"torch.stack",
"numpy.array",
"numpy.zeros",
"torch.save"
]
] |
akiaohk/Udacity-Disaster-Response-Pipelines
|
[
"9a6042a0d288381c1310de1948121bccf647f418"
] |
[
"data/process_data.py"
] |
[
"\"\"\"\nProject: Disaster Response Pipeline\n\nScript Syntax for execution:\n> python process_data.py <path to messages csv file> <path to categories csv file> <path to sqllite destination db>\n> python process_data.py disaster_messages.csv disaster_categories.csv DisasterResponse.db\n\"\"\"\n\n# Import libraries\nimport sys\nimport pandas as pd\nimport sqlite3\nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath): \n \"\"\"\n Load messages and categories datasets and merge using common id function.\n Arguments:\n messages_filepath -> csv path of file containing messages\n categories_filepath -> csv path of file containing categories\n Output:\n df -> combined dataset of messages and categories\n \"\"\"\n \n # load messages dataset\n messages = pd.read_csv(messages_filepath)\n # load categories dataset\n categories = pd.read_csv(categories_filepath)\n # merge datasets\n df = messages.merge(categories, on = ['id'])\n return df\n\ndef clean_data(df): \n \"\"\"\n Clean Categories Data Function\n \n Arguments:\n df -> combined data containing messages and categories\n Outputs:\n df -> combined data containing messages and categories with categories cleaned up\n \"\"\"\n \n # split the values in the categories column on the ';'\n categories = df.categories.str.split(';', expand=True)\n\n # use the first row of categories dataframe to create column names for the categories data\n row = categories.iloc[0]\n category_colnames = row.map(lambda x: str(x)[:-2])\n categories.columns = category_colnames\n\n # convert category values to just numbers 0 or 1\n for column in categories:\n categories[column] = pd.Series([str(x)[-1] for x in categories[column]])\n categories[column] = categories[column].astype(int)\n \n # replace categories column in df with the new category columns\n df.drop(columns=['categories'], inplace=True)\n df = pd.concat([df, categories], axis=1)\n df.drop_duplicates(inplace=True)\n # Remove child_alone as it has all zeros\n df = df.drop(['child_alone'],axis=1)\n # There is a category 2 in 'related' column. This could be an error. \n # In the absense of any information, we assume it to be 1 as the majority class.\n df['related']=df['related'].map(lambda x: 1 if x == 2 else x)\n\n return df\n\n\n\ndef save_data(df, database_filename):\n \"\"\"\n Save the clean dataset into an sqlite database function.\n Arguments:\n df -> combined dataset of messages and categories cleaned\n database_filename -> path to SQLite database\n \"\"\" \n \n # save the clean dataset into an sqlite database\n engine = create_engine('sqlite:///' + database_filename)\n conn = sqlite3.connect('data/DisasterResponse.db')\n #df.to_sql('disaster_response_clean', con = conn, if_exists='replace', index=False)\n table_name = database_filename.replace(\".db\",\"\") + \"_table\"\n df.to_sql(table_name, con = conn, if_exists='replace', index=False)\n\n\ndef main():\n \"\"\"\n Main function which will kick off the data processing functions. 
There are three primary actions taken by this function:\n 1) Load Messages Data with Categories\n 2) Clean Categories Data\n 3) Save Data to SQLite Database\n \"\"\"\n \n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()\n\n \n"
] |
[
[
"pandas.concat",
"pandas.read_csv"
]
] |
granatb/mlops_handin
|
[
"b0992be9667bf7f1e226efd0174289327a548efb"
] |
[
"src/models/model_lightning.py"
] |
[
"import os\nimport sys\nfrom typing import Callable, List, Optional, Tuple, Union\n\nimport matplotlib.pyplot as plt # type: ignore\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom pytorch_lightning import loggers\nfrom torch import nn\nfrom torch.utils.data import Dataset\n\nsys.path.insert(1, os.path.join(sys.path[0], \"..\"))\nimport wandb\nfrom data.make_dataset import MNISTdata\nimport torchdrift\nimport copy\n\nclass MyLightningModel(pl.LightningModule):\n def __init__(self, hidden_size: int, output_size: int, drop_p: float = 0.3) -> None:\n \"\"\"Builds a feedforward network with arbitrary hidden layers.\n\n Arguments\n ---------\n hidden_size: integer, size of dense layer\n output_size: number of classes\n drop_p: dropout rate\n\n \"\"\"\n super().__init__()\n # Input to a hidden layer\n self.num_classes = output_size\n\n self.arch = nn.Sequential(\n nn.Conv2d(\n in_channels=1, out_channels=16, kernel_size=3, padding=1, stride=1\n ),\n # convolution output dim (16, 28, 28)\n nn.BatchNorm2d(16),\n nn.MaxPool2d(kernel_size=2, stride=2),\n # pooling output dim (16, 14, 14)\n nn.ReLU(inplace=True),\n nn.Conv2d(in_channels=16, out_channels=8, kernel_size=5, padding=2),\n nn.Dropout2d(p=drop_p),\n # convolution output dim (8, 14, 14)\n nn.MaxPool2d(kernel_size=2, stride=2),\n # polling output dim (8, 7, 7)\n nn.ReLU(inplace=True),\n )\n\n # fully connected output layers\n # [(W−K+2P)/S]+1\n self.fc1_features = 8 * 7 * 7\n self.fc1 = nn.Linear(in_features=self.fc1_features, out_features=hidden_size)\n self.fc2 = nn.Linear(in_features=hidden_size, out_features=self.num_classes)\n\n def forward(self, x):\n\n x = self.arch(x)\n x = x.view(-1, self.fc1_features)\n x = F.relu(self.fc1(x))\n\n return F.log_softmax(self.fc2(x), dim=1)\n\n def training_step(self, batch, batch_idx):\n # training_step defines the train loop. 
It is independent of forward\n images, labels = batch\n x = self.arch(images)\n x = x.view(-1, self.fc1_features)\n x = F.relu(self.fc1(x))\n x_hat = F.log_softmax(self.fc2(x), dim=1)\n loss = F.nll_loss(x_hat, labels)\n self.log(\"train_loss\", loss)\n self.logger.experiment.log({\"logits\": wandb.Histogram(x_hat.detach().numpy())})\n return loss\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)\n return optimizer\n\n def validation_step(self, batch, batch_idx):\n images, labels = batch\n y_hat = self(images)\n val_loss = F.nll_loss(y_hat, labels)\n self.log(\"val_loss\", val_loss)\n return val_loss\n\ndef corruption_function(x: torch.Tensor):\n return torchdrift.data.functional.gaussian_blur(x, severity=2)\n\n\ndef main():\n\n train_data = torch.load(\"data/processed/train.pth\")\n test_data = torch.load(\"data/processed/test.pth\")\n\n trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)\n testloader = torch.utils.data.DataLoader(test_data, batch_size=64, shuffle=True)\n\n model = MyLightningModel(128, 10)\n wd_logger = loggers.WandbLogger(name=\"test\")\n trainer = pl.Trainer(logger=wd_logger, max_epochs=5)\n\n trainer.fit(model, trainloader, testloader)\n\n inputs, _ = next(iter(testloader))\n inputs_ood = corruption_function(inputs)\n\n N = 6\n model.eval()\n inps = torch.cat([inputs[:N], inputs_ood[:N]])\n model.cpu()\n # predictions = model.predict(inps).max(1).indices\n\n feature_extractor = copy.deepcopy(model)\n feature_extractor.classifier = torch.nn.Identity()\n\n drift_detector = torchdrift.detectors.KernelMMDDriftDetector()\n \n torchdrift.utils.fit(trainloader, feature_extractor, drift_detector)\n\n drift_detection_model = torch.nn.Sequential(\n feature_extractor,\n drift_detector\n )\n\n features = feature_extractor(inputs)\n score = drift_detector(features)\n p_val = drift_detector.compute_p_value(features)\n print(f'score: {score}, p_val: {p_val}')\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout2d",
"torch.nn.functional.nll_loss",
"torch.cat",
"torch.load",
"torch.nn.Conv2d",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torch.nn.Identity",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
marcvergees/rieffel_method
|
[
"5377284c10010691238f10d5d6f77935c44d8f3d"
] |
[
"neural_network.py"
] |
[
"# Big Data, Xarxes Neuronals i Màrqueting: la clau de l'èxit?\r\n# Treball de recerca (TR)\r\n# Marc Vergés Santiago - Escola Pia Mataró\r\n#\r\n#\r\n#\r\n# Copyright (c) 2021, Marc Vergés Santiago\r\n# All rights reserved.\r\n#\r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n# * Redistributions of source code must retain the above copyright\r\n# notice, this list of conditions and the following disclaimer.\r\n# * Redistributions in binary form must reproduce the above copyright\r\n# notice, this list of conditions and the following disclaimer in the\r\n# documentation and/or other materials provided with the distribution.\r\n# * Neither the name of the <organization> nor the\r\n# names of its contributors may be used to endorse or promote products\r\n# derived from this software without specific prior written permission.\r\n#\r\n# THIS SOFTWARE IS PROVIDED BY MARC VERGÉS ''AS IS'' AND ANY\r\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\r\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\r\n# DISCLAIMED. IN NO EVENT SHALL <copyright holder> BE LIABLE FOR ANY\r\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\r\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\r\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\r\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\r\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n# SOFTWARE\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom keras.utils import np_utils\r\nimport keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout\r\nfrom keras.callbacks import EarlyStopping\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import classification_report\r\nimport matplotlib.pyplot as plt\r\nimport instaloader\r\nfrom contrasenyes import usuari, contrasenya\r\n\r\n\r\ndef profile_preferences_to_NN(user):\r\n L = instaloader.Instaloader()\r\n\r\n L.login(usuari, contrasenya)\r\n\r\n list_to_append_csv = []\r\n none = 0\r\n creators_celebrities = 0\r\n personal_gods = 0\r\n local_events = 0\r\n professional_services = 0\r\n restaurants = 0\r\n non_profits = 0\r\n general_interest = 0\r\n publishers = 0\r\n transportation_and_accomodation = 0\r\n business_and_utility = 0\r\n home_services = 0\r\n auto_dealers = 0\r\n food_and_personal_goods = 0\r\n government_agencies = 0\r\n content_apps = 0\r\n grocery = 0\r\n entities = 0\r\n lifestyle_services = 0\r\n geography = 0\r\n\r\n profile = instaloader.Profile.from_username(L.context, user)\r\n preferences = []\r\n for followee in profile.get_followees():\r\n preferences.append(followee.business_category_name)\r\n print(followee.username + \" - \" + str(followee.business_category_name))\r\n if followee.business_category_name == \"None\":\r\n none += 1\r\n if followee.business_category_name == \"Creators & Celebrities\":\r\n creators_celebrities += 1\r\n if followee.business_category_name == \"Personal Goods & General Merchandise Stores\":\r\n personal_gods += 1\r\n if followee.business_category_name == \"Local Events\":\r\n local_events += 1\r\n if followee.business_category_name == \"Professional Services\":\r\n professional_services += 1\r\n if followee.business_category_name == 
\"Restaurants\":\r\n restaurants += 1\r\n if followee.business_category_name == \"Non-Profits & Religious Organizations\":\r\n non_profits += 1\r\n if followee.business_category_name == \"General Interest\":\r\n general_interest += 1\r\n if followee.business_category_name == \"Publishers\":\r\n publishers += 1\r\n if followee.business_category_name == \"Transportation & Accomodation Services\":\r\n transportation_and_accomodation += 1\r\n if followee.business_category_name == \"Business & Utility Services\":\r\n business_and_utility += 1\r\n if followee.business_category_name == \"Home Services\":\r\n home_services += 1\r\n if followee.business_category_name == \"Auto Dealers\":\r\n auto_dealers += 1\r\n if followee.business_category_name == \"Food & Personal Goods\":\r\n food_and_personal_goods += 1\r\n if followee.business_category_name == \"Government Agencies\":\r\n government_agencies += 1\r\n if followee.business_category_name == \"Content & Apps\":\r\n content_apps += 1\r\n if followee.business_category_name == \"Grocery & Convenience Stores\":\r\n grocery += 1\r\n if followee.business_category_name == \"Entities\":\r\n entities += 1\r\n if followee.business_category_name == \"Lifestyle Services\":\r\n lifestyle_services += 1\r\n if followee.business_category_name == \"Geography\":\r\n geography += 1\r\n\r\n print(preferences)\r\n\r\n print(\"None: \" + str(none))\r\n print(\"Creators & Celebrities: \" + str(creators_celebrities))\r\n print(\"Personal Goods & General Merchandise Stores: \" + str(personal_gods))\r\n print(\"Local Events: \" + str(local_events))\r\n print(\"Professional Services: \" + str(professional_services))\r\n print(\"Restaurants: \" + str(restaurants))\r\n print(\"Non-Profits & Religious Organizations: \" + str(non_profits))\r\n print(\"General Interest: \" + str(general_interest))\r\n print(\"Publishers: \" + str(publishers))\r\n print(\"Transportation & Accomodation Services: \" + str(transportation_and_accomodation))\r\n print(\"Business & Utility Services: \" + str(business_and_utility))\r\n print(\"Home Services: \" + str(home_services))\r\n print(\"Auto Dealers: \" + str(auto_dealers))\r\n print(\"Food & Personal Goods: \" + str(food_and_personal_goods))\r\n print(\"Government Agencies: \" + str(government_agencies))\r\n print(\"Content & Apps: \" + str(content_apps))\r\n print(\"Grocery & Convenience Stores: \" + str(grocery))\r\n print(\"Entities: \" + str(entities))\r\n print(\"Lifestyle Services: \" + str(lifestyle_services))\r\n print(\"Geography: \" + str(geography))\r\n\r\n followers = 0\r\n following = 0\r\n\r\n for follower in profile.get_followers():\r\n followers += 1\r\n for follower in profile.get_followees():\r\n following += 1\r\n\r\n return preferences\r\n\r\ndef neural_network(list):\r\n # url = 'https://gist.githubusercontent.com/curran/a08a1080b88344b0c8a7/raw/639388c2cbc2120a14dcf466e85730eb8be498bb/iris.csv'\r\n df = pd.read_csv(\"data_set3.csv\")\r\n df = df.sample(frac=1).reset_index(drop=True)\r\n Y = df['Tematica']\r\n print(Y) # output\r\n X = df.drop(['Tematica'], axis=1)\r\n print(X) # input o dataset\r\n print(X.shape)\r\n print(Y.shape)\r\n X = np.array(X)\r\n Y.head()\r\n encoder = LabelEncoder()\r\n encoder.fit(Y)\r\n encoded_Y = encoder.transform(Y)\r\n dummy_y = np_utils.to_categorical(encoded_Y, 10)\r\n print(encoded_Y)\r\n print(dummy_y)\r\n model = Sequential()\r\n model.add(Dense(16, input_shape=(X.shape[1],), activation='relu')) # input shape is (features,)\r\n model.add(Dense(16, input_shape=(X.shape[1],), 
activation='relu')) # input shape is (features,)\r\n model.add(Dense(10, activation='softmax'))\r\n model.summary()\r\n\r\n # compile the model\r\n model.compile(optimizer='rmsprop',\r\n loss='categorical_crossentropy',\r\n # this is different instead of binary_crossentropy (for regular classification)\r\n metrics=['accuracy'])\r\n es = keras.callbacks.EarlyStopping(monitor='val_loss',\r\n mode='min',\r\n patience=10,\r\n restore_best_weights=True) # important - otherwise you just return the last weigths...\r\n '''\r\n # now we just update our model fit call\r\n history = model.fit(X,\r\n dummy_y,\r\n callbacks=[es],\r\n epochs=200, # you can set this to a big number!\r\n batch_size=1,\r\n shuffle=True,\r\n validation_split=0.2,\r\n verbose=1)\r\n es = keras.callbacks.EarlyStopping(monitor='val_loss',\r\n mode='min',\r\n patience=10,\r\n restore_best_weights=True) # important - otherwise you just return the last weigths...\r\n '''\r\n # now we just update our model fit call\r\n history = model.fit(X,\r\n dummy_y,\r\n callbacks=[es],\r\n epochs=50, # you can set this to a big number!\r\n batch_size=2,\r\n shuffle=True,\r\n validation_split=0.2,\r\n verbose=1)\r\n\r\n history_dict = history.history\r\n\r\n # learning curve\r\n # accuracy\r\n acc = history_dict['accuracy']\r\n val_acc = history_dict['val_accuracy']\r\n\r\n # loss\r\n loss = history_dict['loss']\r\n val_loss = history_dict['val_loss']\r\n\r\n # range of X (no. of epochs)\r\n epochs = range(1, len(acc) + 1)\r\n\r\n # plot\r\n # \"r\" is for \"solid red line\"\r\n plt.plot(epochs, acc, 'r', label='Training accuracy')\r\n # b is for \"solid blue line\"\r\n plt.plot(epochs, val_acc, 'b', label='Validation accuracy')\r\n plt.title('Training and validation accuracy')\r\n plt.xlabel('Epochs')\r\n plt.ylabel('Accuracy')\r\n plt.legend()\r\n\r\n plt.show()\r\n\r\n preds = model.predict(X) # see how the model did!\r\n print(preds[0]) # i'm spreading that prediction across three nodes and they sum to 1\r\n print(np.sum(preds[0])) # sum it up! Should be 1\r\n ## [9.9999988e-01 1.3509347e-07 6.7064638e-16]\r\n ## 1.0\r\n\r\n # Almost a perfect prediction\r\n # actual is left, predicted is top\r\n # names can be found by inspecting Y\r\n matrix = confusion_matrix(dummy_y.argmax(axis=1), preds.argmax(axis=1))\r\n matrix\r\n ## array([[50, 0, 0],\r\n ## [ 0, 46, 4],\r\n ## [ 0, 1, 49]])\r\n\r\n # more detail on how well things were predicted\r\n print(classification_report(dummy_y.argmax(axis=1), preds.argmax(axis=1)))\r\n\r\n model.predict(list, batch_size=1, verbose=1)\r\n"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.array",
"sklearn.preprocessing.LabelEncoder",
"numpy.sum",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
jairideout/q2-feature-table
|
[
"494e0b8080799c746c55be2271278891798b8e56"
] |
[
"q2_feature_table/tests/test_merge.py"
] |
[
"# ----------------------------------------------------------------------------\n# Copyright (c) 2016-2017, QIIME 2 development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport unittest\n\nimport skbio\nimport numpy as np\nfrom biom.table import Table\nimport pandas as pd\nimport pandas.util.testing as pdt\n\nfrom q2_feature_table import (merge, merge_seq_data,\n merge_taxa_data)\nfrom q2_feature_table._merge import _merge_feature_data\n\n\nclass MergeTableTests(unittest.TestCase):\n\n def test_valid_overlapping_feature_ids(self):\n t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),\n ['O1', 'O2'],\n ['S1', 'S2', 'S3'])\n t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),\n ['O1', 'O3'],\n ['S4', 'S5', 'S6'])\n obs = merge(t1, t2)\n exp = Table(np.array([[0, 1, 3, 0, 2, 6], [1, 1, 2, 0, 0, 0],\n [0, 0, 0, 2, 2, 4]]),\n ['O1', 'O2', 'O3'],\n ['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])\n self.assertEqual(obs, exp)\n\n def test_valid_non_overlapping_feature_ids(self):\n t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),\n ['O1', 'O2'],\n ['S1', 'S2', 'S3'])\n t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),\n ['O3', 'O4'],\n ['S4', 'S5', 'S6'])\n obs = merge(t1, t2)\n exp = Table(np.array([[0, 1, 3, 0, 0, 0], [1, 1, 2, 0, 0, 0],\n [0, 0, 0, 0, 2, 6], [0, 0, 0, 2, 2, 4]]),\n ['O1', 'O2', 'O3', 'O4'],\n ['S1', 'S2', 'S3', 'S4', 'S5', 'S6'])\n self.assertEqual(obs, exp)\n\n def test_invalid_overlapping_sample_ids(self):\n t1 = Table(np.array([[0, 1, 3], [1, 1, 2]]),\n ['O1', 'O2'],\n ['S1', 'S2', 'S3'])\n t2 = Table(np.array([[0, 2, 6], [2, 2, 4]]),\n ['O1', 'O3'],\n ['S1', 'S5', 'S6'])\n with self.assertRaises(ValueError):\n merge(t1, t2)\n\n\nclass MergeFeatureDataTests(unittest.TestCase):\n\n def test_valid_overlapping_feature_ids(self):\n d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])\n d2 = pd.Series(['ACGT', 'ACCA'], index=['f1', 'f3'])\n obs = _merge_feature_data(d1, d2)\n exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])\n pdt.assert_series_equal(obs, exp)\n\n def test_first_feature_data_retained(self):\n d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])\n d2 = pd.Series(['ACGAAA', 'ACCA'], index=['f1', 'f3'])\n\n obs = _merge_feature_data(d1, d2)\n exp = pd.Series(['ACGT', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])\n pdt.assert_series_equal(obs, exp)\n\n # swapping input order changes f1 data\n obs = _merge_feature_data(d2, d1)\n exp = pd.Series(['ACGAAA', 'ACCT', 'ACCA'], index=['f1', 'f2', 'f3'])\n pdt.assert_series_equal(obs, exp)\n\n def test_valid_non_overlapping_feature_ids(self):\n d1 = pd.Series(['ACGT', 'ACCT'], index=['f1', 'f2'])\n d2 = pd.Series(['ACGT', 'ACCA'], index=['f3', 'f4'])\n obs = _merge_feature_data(d1, d2)\n exp = pd.Series(['ACGT', 'ACCT', 'ACGT', 'ACCA'],\n index=['f1', 'f2', 'f3', 'f4'])\n pdt.assert_series_equal(obs, exp)\n\n\nclass MergeFeatureSequenceTests(unittest.TestCase):\n # More extensive testing is performed in MergeFeatureDataTests, which\n # tests the shared private API.\n\n def test_merge_seq_data(self):\n d1 = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),\n skbio.DNA('ACCT', metadata={'id': 'xyz'})],\n index=['f1', 'f2'])\n d2 = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),\n skbio.DNA('ACCA', metadata={'id': 'wxy'})],\n index=['f1', 'f3'])\n obs = merge_seq_data(d1, d2)\n exp = pd.Series([skbio.DNA('ACGT', metadata={'id': 'abc'}),\n 
skbio.DNA('ACCT', metadata={'id': 'xyz'}),\n skbio.DNA('ACCA', metadata={'id': 'wxy'})],\n index=['f1', 'f2', 'f3'])\n pdt.assert_series_equal(obs, exp)\n\n\nclass MergeFeatureTaxonomyTests(unittest.TestCase):\n # More extensive testing is performed in MergeFeatureDataTests, which\n # tests the shared private API.\n\n def test_merge_taxa_data(self):\n # this test calls the public API directly\n d1 = pd.Series(['a;b;c;d', 'a;b;c;e'], index=['f1', 'f2'])\n d2 = pd.Series(['a;b;c;d', 'a;b;c;e'], index=['f1', 'f3'])\n obs = merge_taxa_data(d1, d2)\n exp = pd.Series(['a;b;c;d', 'a;b;c;e', 'a;b;c;e'],\n index=['f1', 'f2', 'f3'])\n pdt.assert_series_equal(obs, exp)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.array",
"pandas.util.testing.assert_series_equal",
"pandas.Series"
]
] |
dashstander/cog
|
[
"0aee3c9ef50ac346d053010e39c4e7becbbcb70d"
] |
[
"python/tests/server/test_http_output.py"
] |
[
"import base64\nimport io\nimport os\nimport tempfile\n\nimport numpy as np\nfrom PIL import Image\nimport responses\nfrom responses.matchers import multipart_matcher\n\nfrom cog import BaseModel, BasePredictor, Path, File\nfrom .test_http import make_client\n\n\ndef test_return_wrong_type():\n class Predictor(BasePredictor):\n def predict(self) -> int:\n return \"foo\"\n\n client = make_client(Predictor(), raise_server_exceptions=False)\n resp = client.post(\"/predictions\")\n assert resp.status_code == 500\n\n\ndef test_path_output_path():\n class Predictor(BasePredictor):\n def predict(self) -> Path:\n temp_dir = tempfile.mkdtemp()\n temp_path = os.path.join(temp_dir, \"my_file.bmp\")\n img = Image.new(\"RGB\", (255, 255), \"red\")\n img.save(temp_path)\n return Path(temp_path)\n\n client = make_client(Predictor())\n res = client.post(\"/predictions\")\n assert res.status_code == 200\n header, b64data = res.json()[\"output\"].split(\",\", 1)\n # need both image/bmp and image/x-ms-bmp until https://bugs.python.org/issue44211 is fixed\n assert header in [\"data:image/bmp;base64\", \"data:image/x-ms-bmp;base64\"]\n assert len(base64.b64decode(b64data)) == 195894\n\n\[email protected]\ndef test_output_path_to_http():\n class Predictor(BasePredictor):\n def predict(self) -> Path:\n temp_dir = tempfile.mkdtemp()\n temp_path = os.path.join(temp_dir, \"file.txt\")\n with open(temp_path, \"w\") as fh:\n fh.write(\"hello\")\n return Path(temp_path)\n\n fh = io.BytesIO(b\"hello\")\n fh.name = \"file.txt\"\n responses.add(\n responses.PUT,\n \"http://example.com/upload/file.txt\",\n status=201,\n match=[multipart_matcher({\"file\": fh})],\n )\n\n client = make_client(Predictor())\n res = client.post(\n \"/predictions\", json={\"output_file_prefix\": \"http://example.com/upload/\"}\n )\n assert res.json() == {\n \"status\": \"succeeded\",\n \"output\": \"http://example.com/upload/file.txt\",\n }\n assert res.status_code == 200\n\n\ndef test_path_output_file():\n class Predictor(BasePredictor):\n def predict(self) -> File:\n return io.StringIO(\"hello\")\n\n client = make_client(Predictor())\n res = client.post(\"/predictions\")\n assert res.status_code == 200\n assert res.json() == {\n \"status\": \"succeeded\",\n \"output\": \"data:application/octet-stream;base64,aGVsbG8=\", # hello\n }\n\n\[email protected]\ndef test_output_file_to_http():\n class Predictor(BasePredictor):\n def predict(self) -> File:\n fh = io.StringIO(\"hello\")\n fh.name = \"foo.txt\"\n return fh\n\n responses.add(\n responses.PUT,\n \"http://example.com/upload/foo.txt\",\n status=201,\n match=[multipart_matcher({\"file\": (\"foo.txt\", b\"hello\")})],\n )\n\n client = make_client(Predictor())\n res = client.post(\n \"/predictions\", json={\"output_file_prefix\": \"http://example.com/upload/\"}\n )\n assert res.json() == {\n \"status\": \"succeeded\",\n \"output\": \"http://example.com/upload/foo.txt\",\n }\n assert res.status_code == 200\n\n\ndef test_json_output_numpy():\n class Predictor(BasePredictor):\n def predict(self) -> np.float64:\n return np.float64(1.0)\n\n client = make_client(Predictor())\n resp = client.post(\"/predictions\")\n assert resp.status_code == 200\n assert resp.json() == {\"output\": 1.0, \"status\": \"succeeded\"}\n\n\ndef test_complex_output():\n class Output(BaseModel):\n text: str\n file: File\n\n class Predictor(BasePredictor):\n def predict(self) -> Output:\n return Output(text=\"hello\", file=io.StringIO(\"hello\"))\n\n client = make_client(Predictor())\n resp = client.post(\"/predictions\")\n 
assert resp.json() == {\n \"output\": {\n \"file\": \"data:application/octet-stream;base64,aGVsbG8=\",\n \"text\": \"hello\",\n },\n \"status\": \"succeeded\",\n }\n assert resp.status_code == 200\n"
] |
[
[
"numpy.float64"
]
] |
hamk-uas/HAMK_Smart_City
|
[
"c9408ea1caac995522489a331207737b37971314"
] |
[
"rnn.py"
] |
[
"import pandas as pd\nimport numpy as np\nfrom tensorflow import Variable\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Dense, GRU, LSTM, SimpleRNN, BatchNormalization\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.model_selection import KFold\nfrom collections import deque\nimport matplotlib.pyplot as plt\nimport kerastuner as kt\nfrom datetime import datetime, date\nfrom joblib import dump, load\nimport os\nimport json\nimport csv\nimport math\n\nclass RNN:\n '''\n Parent class for RNN models.\n '''\n def __init__(self, quant=None, seq=4, fut=0, parameters=None):\n '''\n All parameters for class objects are defined here, child classes don't have __init__ methods\n Inputs: target quantities as list, sequence length as int, future period as int, input parameters as a list.\n '''\n self.quant = quant\n self.seq = seq\n self.fut = fut\n self.parameters = parameters\n self.date = date.today() # For bookkeeping purposes\n self.model = None # For storage of a model\n self.scaler = None # For storage of feature scaler\n self.name = None # Defined after training\n \n def preprocess(self, raw_data):\n '''\n Function for preprocessing downsampled data for sequence modeling.\n Inputs: Downsampled data frame with desired parameters defined in class attribute list in headers\n Output: Training input data, training target data, testing input data, testing target data, sklearn scaler object for inverse transformations\n '''\n raw_data.iloc[:,0] = pd.to_datetime(raw_data.iloc[:,0], format='%Y-%m-%d %H:%M:%S%z')\n vec = raw_data.iloc[:,0].values\n datetimes = np.array([[vec, vec], [vec, vec]], dtype = 'M8[ms]').astype('O')[0,1]\n raw_data['weekday'] = [t.timetuple().tm_wday for t in datetimes]\n raw_data['hours'] = [t.hour for t in datetimes]\n \n # Encode time parameters to cyclical features\n raw_data['hours_sin'] = np.sin(2 * np.pi * raw_data['hours']/24.0)\n raw_data['hours_cos'] = np.cos(2 * np.pi * raw_data['hours']/24.0)\n raw_data['weekday_sin'] = np.sin(2 * np.pi * raw_data['weekday']/7)\n raw_data['weekday_cos'] = np.cos(2 * np.pi * raw_data['weekday']/7)\n \n # Extend parameter list by quantity for picking data\n self.parameters.extend(self.quant)\n \n # Split the data to training and testing sets\n raw_data = raw_data[self.parameters].copy()\n df_train = raw_data[int(len(raw_data)*0.2):].copy()\n df_val = raw_data[:int(len(raw_data)*0.2)].copy()\n \n # Delete the quantity from parameter list to preserve the original inputs\n self.parameters = [x for x in self.parameters if x not in self.quant]\n \n # Scale all data features to range [0,1]\n self.scaler = MinMaxScaler()\n df_train = self.scaler.fit_transform(df_train)\n df_val = self.scaler.transform(df_val)\n \n # Next generate a list which will hold all of the sequences for training data\n sequences_train = []\n sequences_val = []\n prev_days_train = deque(maxlen=self.seq) # Placeholder for the sequences\n prev_days_val = deque(maxlen=self.seq)\n l_quant = len(self.quant)\n \n for count, row in enumerate(pd.DataFrame(df_train).values):\n prev_days_train.append([val for val in row[:-l_quant]]) # store everything but the target values\n\n if (len(prev_days_train) == self.seq): # This checks that our sequences are of the correct length and target value is at full hour\n if 
(any(pd.isna(pd.DataFrame(df_train).values[count-1][-l_quant:]))): # Test for 30 min data interval because of energy data gaps\n continue\n try:\n sequences_train.append([np.array(prev_days_train), pd.DataFrame(df_train).values[count+1][-l_quant:]])\n except IndexError:\n break\n \n for count, row in enumerate(pd.DataFrame(df_val).values):\n prev_days_val.append([val for val in row[:-l_quant]]) # store everything but the target values\n\n if (len(prev_days_val) == self.seq): # This checks that our sequences are of the correct length and target value is at full hour\n if (any(pd.isna(pd.DataFrame(df_val).values[count-1][-l_quant:]))): # Test for 30 min data interval because of energy data gaps\n continue\n try:\n sequences_val.append([np.array(prev_days_val), pd.DataFrame(df_val).values[count+1][-l_quant:]])\n except IndexError:\n break\n \n # Iterating through the sequences in order to differentiate X and y\n X_train = []\n y_train = []\n X_val = []\n y_val = []\n\n for seq, target in sequences_train:\n X_train.append(seq)\n y_train.append(target)\n \n for seq, target in sequences_val:\n X_val.append(seq)\n y_val.append(target)\n \n X_train = np.array(X_train)\n y_train = np.array(y_train)\n X_val = np.array(X_val)\n y_val = np.array(y_val)\n \n # Output the shapes of training and testing data.\n print(f'Shape of training data: {X_train.shape}')\n print(f'Shape of testing data: {X_val.shape}')\n \n return X_train, y_train, X_val, y_val\n \n def inv_target(self, X, preds, y_val):\n '''\n Method for inverting the scaling target variable\n Inputs: 3-dimensional data matrix used to train (or validate) the model, predictions obtained using the model,\n validation target vector and pre-fitted sklearn scaler. \n Note: the X tensor is more of a placeholder in this function used only for getting the dimensions correct.\n Output: Inversely transformed predictions and validation vectors\n '''\n \n preds = np.concatenate((X[:len(preds),-1], np.array(preds).reshape(len(preds), 1)), axis=1) # Reshape is necessary as there are issues with dimensions\n y_val = np.concatenate((X[:len(preds),-1], np.array(y_val[:len(preds)]).reshape(len(preds), 1)), axis=1)\n \n preds = self.scaler.inverse_transform(preds)[:,-1:]\n y_val = self.scaler.inverse_transform(y_val)[:,-1:]\n \n return preds, y_val\n \n def plot_preds(self, preds, y_val, low=[], up=[], conf=0.9):\n '''\n Producing plots of predictions with the measured values as time series.\n Inputs: predicted and measured values as numpy arrays.\n '''\n \n # Number of instances to plot.\n if len(low) != 0: # Check whether the list is empty.\n rounds = len(low)\n else:\n rounds = len(preds)\n \n plt.figure()\n \n plt.plot(preds[:rounds], color='navy', label='Predicted')\n plt.plot(y_val[:rounds], color='darkorange', label='Measured', marker='*')\n if len(low) != 0: # Check whether the list is empty.\n plt.fill_between(range(rounds), (preds[:rounds,0])+(low[:,0]), (preds[:rounds,0])+(up[:,0]), color='gray', alpha=0.25, label=f'{round(conf*100)}% prediction interval')\n plt.legend()\n plt.grid()\n plt.title(f'Predictions for {self.quant[0]} with {self.name}.')\n \n plt.show()\n \n def load_intervals(self, int_path, conf=0.9):\n '''\n Method for loading desired prediction intervals for ML forecasts.\n Inputs: path to the prediction interval .csv file, confidence level as float (0.5-0.99)\n '''\n \n # Load the predictions\n with open(int_path) as csvf:\n \n read_fil = csv.reader(csvf)\n percs = list(read_fil)\n \n percs = np.array([obj for obj in percs if obj])\n \n 
low_ind = round(((1-conf)/2 - 0.01) * 100)\n up_ind = round((conf + (1-conf)/2 - 0.01) * 100)\n \n # Select the desired intervals bounds. Reshape is necessary for following target inversion.\n lower, upper = percs[:,low_ind].reshape(len(percs), 1), percs[:,up_ind].reshape(len(percs), 1)\n \n return lower, upper\n \n #plt.figure()\n #\n #plt.plot(preds, label='Predicted')\n #plt.plot(y_val, label='Measured', marker='*')\n #plt.fill_between(range(len(preds)), (preds)+(percs[:,low_ind]), (preds)+(percs[:,up_ind]), color='gray', alpha=0.25, label=f'{round(100*conf)}% prediction interval')\n #plt.legend()\n #\n #plt.show()\n \n \n \n def save(self, path=rf'{os.getcwd()}'):\n '''\n Method for saving the model, scaler, and other attributes to compatible forms.\n Uses same folder as subclasses fit-method to save the information.\n Input: Desired path for saving the information.\n '''\n \n # Define the folder which the results are saved to\n new_fold_path = rf'{path}/{self.name}_{self.quant[0]}_{str(self.date)}'\n if not os.path.exists(new_fold_path): # Test whether the directory already exists\n os.makedirs(new_fold_path)\n print(f'Folder created on path: {new_fold_path}.')\n else:\n print(f'Savings results to {new_fold_path}.')\n \n # Save model to folder\n self.model.save(rf'{new_fold_path}/model.h5')\n print('Model saved.')\n \n # Save scaler to folder\n dump(self.scaler, rf'{new_fold_path}/scaler.joblib')\n print('Scaler saved.')\n \n # Save all other variables to json format to folder\n other_vars = {'name': self.name, 'quant': self.quant, 'seq': self.seq, 'fut': self.fut, 'parameters': self.parameters, 'date': str(self.date)}\n with open(rf'{new_fold_path}/vars.json', 'w') as f:\n json.dump(other_vars, f)\n print('Other variables saved.')\n \n def load(self, path):\n '''\n Loads RNN model information saved with .save method from location specified in function call.\n Stores the information by updating class attributes.\n Input: path of the storage directory\n '''\n \n # Load the model to class attribute\n self.model = load_model(rf'{path}/model.h5')\n print('Model loaded.')\n \n # Load the scaler\n self.scaler = load(rf'{path}/scaler.joblib')\n print('Scaler loaded.')\n \n # Load dictionary containing all other variables\n with open(rf'{path}/vars.json', 'r') as f:\n var_dict = json.load(f)\n \n # Place the variables to correct positions\n self.name = var_dict[\"name\"]\n self.quant = var_dict[\"quant\"]\n self.seq = var_dict[\"seq\"]\n self.fut = var_dict[\"fut\"]\n self.parameters = var_dict[\"parameters\"]\n self.date = var_dict[\"date\"]\n \n print('Other variables loaded.')\n \n def prediction_interval(self, X_train, y_train, x0, path=rf'{os.getcwd()}'):\n '''\n Compute bootstrap prediction interval around the models prediction on single data point x0.\n Inputs: pre-trained model, training input data, training output data, new input data row, number of rows to save,\n path for model saving.\n Output: Percentiles 0-100 for prediction intervals\n '''\n \n # Define output path for saving the percentile results.\n new_fold_path = rf'{path}/{self.name}_{self.quant[0]}_{str(self.date)}'\n if not os.path.exists(new_fold_path): # Test whether the directory already exists\n os.makedirs(new_fold_path)\n print(f'Folder created on path: {new_fold_path}.')\n else:\n print(f'Savings prediction intervals to {new_fold_path}.')\n \n # Local copy of the machine learning model. 
Done dut to weight and bias initialization done in the script.\n model = self.model\n \n # Number of training samples\n n = X_train.shape[0]\n \n # Calculate the next prediction to be output in the end\n pred_x0 = model.predict(np.reshape(x0, (1, x0.shape[0], x0.shape[1])))\n \n # Calculate training residuals\n preds = model.predict(X_train)\n train_res = y_train - preds\n \n # Number of bootstrap samples\n n_boots = np.sqrt(n).astype(int)\n \n # Compute bootstrap predictions and validation residuals\n boot_preds, val_res = np.empty(n_boots), []\n \n for b in range(n_boots):\n \n # Reset model weights, not straightforward with tensorflow Recurrent Neural Networks\n for ix, layer in enumerate(model.layers):\n if hasattr(self.model.layers[ix], 'recurrent_initializer'):\n weight_initializer = model.layers[ix].kernel_initializer\n bias_initializer = model.layers[ix].bias_initializer\n recurr_init = model.layers[ix].recurrent_initializer\n\n old_weights, old_biases, old_recurrent = model.layers[ix].get_weights()\n\n model.layers[ix].set_weights([\n weight_initializer(shape=old_weights.shape),\n bias_initializer(shape=old_biases.shape),\n recurr_init(shape=old_recurrent.shape)])\n elif hasattr(model.layers[ix], 'kernel_initializer') and hasattr(model.layers[ix], 'bias_initializer'):\n weight_initializer = model.layers[ix].kernel_initializer\n bias_initializer = model.layers[ix].bias_initializer\n \n old_weights, old_biases = model.layers[ix].get_weights()\n \n model.layers[ix].set_weights([\n weight_initializer(shape=old_weights.shape),\n bias_initializer(shape=len(old_biases))])\n\n \n print(f'Starting bootstrap {b+1}/{n_boots}')\n train_idx = np.random.choice(range(n), size=n, replace=True) # Draw the training indexes with replacement\n val_idx = np.array([idx for idx in range(n) if idx not in train_idx]) # Use the ones left after training as validation data\n \n # Train model with training data, validate with validation data. 
Early Stopping stops training after validation performance\n # starts to deteriorate.\n model.fit(X_train[train_idx], y_train[train_idx], epochs=100, verbose=0, validation_data=(X_train[val_idx], y_train[val_idx]),\n callbacks=EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)) \n \n preds_val = model.predict(X_train[val_idx]) # Validation predictions\n \n val_res.append(y_train[val_idx] - preds_val) # Calculate validation residuals\n boot_preds[b] = model.predict(np.reshape(x0, (1, x0.shape[0], x0.shape[1]))) # Predict with bootstrapped model\n \n boot_preds -= np.mean(boot_preds) # Center bootstrap predictions\n val_res = np.concatenate(val_res, axis=None) # Flattening predictions to a single array\n \n # Take percentiles of training and validation residuals to compare\n val_res = np.percentile(val_res, q=np.arange(100))\n train_res = np.percentile(train_res, q=np.arange(100))\n \n # Estimates for the relationship between bias and variance\n no_inf_err = np.mean(np.abs(np.random.permutation(y_train) - np.random.permutation(preds)))\n gener = np.abs(val_res.mean() - train_res.mean())\n no_inf_val = np.abs(no_inf_err - train_res)\n rel_overfitting_rate = np.mean(gener / no_inf_val)\n w = .632 / (1 - .368*rel_overfitting_rate)\n res = (1-w) * train_res + w*val_res\n \n # Construct interval boundaries\n C = np.array([m + o for m in boot_preds for o in res])\n percs = np.percentile(C, q=np.arange(0, 101))\n \n # Saving results to model folder...\n print(f'Saving results to {new_fold_path}.')\n \n # Writing rows to file.\n with open(rf'{new_fold_path}/pred_ints.csv', 'a') as f:\n write = csv.writer(f)\n write.writerow(percs)\n \n print('----------------------------------------------------------------------------------------------')\n \n \n\nclass CVTuner(kt.engine.tuner.Tuner):\n '''\n Class used for customizing Keras Tuner for cross-validation purposes. Inherits Tuner baseclass.\n By default, 5-fold CV is implemented.\n '''\n \n def run_trial(self, trial, x, y, batch_size=32, epochs=1, patience=20):\n cv = KFold(5)\n val_losses = []\n for train_indices, test_indices in cv.split(x):\n x_train, x_test = x[train_indices], x[test_indices]\n y_train, y_test = y[train_indices], y[test_indices]\n model = self.hypermodel.build(trial.hyperparameters)\n # Define early stopping callback with patience parameter\n stopper = EarlyStopping(monitor='val_loss', patience=patience, restore_best_weights=True)\n model.fit(x_train, y_train, batch_size=batch_size, validation_data=(x_test, y_test), epochs=epochs, callbacks=[stopper])\n val_losses.append(model.evaluate(x_test, y_test))\n self.oracle.update_trial(trial.trial_id, {'val_loss': np.mean(val_losses)})\n self.save_model(trial.trial_id, model)\n \nclass RNN_HyperModel(kt.HyperModel):\n '''\n Class for custom implementation of Keras Tuner HyperModel. Two methods: initiation with parameters and formation of the hypermodel.\n Inherits Keras Tuner HyperModel base class. 
Is used in fit-method of child classes.\n Inputs: model type as string, input data shape as tuple, unit boundaries as list, layer boundaries as list,\n learning rate values as list, suitable activation functions as a list.\n '''\n\n def __init__(self, mtype, input_shape, units, layers, lr, act):\n self.mtype = mtype\n self.input_shape = input_shape\n self.units = units\n self.layers = layers\n self.lr = lr\n self.act = act\n\n def build(self, hp):\n \n # Create TensorFlow sequential model\n model = Sequential()\n \n # Define hyperparameter search space\n hp_units = hp.Int('units', min_value=self.units[0], max_value=self.units[1], step=10)\n try:\n hp_layers = hp.Int('layers', min_value=self.layers[0], max_value=self.layers[1])\n except IndexError:\n hp_layers = hp.Fixed('layers', value=self.layers[0])\n hp_act = hp.Choice('activation function', values=self.act)\n hp_lr = hp.Choice('learning rate', values=self.lr)\n \n # Select correct implementation of layer formation based on the model type.\n if self.mtype == 'SimpleRNN':\n \n for i in range(hp_layers):\n if i == 0 and max(range(hp_layers)) == 0:\n model.add(SimpleRNN(units=hp_units, activation=hp_act, input_shape=self.input_shape))\n elif i == 0:\n model.add(SimpleRNN(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))\n model.add(BatchNormalization())\n elif i < max(range(hp_layers)):\n model.add(SimpleRNN(units=hp_units, activation=hp_act, return_sequences=True))\n model.add(BatchNormalization())\n else:\n model.add(SimpleRNN(units=hp_units, activation=hp_act))\n elif self.mtype == 'GRU':\n \n for i in range(hp_layers):\n if i == 0 and max(range(hp_layers)) == 0:\n model.add(GRU(units=hp_units, activation=hp_act, input_shape=self.input_shape))\n elif i == 0:\n model.add(GRU(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))\n model.add(BatchNormalization())\n elif i < max(range(hp_layers)):\n model.add(GRU(units=hp_units, activation=hp_act, return_sequences=True))\n model.add(BatchNormalization())\n else:\n model.add(GRU(units=hp_units, activation=hp_act))\n elif self.mtype == 'LSTM':\n \n for i in range(hp_layers):\n if i == 0 and max(range(hp_layers)) == 0:\n model.add(LSTM(units=hp_units, activation=hp_act, input_shape=self.input_shape))\n elif i == 0:\n model.add(LSTM(units=hp_units, activation=hp_act, input_shape=self.input_shape, return_sequences=True))\n model.add(BatchNormalization())\n elif i < max(range(hp_layers)):\n model.add(LSTM(units=hp_units, activation=hp_act, return_sequences=True))\n model.add(BatchNormalization())\n else:\n model.add(LSTM(units=hp_units, activation=hp_act)) \n \n # Add a single output cell with linear activation function.\n model.add(Dense(1))\n \n # Define model optimizer, here Adam is used with learning rate decided with Bayesian Optimization\n opt = Adam(learning_rate=hp_lr)\n \n # Compile the model. Mean Squared Error is used as loss function while Mean Absolute Error is calculated for illustration\n model.compile(loss='mse', optimizer=opt, metrics=['mae'])\n \n return model\n \nclass VanillaRNN(RNN):\n '''\n Conventional Recurrent Neural Network model.\n '''\n def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh', 'relu'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):\n '''\n Fitting method performing hyperparameter optimization. Bayesian Optimization is used for finding correct\n direction in search space, while 5-fold cross-validation is used for measuring predictive performance of\n a model. 
Saves the model object and the name to class attributes.\n Inputs: Preprocessed input and target data as numpy arrays, maximum epochs for training as int, model compositions to be tested as int,\n hyperparameter search space with fitting default values.\n '''\n tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='SimpleRNN', input_shape=(X.shape[1], X.shape[2]), units=[10,100],\n act=act, layers=layers, lr=lr),\n oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),\n directory=os.getcwd(),\n project_name=f'VanillaRNN_{self.quant[0]}_{str(date.today())}', overwrite=True)\n \n tuner.search(X, y, epochs=epochs)\n \n print(tuner.results_summary())\n \n best = tuner.get_best_models(num_models=1)[0]\n self.name = f'VanillaRNN'\n self.model = best\n \nclass MyGRU(RNN):\n '''\n Gated Recurrent Unit variant of RNN. Inherits all attributes and methods from parent class.\n '''\n def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):\n '''\n Fitting method performing hyperparameter optimization. Bayesian Optimization is used for finding correct\n direction in search space, while 5-fold cross-validation is used for measuring predictive performance of\n a model. Saves the model object and the name to class attributes.\n Inputs: Preprocessed input and target data as numpy arrays, maximum epochs for training as int, model compositions to be tested as int,\n hyperparameter search space with fitting default values.\n '''\n tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='GRU', input_shape=(X.shape[1], X.shape[2]), units=[10,100],\n act=act, layers=layers, lr=lr),\n oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),\n directory=os.getcwd(),\n project_name=f'GRU_{self.quant[0]}_{str(date.today())}', overwrite=True)\n \n tuner.search(X, y, epochs=epochs)\n \n print(tuner.results_summary())\n \n best = tuner.get_best_models(num_models=1)[0]\n self.name = f'GRU'\n self.model = best\n \nclass MyLSTM(RNN):\n '''\n Long Short Term Memory variant of RNN. Inherits all attributes and methods from parent class.\n '''\n def fit(self, X, y, epochs, max_trials, units=[10, 100], act=['tanh'], layers=[1, 2], lr=[0.1, 0.01, 0.001]):\n '''\n Fitting method performing hyperparameter optimization. Bayesian Optimization is used for finding correct\n direction in search space, while 5-fold cross-validation is used for measuring predictive performance of\n a model. Saves the model object and the name to class attributes.\n Inputs: Preprocessed input and target data as numpy arrays, maximum epochs for training as int, model compositions to be tested as int,\n hyperparameter search space with fitting default values.\n '''\n tuner = CVTuner(hypermodel=RNN_HyperModel(mtype='LSTM', input_shape=(X.shape[1], X.shape[2]), units=[10,100],\n act=act, layers=layers, lr=lr),\n oracle=kt.oracles.BayesianOptimization(objective='val_loss', max_trials=max_trials),\n directory=os.getcwd(),\n project_name=f'LSTM_{self.quant[0]}_{str(date.today())}', overwrite=True)\n \n tuner.search(X, y, epochs=epochs)\n \n print(tuner.results_summary())\n \n best = tuner.get_best_models(num_models=1)[0]\n self.name = f'LSTM'\n self.model = best"
] |
[
[
"matplotlib.pyplot.legend",
"tensorflow.keras.models.load_model",
"pandas.to_datetime",
"numpy.sqrt",
"sklearn.model_selection.KFold",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"numpy.mean",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.keras.layers.SimpleRNN",
"numpy.reshape",
"numpy.arange",
"numpy.sin",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.models.Sequential",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.abs",
"numpy.cos",
"tensorflow.keras.optimizers.Adam",
"numpy.random.permutation",
"tensorflow.keras.layers.BatchNormalization",
"matplotlib.pyplot.grid",
"tensorflow.keras.layers.LSTM",
"numpy.empty"
]
] |
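The `rnn.py` cell above builds its training tensors by sliding a fixed-length `deque` over scaled feature rows and pairing each window with the next-step target. The following is a minimal, self-contained sketch of that windowing step only; the function name `make_sequences`, the toy random data, and the shapes are illustrative assumptions, not part of the original file.

```python
# Minimal sketch of the sliding-window sequence construction used in RNN.preprocess above.
from collections import deque

import numpy as np
from sklearn.preprocessing import MinMaxScaler


def make_sequences(features, targets, seq_len=4):
    """Build (n_samples, seq_len, n_features) inputs paired with next-step targets."""
    window = deque(maxlen=seq_len)
    X, y = [], []
    for i, row in enumerate(features):
        window.append(row)
        if len(window) == seq_len and i + 1 < len(targets):
            X.append(np.array(window))   # one full window of past feature rows
            y.append(targets[i + 1])     # the value one step ahead
    return np.array(X), np.array(y)


# Toy usage: 20 timesteps, 3 input features, 1 target column, scaled to [0, 1].
raw = np.random.rand(20, 4)
scaled = MinMaxScaler().fit_transform(raw)
X, y = make_sequences(scaled[:, :-1], scaled[:, -1], seq_len=4)
print(X.shape, y.shape)  # (16, 4, 3) (16,)
```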
zhangchenkai/piwise_segmentation
|
[
"3dfecaae32cde9097d9c312e3373a834b0884319"
] |
[
"main.py"
] |
[
"import sys\nfrom argparse import ArgumentParser\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom torch.autograd import Variable\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom torchvision.transforms import Compose, Normalize\nfrom torchvision.transforms import ToTensor, ToPILImage, Resize\n\nsys.path.append(\"/home/nico/PycharmProjects/project-marvel/defect-detection\")\nfrom defect_detection.evaluator.evaluation import save_metrics_on_results\nfrom piwise.criterion import CrossEntropyLoss2d\nfrom piwise.dataset import VOCTrain, VOCTest\nfrom piwise.network import FCN8, FCN16, FCN32, UNet, PSPNet, SegNet\nfrom piwise.transform import ToLabel, Colorize\nfrom piwise.visualize import Dashboard\n\nNUM_CHANNELS = 3\nNUM_CLASSES = 16\n\ncolor_transform = Colorize(n=NUM_CLASSES)\nimage_transform = ToPILImage()\ninput_transform = Compose([\n Resize(256),\n ToTensor(),\n # Normalize([.485, .456, .406], [.229, .224, .225]),\n Normalize([.5, .5, .5], [.5, .5, .5]),\n])\ntarget_transform = Compose([\n Resize(256),\n ToLabel(),\n # Relabel(255, 21),\n])\n\n\ndef train(args, model):\n model.train()\n\n weight = torch.ones(NUM_CLASSES)\n weight[0] = 0.1\n\n loader = DataLoader(VOCTrain(args.datadir, 'train', input_transform, target_transform),\n num_workers=args.num_workers, batch_size=args.batch_size, shuffle=True)\n\n if args.cuda:\n criterion = CrossEntropyLoss2d(weight.cuda())\n else:\n criterion = CrossEntropyLoss2d(weight)\n\n optimizer = Adam(model.parameters(), lr=1e-5)\n # if args.model.startswith('FCN'):\n # optimizer = SGD(model.parameters(), 1e-4, .9, 2e-5)\n # if args.model.startswith('PSP'):\n # optimizer = SGD(model.parameters(), 1e-2, .9, 1e-4)\n # if args.model.startswith('Seg'):\n # optimizer = SGD(model.parameters(), 1e-3, .9)\n\n if args.steps_plot > 0:\n board = Dashboard(args.port)\n\n for epoch in range(1, args.num_epochs + 1):\n epoch_loss = []\n\n for step, (images, labels) in enumerate(loader):\n if args.cuda:\n images = images.cuda()\n labels = labels.cuda()\n\n inputs = Variable(images)\n targets = Variable(labels)\n outputs = model(inputs)\n\n optimizer.zero_grad()\n loss = criterion(outputs, targets[:, 0])\n loss.backward()\n optimizer.step()\n\n epoch_loss.append(loss.item())\n if args.steps_plot > 0 and step % args.steps_plot == 0:\n image = inputs[0].cpu().data\n image[0] = image[0] * .5 + .5\n image[1] = image[1] * .5 + .5\n image[2] = image[2] * .5 + .5\n board.image(image,\n f'input (epoch: {epoch}, step: {step})')\n board.image(color_transform(outputs[0].cpu().max(0, keepdim=True)[1].data),\n f'output (epoch: {epoch}, step: {step})')\n board.image(color_transform(targets[0].cpu().data),\n f'target (epoch: {epoch}, step: {step})')\n if args.steps_loss > 0 and step % args.steps_loss == 0:\n average = sum(epoch_loss) / len(epoch_loss)\n print(f'loss: {average} (epoch: {epoch}, step: {step})')\n if args.steps_save > 0 and step % args.steps_save == 0:\n filename = f'{args.model}-{epoch:03}-{step:04}.pth'\n torch.save(model.state_dict(), filename)\n print(f'save: {filename} (epoch: {epoch}, step: {step})')\n\n\ndef evaluate(args, model):\n save_dir = '/home/nico/Desktop/FCN-8s/'\n os.makedirs(save_dir, exist_ok=True)\n\n model.eval()\n\n all_metrics_list = []\n for p_id in range(1, 16):\n print('=====pattern %d=====' % p_id)\n loader = DataLoader(VOCTest(args.datadir, p_id, input_transform, target_transform),\n num_workers=args.num_workers, batch_size=args.batch_size, shuffle=False)\n\n targets = []\n preds = []\n for step, 
(image, label) in enumerate(loader):\n if args.cuda:\n image = image.cuda()\n # inputs = Variable(image)\n targets.append(label.numpy().astype(np.uint8))\n outputs = model(image)\n pred = outputs.detach().cpu().numpy().argmax(axis=1)\n preds.append(pred.astype(np.uint8))\n\n targets = np.concatenate(targets).flatten() == p_id\n preds = np.concatenate(preds).flatten() == p_id\n\n print('======start evaluation======')\n metrics_dict = save_metrics_on_results(label_pred=None, label_true=None,\n binary_result=preds, binary_mask=targets,\n model_name='fcn8s-p%d' % p_id, save_dir=save_dir)\n\n all_metrics_list.append(metrics_dict)\n df = pd.DataFrame(all_metrics_list)\n df.to_csv('~/Desktop/all_metrics_of_%s.csv' % 'fcn8s')\n\n\ndef main(args):\n Net = None\n if args.model == 'fcn8':\n Net = FCN8\n if args.model == 'fcn16':\n Net = FCN16\n if args.model == 'fcn32':\n Net = FCN32\n if args.model == 'fcn32':\n Net = FCN32\n if args.model == 'unet':\n Net = UNet\n if args.model == 'pspnet':\n Net = PSPNet\n if args.model == 'segnet':\n Net = SegNet\n assert Net is not None, f'model {args.model} not available'\n\n model = Net(NUM_CLASSES)\n\n if args.cuda:\n model = model.cuda()\n if args.state:\n try:\n model.load_state_dict(torch.load(args.state))\n except AssertionError:\n model.load_state_dict(torch.load(args.state,\n map_location=lambda storage, loc: storage))\n\n if args.mode == 'eval':\n evaluate(args, model)\n if args.mode == 'train':\n train(args, model)\n\n\nif __name__ == '__main__':\n import os\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n parser = ArgumentParser()\n parser.add_argument('--cuda', action='store_true')\n parser.add_argument('--model', required=True)\n parser.add_argument('--state')\n\n subparsers = parser.add_subparsers(dest='mode')\n subparsers.required = True\n\n parser_eval = subparsers.add_parser('eval')\n parser_eval.add_argument('--datadir', default='data')\n parser_eval.add_argument('--batch-size', type=int, default=4)\n parser_eval.add_argument('--num-workers', type=int, default=4)\n # parser_eval.add_argument('image')\n # parser_eval.add_argument('label')\n\n parser_train = subparsers.add_parser('train')\n parser_train.add_argument('--datadir', default='data')\n parser_train.add_argument('--port', type=int, default=5000)\n parser_train.add_argument('--num-epochs', type=int, default=32)\n parser_train.add_argument('--num-workers', type=int, default=4)\n parser_train.add_argument('--batch-size', type=int, default=4)\n parser_train.add_argument('--steps-loss', type=int, default=50)\n parser_train.add_argument('--steps-plot', type=int, default=100)\n parser_train.add_argument('--steps-save', type=int, default=500)\n\n main(parser.parse_args())\n"
] |
[
[
"torch.ones",
"torch.load",
"pandas.DataFrame",
"numpy.concatenate",
"torch.autograd.Variable"
]
] |
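In the `main.py` cell above, `evaluate()` concatenates batched label maps and argmaxed network outputs, flattens them, and reduces both to binary masks for a single pattern id before scoring. A small sketch of that reduction, using fabricated toy arrays in place of the real loaders and model, might look like this:

```python
# Sketch of the per-pattern binarisation in evaluate(); shapes and p_id are illustrative.
import numpy as np

p_id = 3
# Pretend two batches of (batch, H, W) ground-truth label maps and argmaxed predictions.
targets = [np.random.randint(0, 16, size=(4, 8, 8), dtype=np.uint8) for _ in range(2)]
preds = [np.random.randint(0, 16, size=(4, 8, 8), dtype=np.uint8) for _ in range(2)]

# Concatenate batches, flatten to pixels, and keep only membership in one class.
target_mask = np.concatenate(targets).flatten() == p_id
pred_mask = np.concatenate(preds).flatten() == p_id

# Pixel-wise agreement on that class; IoU, precision, recall follow from the same masks.
print((target_mask & pred_mask).sum(), target_mask.sum(), pred_mask.sum())
```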
alar0330/amazon-sagemaker-build-train-deploy
|
[
"b476c5ba5b3bd55a99709e7788079763fa498682"
] |
[
"03_train_model/source_dir/training_debug.py"
] |
[
"import argparse\nimport json\nimport os\nimport random\nimport pandas as pd\nimport glob\nimport pickle as pkl\n\nimport xgboost\n\nfrom smdebug import SaveConfig\nfrom smdebug.xgboost import Hook\n\ndef parse_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--max_depth\", type=int, default=5)\n parser.add_argument(\"--eta\", type=float, default=0.05)\n parser.add_argument(\"--gamma\", type=int, default=4)\n parser.add_argument(\"--min_child_weight\", type=int, default=6)\n parser.add_argument(\"--silent\", type=int, default=0)\n parser.add_argument(\"--objective\", type=str, default=\"reg:logistic\")\n parser.add_argument(\"--num_round\", type=int, default=10)\n \n parser.add_argument('--train', type=str, default=os.environ.get('SM_CHANNEL_TRAIN'))\n parser.add_argument('--validation', type=str, default=os.environ.get('SM_CHANNEL_VALIDATION'))\n\n args = parser.parse_args()\n\n return args\n\ndef main():\n\n args = parse_args()\n train_files_path, validation_files_path = args.train, args.validation\n \n train_features_path = os.path.join(args.train, 'train_features.csv')\n train_labels_path = os.path.join(args.train, 'train_labels.csv')\n \n val_features_path = os.path.join(args.validation, 'val_features.csv')\n val_labels_path = os.path.join(args.validation, 'val_labels.csv')\n \n print('Loading training dataframes...')\n df_train_features = pd.read_csv(train_features_path)\n df_train_labels = pd.read_csv(train_labels_path)\n \n print('Loading validation dataframes...')\n df_val_features = pd.read_csv(val_features_path)\n df_val_labels = pd.read_csv(val_labels_path)\n \n X = df_train_features.values\n y = df_train_labels.values\n \n val_X = df_val_features.values\n val_y = df_val_labels.values\n\n dtrain = xgboost.DMatrix(X, label=y)\n dval = xgboost.DMatrix(val_X, label=val_y)\n \n hook = Hook.create_from_json_file()\n hook.train_data = dtrain\n hook.validation_data = dval\n\n watchlist = [(dtrain, \"train\"), (dval, \"validation\")]\n\n params = {\n \"max_depth\": args.max_depth,\n \"eta\": args.eta,\n \"gamma\": args.gamma,\n \"min_child_weight\": args.min_child_weight,\n \"silent\": args.silent,\n \"objective\": args.objective\n }\n\n bst = xgboost.train(\n params=params,\n dtrain=dtrain,\n evals=watchlist,\n num_boost_round=args.num_round,\n callbacks=[hook])\n \n model_dir = os.environ.get('SM_MODEL_DIR')\n pkl.dump(bst, open(model_dir + '/model.bin', 'wb'))\n\nif __name__ == \"__main__\":\n main()"
] |
[
[
"pandas.read_csv"
]
] |
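The `training_debug.py` cell above reads feature and label CSVs, wraps them in `DMatrix` objects, and trains with `xgboost.train` while a SageMaker Debugger `Hook` records the run. Below is a stripped-down local sketch of the same training path, assuming the four CSV files exist on disk; it deliberately omits the `SM_*` environment variables and the smdebug hook, since both only make sense inside a SageMaker job, and it uses plain XGBoost persistence instead of pickle.

```python
# Local sketch of the xgboost training flow in training_debug.py (no SageMaker pieces).
import pandas as pd
import xgboost

X = pd.read_csv("train_features.csv").values
y = pd.read_csv("train_labels.csv").values
val_X = pd.read_csv("val_features.csv").values
val_y = pd.read_csv("val_labels.csv").values

dtrain = xgboost.DMatrix(X, label=y)
dval = xgboost.DMatrix(val_X, label=val_y)

params = {"max_depth": 5, "eta": 0.05, "objective": "reg:logistic"}
bst = xgboost.train(params=params, dtrain=dtrain,
                    evals=[(dtrain, "train"), (dval, "validation")],
                    num_boost_round=10)
bst.save_model("model.bin")  # native Booster persistence
```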
x1011x/yfinance
|
[
"87a6dc2e9be7b013a11f956eb4593a5595798e2e"
] |
[
"yfinance/ticker.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! Finance market data downloader (+fix for Pandas Datareader)\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\n# import time as _time\nimport datetime as _datetime\nimport requests as _requests\nimport pandas as _pd\n# import numpy as _np\n\n# import json as _json\n# import re as _re\nfrom collections import namedtuple as _namedtuple\n\nfrom .base import TickerBase\n\n\nclass Ticker(TickerBase):\n\n def __repr__(self):\n return 'yfinance.Ticker object <%s>' % self.ticker\n\n def _download_options(self, date=None, proxy=None):\n if date is None:\n url = \"{}/v7/finance/options/{}\".format(\n self._base_url, self.ticker)\n else:\n url = \"{}/v7/finance/options/{}?date={}\".format(\n self._base_url, self.ticker, date)\n\n # setup proxy in requests format\n if proxy is not None:\n if isinstance(proxy, dict) and \"https\" in proxy:\n proxy = proxy[\"https\"]\n proxy = {\"https\": proxy}\n\n r = _requests.get(url=url, proxies=proxy).json()\n if r['optionChain']['result']:\n for exp in r['optionChain']['result'][0]['expirationDates']:\n self._expirations[_datetime.datetime.utcfromtimestamp(\n exp).strftime('%Y-%m-%d')] = exp\n return r['optionChain']['result'][0]['options'][0]\n return {}\n\n def _options2df(self, opt, tz=None):\n data = _pd.DataFrame(opt).reindex(columns=[\n 'contractSymbol',\n 'lastTradeDate',\n 'strike',\n 'lastPrice',\n 'bid',\n 'ask',\n 'change',\n 'percentChange',\n 'volume',\n 'openInterest',\n 'impliedVolatility',\n 'inTheMoney',\n 'contractSize',\n 'currency'])\n\n data['lastTradeDate'] = _pd.to_datetime(\n data['lastTradeDate'], unit='s')\n if tz is not None:\n data['lastTradeDate'] = data['lastTradeDate'].tz_localize(tz)\n return data\n\n def option_chain(self, date=None, proxy=None, tz=None):\n if date is None:\n options = self._download_options(proxy=proxy)\n else:\n if not self._expirations:\n self._download_options()\n if date not in self._expirations:\n raise ValueError(\n \"Expiration `%s` cannot be found. 
\"\n \"Available expiration are: [%s]\" % (\n date, ', '.join(self._expirations)))\n date = self._expirations[date]\n options = self._download_options(date, proxy=proxy)\n\n return _namedtuple('Options', ['calls', 'puts'])(**{\n \"calls\": self._options2df(options['calls'], tz=tz),\n \"puts\": self._options2df(options['puts'], tz=tz)\n })\n\n # ------------------------\n\n @property\n def isin(self):\n return self.get_isin()\n\n @property\n def major_holders(self):\n return self.get_major_holders()\n\n @property\n def institutional_holders(self):\n return self.get_institutional_holders()\n\n @property\n def mutualfund_holders(self):\n return self.get_mutualfund_holders()\n\n @property\n def dividends(self):\n return self.get_dividends()\n\n @property\n def splits(self):\n return self.get_splits()\n\n @property\n def actions(self):\n return self.get_actions()\n\n @property\n def info(self):\n return self.get_info()\n\n @property\n def calendar(self):\n return self.get_calendar()\n\n @property\n def recommendations(self):\n return self.get_recommendations()\n\n @property\n def earnings(self):\n return self.get_earnings()\n\n @property\n def quarterly_earnings(self):\n return self.get_earnings(freq='quarterly')\n\n @property\n def financials(self):\n return self.get_financials()\n\n @property\n def annualbasiceps(self):\n return self.get_annualbasiceps()\n\n @property\n def quarterly_financials(self):\n return self.get_financials(freq='quarterly')\n\n @property\n def balance_sheet(self):\n return self.get_balancesheet()\n\n @property\n def quarterly_balance_sheet(self):\n return self.get_balancesheet(freq='quarterly')\n\n @property\n def balancesheet(self):\n return self.get_balancesheet()\n\n @property\n def quarterly_balancesheet(self):\n return self.get_balancesheet(freq='quarterly')\n\n @property\n def cashflow(self):\n return self.get_cashflow()\n\n @property\n def quarterly_cashflow(self):\n return self.get_cashflow(freq='quarterly')\n\n @property\n def sustainability(self):\n return self.get_sustainability()\n\n @property\n def options(self):\n if not self._expirations:\n self._download_options()\n return tuple(self._expirations.keys())\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
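The `ticker.py` cell above reshapes the raw option-chain payload in `_options2df`: a list of option records is reindexed to a fixed column order and the epoch-seconds `lastTradeDate` field is converted to timestamps. A small sketch of just that pandas step, using two fabricated records and a shortened column list, could be:

```python
# Sketch of the _options2df reshaping; the option records below are made-up sample values.
import pandas as pd

opt = [
    {"contractSymbol": "XYZ240119C00050000", "strike": 50.0,
     "lastPrice": 1.25, "lastTradeDate": 1700000000},
    {"contractSymbol": "XYZ240119C00055000", "strike": 55.0,
     "lastPrice": 0.80, "lastTradeDate": 1700003600},
]

data = pd.DataFrame(opt).reindex(columns=[
    "contractSymbol", "lastTradeDate", "strike", "lastPrice", "bid", "ask"])
data["lastTradeDate"] = pd.to_datetime(data["lastTradeDate"], unit="s")
print(data)  # missing columns (bid, ask) are filled with NaN by reindex
```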
TausifAnsari/PyHub
|
[
"6281e9f515674fb51f0d0862c26ec18020fa7d83"
] |
[
"Graphs/Line Graph.py"
] |
[
"# pip install matplotlib\nimport matplotlib.pyplot as graph\n\nmonths = [\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\"]\nscores = [100,130,125,90,20,50,70]\n\ngraph.plot(months,scores,color=(0/255,0/255,255/255),marker = \"+\",markersize = 10,markeredgewidth = 2,\nlinewidth = 2,linestyle = \"dotted\", markeredgecolor = (255/255,0,0)) \n# The colour code is in RGB. Make sure you divide it by 255 (values have to be between 0 and 1)\ngraph.title(\"Monthly Analysis\")\ngraph.xlabel(\"Months\")\ngraph.ylabel(\"Stocks Sold\")\ngraph.show()"
] |
[
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
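The `Line Graph.py` cell above passes RGB colours as components divided by 255, since matplotlib expects values in [0, 1]. A tiny helper echoing that comment, with `rgb255` as an illustrative name not present in the original snippet:

```python
# Convert 0-255 RGB components to the [0, 1] floats matplotlib expects.
def rgb255(r, g, b):
    return (r / 255, g / 255, b / 255)

print(rgb255(0, 0, 255))  # (0.0, 0.0, 1.0)
```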
bopopescu/sage-5
|
[
"9d85b34956ca2edd55af307f99c5d3859acd30bf"
] |
[
"src/sage/calculus/desolvers.py"
] |
[
"r\"\"\"\nSolving ordinary differential equations\n\nThis file contains functions useful for solving differential equations\nwhich occur commonly in a 1st semester differential equations\ncourse. For another numerical solver see :meth:`ode_solver` function\nand optional package Octave.\n\nCommands:\n\n- ``desolve`` - Computes the \"general solution\" to a 1st or 2nd order\n ODE via Maxima.\n\n- ``desolve_laplace`` - Solves an ODE using laplace transforms via\n Maxima. Initials conditions are optional.\n\n- ``desolve_system`` - Solves any size system of 1st order odes using\n Maxima. Initials conditions are optional.\n\n- ``desolve_rk4`` - Solves numerically IVP for one first order\n equation, returns list of points or plot\n\n- ``desolve_system_rk4`` - Solves numerically IVP for system of first\n order equations, returns list of points\n\n- ``desolve_odeint`` - Solves numerically a system of first-order ordinary\n differential equations using ``odeint`` from scipy.integrate module.\n\n- ``eulers_method`` - Approximate solution to a 1st order DE,\n presented as a table.\n\n- ``eulers_method_2x2`` - Approximate solution to a 1st order system\n of DEs, presented as a table.\n\n- ``eulers_method_2x2_plot`` - Plots the sequence of points obtained\n from Euler's method.\n\nAUTHORS:\n\n- David Joyner (3-2006) - Initial version of functions\n\n- Marshall Hampton (7-2007) - Creation of Python module and testing\n\n- Robert Bradshaw (10-2008) - Some interface cleanup.\n\n- Robert Marik (10-2009) - Some bugfixes and enhancements\n\n\"\"\"\n\n##########################################################################\n# Copyright (C) 2006 David Joyner <[email protected]>, Marshall Hampton,\n# Robert Marik <[email protected]>\n#\n# Distributed under the terms of the GNU General Public License (GPL):\n#\n# http://www.gnu.org/licenses/\n##########################################################################\n\nfrom sage.interfaces.maxima import Maxima\nfrom sage.plot.all import line\nfrom sage.symbolic.expression import is_SymbolicEquation\nfrom sage.symbolic.ring import is_SymbolicVariable\nfrom sage.calculus.functional import diff\nfrom sage.misc.decorators import rename_keyword\n\nmaxima = Maxima()\n\ndef desolve(de, dvar, ics=None, ivar=None, show_method=False, contrib_ode=False):\n r\"\"\"\n Solves a 1st or 2nd order linear ODE via maxima. Including IVP and BVP.\n\n *Use* ``desolve? <tab>`` *if the output in truncated in notebook.*\n\n INPUT:\n\n - ``de`` - an expression or equation representing the ODE\n\n - ``dvar`` - the dependent variable (hereafter called ``y``)\n\n - ``ics`` - (optional) the initial or boundary conditions\n\n - for a first-order equation, specify the initial ``x`` and ``y``\n\n - for a second-order equation, specify the initial ``x``, ``y``,\n and ``dy/dx``, i.e. write `[x_0, y(x_0), y'(x_0)]`\n\n - for a second-order boundary solution, specify initial and\n final ``x`` and ``y`` boundary conditions, i.e. 
write `[x_0, y(x_0), x_1, y(x_1)]`.\n\n - gives an error if the solution is not SymbolicEquation (as happens for\n example for Clairaut equation)\n\n - ``ivar`` - (optional) the independent variable (hereafter called\n x), which must be specified if there is more than one\n independent variable in the equation.\n\n - ``show_method`` - (optional) if true, then Sage returns pair\n ``[solution, method]``, where method is the string describing\n method which has been used to get solution (Maxima uses the\n following order for first order equations: linear, separable,\n exact (including exact with integrating factor), homogeneous,\n bernoulli, generalized homogeneous) - use carefully in class,\n see below for the example of the equation which is separable but\n this property is not recognized by Maxima and equation is solved\n as exact.\n\n - ``contrib_ode`` - (optional) if true, desolve allows to solve\n clairaut, lagrange, riccati and some other equations. May take\n a long time and thus turned off by default. Initial conditions\n can be used only if the result is one SymbolicEquation (does not\n contain singular solution, for example)\n\n OUTPUT:\n\n In most cases returns SymbolicEquation which defines the solution\n implicitly. If the result is in the form y(x)=... (happens for\n linear eqs.), returns the right-hand side only. The possible\n constant solutions of separable ODE's are omitted.\n\n\n EXAMPLES::\n\n sage: x = var('x')\n sage: y = function('y', x)\n sage: desolve(diff(y,x) + y - 1, y)\n (c + e^x)*e^(-x)\n\n ::\n\n sage: f = desolve(diff(y,x) + y - 1, y, ics=[10,2]); f\n (e^10 + e^x)*e^(-x)\n\n ::\n\n sage: plot(f)\n\n We can also solve second-order differential equations.::\n\n sage: x = var('x')\n sage: y = function('y', x)\n sage: de = diff(y,x,2) - y == x\n sage: desolve(de, y)\n k2*e^(-x) + k1*e^x - x\n\n\n ::\n\n sage: f = desolve(de, y, [10,2,1]); f\n -x + 7*e^(x - 10) + 5*e^(-x + 10)\n\n ::\n\n sage: f(x=10)\n 2\n\n ::\n\n sage: diff(f,x)(x=10)\n 1\n\n ::\n\n sage: de = diff(y,x,2) + y == 0\n sage: desolve(de, y)\n k2*cos(x) + k1*sin(x)\n\n ::\n\n sage: desolve(de, y, [0,1,pi/2,4])\n cos(x) + 4*sin(x)\n\n ::\n\n sage: desolve(y*diff(y,x)+sin(x)==0,y)\n -1/2*y(x)^2 == c - cos(x)\n\n Clairot equation: general and singular solutions::\n\n sage: desolve(diff(y,x)^2+x*diff(y,x)-y==0,y,contrib_ode=True,show_method=True)\n [[y(x) == c^2 + c*x, y(x) == -1/4*x^2], 'clairault']\n\n For equations involving more variables we specify independent variable::\n\n sage: a,b,c,n=var('a b c n')\n sage: desolve(x^2*diff(y,x)==a+b*x^n+c*x^2*y^2,y,ivar=x,contrib_ode=True)\n [[y(x) == 0, (b*x^(n - 2) + a/x^2)*c^2*u == 0]]\n\n ::\n\n sage: desolve(x^2*diff(y,x)==a+b*x^n+c*x^2*y^2,y,ivar=x,contrib_ode=True,show_method=True)\n [[[y(x) == 0, (b*x^(n - 2) + a/x^2)*c^2*u == 0]], 'riccati']\n\n\n Higher orded, not involving independent variable::\n\n sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y).expand()\n 1/6*y(x)^3 + k1*y(x) == k2 + x\n\n ::\n\n sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y,[0,1,1,3]).expand()\n 1/6*y(x)^3 - 5/3*y(x) == x - 3/2\n\n ::\n\n sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y,[0,1,1,3],show_method=True)\n [1/6*y(x)^3 - 5/3*y(x) == x - 3/2, 'freeofx']\n\n Separable equations - Sage returns solution in implicit form::\n\n sage: desolve(diff(y,x)*sin(y) == cos(x),y)\n -cos(y(x)) == c + sin(x)\n\n ::\n\n sage: desolve(diff(y,x)*sin(y) == cos(x),y,show_method=True)\n [-cos(y(x)) == c + sin(x), 'separable']\n\n ::\n\n sage: desolve(diff(y,x)*sin(y) == 
cos(x),y,[pi/2,1])\n -cos(y(x)) == -cos(1) + sin(x) - 1\n\n Linear equation - Sage returns the expression on the right hand side only::\n\n sage: desolve(diff(y,x)+(y) == cos(x),y)\n 1/2*((cos(x) + sin(x))*e^x + 2*c)*e^(-x)\n\n ::\n\n sage: desolve(diff(y,x)+(y) == cos(x),y,show_method=True)\n [1/2*((cos(x) + sin(x))*e^x + 2*c)*e^(-x), 'linear']\n\n ::\n\n sage: desolve(diff(y,x)+(y) == cos(x),y,[0,1])\n 1/2*(cos(x)*e^x + e^x*sin(x) + 1)*e^(-x)\n\n This ODE with separated variables is solved as\n exact. Explanation - factor does not split `e^{x-y}` in Maxima\n into `e^{x}e^{y}`::\n\n sage: desolve(diff(y,x)==exp(x-y),y,show_method=True)\n [-e^x + e^y(x) == c, 'exact']\n\n You can solve Bessel equations. You can also use initial\n conditions, but you cannot put (sometimes desired) initial\n condition at x=0, since this point is singlar point of the\n equation. Anyway, if the solution should be bounded at x=0, then\n k2=0.::\n\n sage: desolve(x^2*diff(y,x,x)+x*diff(y,x)+(x^2-4)*y==0,y)\n k1*bessel_J(2, x) + k2*bessel_Y(2, x)\n\n Difficult ODE produces error::\n\n sage: desolve(sqrt(y)*diff(y,x)+e^(y)+cos(x)-sin(x+y)==0,y) # not tested\n Traceback (click to the left for traceback)\n ...\n NotImplementedError, \"Maxima was unable to solve this ODE. Consider to set option contrib_ode to True.\"\n\n Difficult ODE produces error - moreover, takes a long time ::\n\n sage: desolve(sqrt(y)*diff(y,x)+e^(y)+cos(x)-sin(x+y)==0,y,contrib_ode=True) # not tested\n\n Some more types od ODE's::\n\n sage: desolve(x*diff(y,x)^2-(1+x*y)*diff(y,x)+y==0,y,contrib_ode=True,show_method=True)\n [[y(x) == c + log(x), y(x) == c*e^x], 'factor']\n\n ::\n\n sage: desolve(diff(y,x)==(x+y)^2,y,contrib_ode=True,show_method=True)\n [[[x == c - arctan(sqrt(t)), y(x) == -x - sqrt(t)], [x == c + arctan(sqrt(t)), y(x) == -x + sqrt(t)]], 'lagrange']\n\n These two examples produce error (as expected, Maxima 5.18 cannot\n solve equations from initial conditions). Current Maxima 5.18\n returns false answer in this case!::\n\n sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y,[0,1,2]).expand() # not tested\n Traceback (click to the left for traceback)\n ...\n NotImplementedError, \"Maxima was unable to solve this ODE. Consider to set option contrib_ode to True.\"\n\n ::\n\n sage: desolve(diff(y,x,2)+y*(diff(y,x,1))^3==0,y,[0,1,2],show_method=True) # not tested\n Traceback (click to the left for traceback)\n ...\n NotImplementedError, \"Maxima was unable to solve this ODE. 
Consider to set option contrib_ode to True.\"\n\n Second order linear ODE::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y)\n (k2*x + k1)*e^(-x) + 1/2*sin(x)\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,show_method=True)\n [(k2*x + k1)*e^(-x) + 1/2*sin(x), 'variationofparameters']\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,[0,3,1])\n 1/2*(7*x + 6)*e^(-x) + 1/2*sin(x)\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,[0,3,1],show_method=True)\n [1/2*(7*x + 6)*e^(-x) + 1/2*sin(x), 'variationofparameters']\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,[0,3,pi/2,2])\n 3*(x*(e^(1/2*pi) - 2)/pi + 1)*e^(-x) + 1/2*sin(x)\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == cos(x),y,[0,3,pi/2,2],show_method=True)\n [3*(x*(e^(1/2*pi) - 2)/pi + 1)*e^(-x) + 1/2*sin(x), 'variationofparameters']\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y)\n (k2*x + k1)*e^(-x)\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,show_method=True)\n [(k2*x + k1)*e^(-x), 'constcoeff']\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,[0,3,1])\n (4*x + 3)*e^(-x)\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,[0,3,1],show_method=True)\n [(4*x + 3)*e^(-x), 'constcoeff']\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,[0,3,pi/2,2])\n (2*x*(2*e^(1/2*pi) - 3)/pi + 3)*e^(-x)\n\n ::\n\n sage: desolve(diff(y,x,2)+2*diff(y,x)+y == 0,y,[0,3,pi/2,2],show_method=True)\n [(2*x*(2*e^(1/2*pi) - 3)/pi + 3)*e^(-x), 'constcoeff']\n\n TESTS:\n\n Trac #9961 fixed (allow assumptions on the dependent variable in desolve)::\n\n sage: y=function('y',x); assume(x>0); assume(y>0)\n sage: sage.calculus.calculus.maxima('domain:real') # needed since Maxima 5.26.0 to get the answer as below\n real\n sage: desolve(x*diff(y,x)-x*sqrt(y^2+x^2)-y == 0, y, contrib_ode=True)\n [x - arcsinh(y(x)/x) == c]\n\n Trac #10682 updated Maxima to 5.26, and it started to show a different\n solution in the complex domain for the ODE above::\n\n sage: sage.calculus.calculus.maxima('domain:complex') # back to the default complex domain\n complex\n sage: desolve(x*diff(y,x)-x*sqrt(y^2+x^2)-y == 0, y, contrib_ode=True)\n [1/2*(2*x^2*sqrt(x^(-2)) - 2*x*sqrt(x^(-2))*arcsinh(y(x)/sqrt(x^2)) -\n 2*x*sqrt(x^(-2))*arcsinh(y(x)^2/(x*sqrt(y(x)^2))) +\n log(4*(2*x^2*sqrt((x^2*y(x)^2 + y(x)^4)/x^2)*sqrt(x^(-2)) + x^2 +\n 2*y(x)^2)/x^2))/(x*sqrt(x^(-2))) == c]\n\n Trac #6479 fixed::\n\n sage: x = var('x')\n sage: y = function('y', x)\n sage: desolve( diff(y,x,x) == 0, y, [0,0,1])\n x\n\n ::\n\n sage: desolve( diff(y,x,x) == 0, y, [0,1,1])\n x + 1\n\n Trac #9835 fixed::\n\n sage: x = var('x')\n sage: y = function('y', x)\n sage: desolve(diff(y,x,2)+y*(1-y^2)==0,y,[0,-1,1,1])\n Traceback (most recent call last):\n ...\n NotImplementedError: Unable to use initial condition for this equation (freeofx).\n\n Trac #8931 fixed::\n\n sage: x=var('x'); f=function('f',x); k=var('k'); assume(k>0)\n sage: desolve(diff(f,x,2)/f==k,f,ivar=x)\n k1*e^(sqrt(k)*x) + k2*e^(-sqrt(k)*x)\n\n\n AUTHORS:\n\n - David Joyner (1-2006)\n\n - Robert Bradshaw (10-2008)\n\n - Robert Marik (10-2009)\n\n \"\"\"\n if is_SymbolicEquation(de):\n de = de.lhs() - de.rhs()\n if is_SymbolicVariable(dvar):\n raise ValueError(\"You have to declare dependent variable as a function, eg. 
y=function('y',x)\")\n # for backwards compatibility\n if isinstance(dvar, list):\n dvar, ivar = dvar\n elif ivar is None:\n ivars = de.variables()\n ivars = [t for t in ivars if t is not dvar]\n if len(ivars) != 1:\n raise ValueError(\"Unable to determine independent variable, please specify.\")\n ivar = ivars[0]\n def sanitize_var(exprs):\n return exprs.replace(\"'\"+dvar_str+\"(\"+ivar_str+\")\",dvar_str)\n de00 = de._maxima_()\n P = de00.parent()\n dvar_str=P(dvar.operator()).str()\n ivar_str=P(ivar).str()\n de00 = de00.str()\n de0 = sanitize_var(de00)\n ode_solver=\"ode2\"\n cmd=\"(TEMP:%s(%s,%s,%s), if TEMP=false then TEMP else substitute(%s=%s(%s),TEMP))\"%(ode_solver,de0,dvar_str,ivar_str,dvar_str,dvar_str,ivar_str)\n # we produce string like this\n # ode2('diff(y,x,2)+2*'diff(y,x,1)+y-cos(x),y(x),x)\n soln = P(cmd)\n\n if str(soln).strip() == 'false':\n if contrib_ode:\n ode_solver=\"contrib_ode\"\n P(\"load('contrib_ode)\")\n cmd=\"(TEMP:%s(%s,%s,%s), if TEMP=false then TEMP else substitute(%s=%s(%s),TEMP))\"%(ode_solver,de0,dvar_str,ivar_str,dvar_str,dvar_str,ivar_str)\n # we produce string like this\n # (TEMP:contrib_ode(x*('diff(y,x,1))^2-(x*y+1)*'diff(y,x,1)+y,y,x), if TEMP=false then TEMP else substitute(y=y(x),TEMP))\n soln = P(cmd)\n if str(soln).strip() == 'false':\n raise NotImplementedError(\"Maxima was unable to solve this ODE.\")\n else:\n raise NotImplementedError(\"Maxima was unable to solve this ODE. Consider to set option contrib_ode to True.\")\n\n if show_method:\n maxima_method=P(\"method\")\n\n if (ics is not None):\n if not is_SymbolicEquation(soln.sage()):\n if not show_method:\n maxima_method=P(\"method\")\n raise NotImplementedError(\"Unable to use initial condition for this equation (%s).\"%(str(maxima_method).strip()))\n if len(ics) == 2:\n tempic=(ivar==ics[0])._maxima_().str()\n tempic=tempic+\",\"+(dvar==ics[1])._maxima_().str()\n cmd=\"(TEMP:ic1(%s(%s,%s,%s),%s),substitute(%s=%s(%s),TEMP))\"%(ode_solver,de00,dvar_str,ivar_str,tempic,dvar_str,dvar_str,ivar_str)\n cmd=sanitize_var(cmd)\n # we produce string like this\n # (TEMP:ic2(ode2('diff(y,x,2)+2*'diff(y,x,1)+y-cos(x),y,x),x=0,y=3,'diff(y,x)=1),substitute(y=y(x),TEMP))\n soln=P(cmd)\n if len(ics) == 3:\n #fixed ic2 command from Maxima - we have to ensure that %k1, %k2 do not depend on variables, should be removed when fixed in Maxima\n P(\"ic2_sage(soln,xa,ya,dya):=block([programmode:true,backsubst:true,singsolve:true,temp,%k2,%k1,TEMP_k], \\\n noteqn(xa), noteqn(ya), noteqn(dya), boundtest('%k1,%k1), boundtest('%k2,%k2), \\\n temp: lhs(soln) - rhs(soln), \\\n TEMP_k:solve([subst([xa,ya],soln), subst([dya,xa], lhs(dya)=-subst(0,lhs(dya),diff(temp,lhs(xa)))/diff(temp,lhs(ya)))],[%k1,%k2]), \\\n if not freeof(lhs(ya),TEMP_k) or not freeof(lhs(xa),TEMP_k) then return (false), \\\n temp: maplist(lambda([zz], subst(zz,soln)), TEMP_k), \\\n if length(temp)=1 then return(first(temp)) else return(temp))\")\n tempic=P(ivar==ics[0]).str()\n tempic=tempic+\",\"+P(dvar==ics[1]).str()\n tempic=tempic+\",'diff(\"+dvar_str+\",\"+ivar_str+\")=\"+P(ics[2]).str()\n cmd=\"(TEMP:ic2_sage(%s(%s,%s,%s),%s),substitute(%s=%s(%s),TEMP))\"%(ode_solver,de00,dvar_str,ivar_str,tempic,dvar_str,dvar_str,ivar_str)\n cmd=sanitize_var(cmd)\n # we produce string like this\n # (TEMP:ic2(ode2('diff(y,x,2)+2*'diff(y,x,1)+y-cos(x),y,x),x=0,y=3,'diff(y,x)=1),substitute(y=y(x),TEMP))\n soln=P(cmd)\n if str(soln).strip() == 'false':\n raise NotImplementedError(\"Maxima was unable to solve this IVP. 
Remove the initial condition to get the general solution.\")\n if len(ics) == 4:\n #fixed bc2 command from Maxima - we have to ensure that %k1, %k2 do not depend on variables, should be removed when fixed in Maxima\n P(\"bc2_sage(soln,xa,ya,xb,yb):=block([programmode:true,backsubst:true,singsolve:true,temp,%k1,%k2,TEMP_k], \\\n noteqn(xa), noteqn(ya), noteqn(xb), noteqn(yb), boundtest('%k1,%k1), boundtest('%k2,%k2), \\\n TEMP_k:solve([subst([xa,ya],soln), subst([xb,yb],soln)], [%k1,%k2]), \\\n if not freeof(lhs(ya),TEMP_k) or not freeof(lhs(xa),TEMP_k) then return (false), \\\n temp: maplist(lambda([zz], subst(zz,soln)),TEMP_k), \\\n if length(temp)=1 then return(first(temp)) else return(temp))\")\n cmd=\"bc2_sage(%s(%s,%s,%s),%s,%s=%s,%s,%s=%s)\"%(ode_solver,de00,dvar_str,ivar_str,P(ivar==ics[0]).str(),dvar_str,P(ics[1]).str(),P(ivar==ics[2]).str(),dvar_str,P(ics[3]).str())\n cmd=\"(TEMP:%s,substitute(%s=%s(%s),TEMP))\"%(cmd,dvar_str,dvar_str,ivar_str)\n cmd=sanitize_var(cmd)\n # we produce string like this\n # (TEMP:bc2(ode2('diff(y,x,2)+2*'diff(y,x,1)+y-cos(x),y,x),x=0,y=3,x=%pi/2,y=2),substitute(y=y(x),TEMP))\n soln=P(cmd)\n if str(soln).strip() == 'false':\n raise NotImplementedError(\"Maxima was unable to solve this BVP. Remove the initial condition to get the general solution.\")\n\n soln=soln.sage()\n if is_SymbolicEquation(soln) and soln.lhs() == dvar:\n # Remark: Here we do not check that the right hand side does not depend on dvar.\n # This probably will not hapen for soutions obtained via ode2, anyway.\n soln = soln.rhs()\n if show_method:\n return [soln,maxima_method.str()]\n else:\n return soln\n\n\n#def desolve_laplace2(de,vars,ics=None):\n## \"\"\"\n## Solves an ODE using laplace transforms via maxima. Initial conditions\n## are optional.\n\n## INPUT:\n## de -- a lambda expression representing the ODE\n## (eg, de = \"diff(f(x),x,2)=diff(f(x),x)+sin(x)\")\n## vars -- a list of strings representing the variables\n## (eg, vars = [\"x\",\"f\"], if x is the independent\n## variable and f is the dependent variable)\n## ics -- a list of numbers representing initial conditions,\n## with symbols allowed which are represented by strings\n## (eg, f(0)=1, f'(0)=2 is ics = [0,1,2])\n\n## EXAMPLES:\n## sage: from sage.calculus.desolvers import desolve_laplace\n## sage: x = var('x')\n## sage: f = function('f', x)\n## sage: de = lambda y: diff(y,x,x) - 2*diff(y,x) + y\n## sage: desolve_laplace(de(f(x)),[f,x])\n## #x*%e^x*(?%at('diff('f(x),x,1),x=0))-'f(0)*x*%e^x+'f(0)*%e^x\n## sage: desolve_laplace(de(f(x)),[f,x],[0,1,2]) ## IC option does not work\n## #x*%e^x*(?%at('diff('f(x),x,1),x=0))-'f(0)*x*%e^x+'f(0)*%e^x\n\n## AUTHOR: David Joyner (1st version 1-2006, 8-2007)\n## \"\"\"\n# ######## this method seems reasonable but doesn't work for some reason\n# name0 = vars[0]._repr_()[0:(len(vars[0]._repr_())-2-len(str(vars[1])))]\n# name1 = str(vars[1])\n# #maxima(\"de:\"+de+\";\")\n# if ics!=None:\n# ic0 = maxima(\"ic:\"+str(vars[1])+\"=\"+str(ics[0]))\n# d = len(ics)\n# for i in range(d-1):\n# maxima(vars[0](vars[1])).diff(vars[1],i).atvalue(ic0,ics[i+1])\n# de0 = de._maxima_()\n# #cmd = \"desolve(\"+de+\",\"+vars[1]+\"(\"+vars[0]+\"));\"\n# #return maxima.eval(cmd)\n# return de0.desolve(vars[0]).rhs()\n\n\ndef desolve_laplace(de, dvar, ics=None, ivar=None):\n \"\"\"\n Solves an ODE using laplace transforms. 
Initials conditions are optional.\n\n INPUT:\n\n - ``de`` - a lambda expression representing the ODE (eg, de =\n diff(y,x,2) == diff(y,x)+sin(x))\n\n - ``dvar`` - the dependent variable (eg y)\n\n - ``ivar`` - (optional) the independent variable (hereafter called\n x), which must be specified if there is more than one\n independent variable in the equation.\n\n - ``ics`` - a list of numbers representing initial conditions, (eg,\n f(0)=1, f'(0)=2 is ics = [0,1,2])\n\n OUTPUT:\n\n Solution of the ODE as symbolic expression\n\n EXAMPLES::\n\n sage: u=function('u',x)\n sage: eq = diff(u,x) - exp(-x) - u == 0\n sage: desolve_laplace(eq,u)\n 1/2*(2*u(0) + 1)*e^x - 1/2*e^(-x)\n\n We can use initial conditions::\n\n sage: desolve_laplace(eq,u,ics=[0,3])\n -1/2*e^(-x) + 7/2*e^x\n\n The initial conditions do not persist in the system (as they persisted\n in previous versions)::\n\n sage: desolve_laplace(eq,u)\n 1/2*(2*u(0) + 1)*e^x - 1/2*e^(-x)\n\n ::\n\n sage: f=function('f', x)\n sage: eq = diff(f,x) + f == 0\n sage: desolve_laplace(eq,f,[0,1])\n e^(-x)\n\n ::\n\n sage: x = var('x')\n sage: f = function('f', x)\n sage: de = diff(f,x,x) - 2*diff(f,x) + f\n sage: desolve_laplace(de,f)\n -x*e^x*f(0) + x*e^x*D[0](f)(0) + e^x*f(0)\n\n ::\n\n sage: desolve_laplace(de,f,ics=[0,1,2])\n x*e^x + e^x\n\n TESTS:\n\n Trac #4839 fixed::\n\n sage: t=var('t')\n sage: x=function('x', t)\n sage: soln=desolve_laplace(diff(x,t)+x==1, x, ics=[0,2])\n sage: soln\n e^(-t) + 1\n\n ::\n\n sage: soln(t=3)\n e^(-3) + 1\n\n AUTHORS:\n\n - David Joyner (1-2006,8-2007)\n\n - Robert Marik (10-2009)\n \"\"\"\n #This is the original code from David Joyner (inputs and outputs strings)\n #maxima(\"de:\"+de._repr_()+\"=0;\")\n #if ics!=None:\n # d = len(ics)\n # for i in range(0,d-1):\n # ic = \"atvalue(diff(\"+vars[1]+\"(\"+vars[0]+\"),\"+str(vars[0])+\",\"+str(i)+\"),\"+str(vars[0])+\"=\"+str(ics[0])+\",\"+str(ics[1+i])+\")\"\n # maxima(ic)\n #\n #cmd = \"desolve(\"+de._repr_()+\",\"+vars[1]+\"(\"+vars[0]+\"));\"\n #return maxima(cmd).rhs()._maxima_init_()\n\n ## verbatim copy from desolve - begin\n if is_SymbolicEquation(de):\n de = de.lhs() - de.rhs()\n if is_SymbolicVariable(dvar):\n raise ValueError(\"You have to declare dependent variable as a function, eg. y=function('y',x)\")\n # for backwards compatibility\n if isinstance(dvar, list):\n dvar, ivar = dvar\n elif ivar is None:\n ivars = de.variables()\n ivars = [t for t in ivars if t != dvar]\n if len(ivars) != 1:\n raise ValueError(\"Unable to determine independent variable, please specify.\")\n ivar = ivars[0]\n ## verbatim copy from desolve - end\n\n def sanitize_var(exprs): # 'y(x) -> y(x)\n return exprs.replace(\"'\"+str(dvar),str(dvar))\n de0=de._maxima_()\n P = de0.parent()\n cmd = sanitize_var(\"desolve(\"+de0.str()+\",\"+str(dvar)+\")\")\n soln=P(cmd).rhs()\n if str(soln).strip() == 'false':\n raise NotImplementedError(\"Maxima was unable to solve this ODE.\")\n soln=soln.sage()\n if ics!=None:\n d = len(ics)\n for i in range(0,d-1):\n soln=eval('soln.substitute(diff(dvar,ivar,i)('+str(ivar)+'=ics[0])==ics[i+1])')\n return soln\n\n\ndef desolve_system(des, vars, ics=None, ivar=None):\n \"\"\"\n Solves any size system of 1st order ODE's. 
Initials conditions are optional.\n\n Onedimensional systems are passed to :meth:`desolve_laplace`.\n\n INPUT:\n\n - ``des`` - list of ODEs\n\n - ``vars`` - list of dependent variables\n\n - ``ics`` - (optional) list of initial values for ivar and vars\n\n - ``ivar`` - (optional) the independent variable, which must be\n specified if there is more than one independent variable in the\n equation.\n\n EXAMPLES::\n\n sage: t = var('t')\n sage: x = function('x', t)\n sage: y = function('y', t)\n sage: de1 = diff(x,t) + y - 1 == 0\n sage: de2 = diff(y,t) - x + 1 == 0\n sage: desolve_system([de1, de2], [x,y])\n [x(t) == (x(0) - 1)*cos(t) - (y(0) - 1)*sin(t) + 1,\n y(t) == (y(0) - 1)*cos(t) + (x(0) - 1)*sin(t) + 1]\n\n Now we give some initial conditions::\n\n sage: sol = desolve_system([de1, de2], [x,y], ics=[0,1,2]); sol\n [x(t) == -sin(t) + 1, y(t) == cos(t) + 1]\n\n ::\n\n sage: solnx, solny = sol[0].rhs(), sol[1].rhs()\n sage: plot([solnx,solny],(0,1)) # not tested\n sage: parametric_plot((solnx,solny),(0,1)) # not tested\n\n TESTS:\n\n Trac #9823 fixed::\n\n sage: t = var('t')\n sage: x = function('x', t)\n sage: de1 = diff(x,t) + 1 == 0\n sage: desolve_system([de1], [x])\n -t + x(0)\n\n AUTHORS:\n\n - Robert Bradshaw (10-2008)\n \"\"\"\n if len(des)==1:\n return desolve_laplace(des[0], vars[0], ics=ics, ivar=ivar)\n ivars = set([])\n for i, de in enumerate(des):\n if not is_SymbolicEquation(de):\n des[i] = de == 0\n ivars = ivars.union(set(de.variables()))\n if ivar is None:\n ivars = ivars - set(vars)\n if len(ivars) != 1:\n raise ValueError(\"Unable to determine independent variable, please specify.\")\n ivar = list(ivars)[0]\n dvars = [v._maxima_() for v in vars]\n if ics is not None:\n ivar_ic = ics[0]\n for dvar, ic in zip(dvars, ics[1:]):\n dvar.atvalue(ivar==ivar_ic, ic)\n soln = dvars[0].parent().desolve(des, dvars)\n if str(soln).strip() == 'false':\n raise NotImplementedError(\"Maxima was unable to solve this system.\")\n soln = list(soln)\n for i, sol in enumerate(soln):\n soln[i] = sol.sage()\n if ics is not None:\n ivar_ic = ics[0]\n for dvar, ic in zip(dvars, ics[:1]):\n dvar.atvalue(ivar==ivar_ic, dvar)\n return soln\n\n\ndef desolve_system_strings(des,vars,ics=None):\n r\"\"\"\n Solves any size system of 1st order ODE's. 
Initials conditions are optional.\n\n This function is obsolete, use desolve_system.\n\n INPUT:\n\n - ``de`` - a list of strings representing the ODEs in maxima\n notation (eg, de = \"diff(f(x),x,2)=diff(f(x),x)+sin(x)\")\n\n - ``vars`` - a list of strings representing the variables (eg,\n vars = [\"s\",\"x\",\"y\"], where s is the independent variable and\n x,y the dependent variables)\n\n - ``ics`` - a list of numbers representing initial conditions\n (eg, x(0)=1, y(0)=2 is ics = [0,1,2])\n\n WARNING:\n\n The given ics sets the initial values of the dependent vars in\n maxima, so subsequent ODEs involving these variables will have\n these initial conditions automatically imposed.\n\n EXAMPLES::\n\n sage: from sage.calculus.desolvers import desolve_system_strings\n sage: s = var('s')\n sage: function('x', s)\n x(s)\n\n ::\n\n sage: function('y', s)\n y(s)\n\n ::\n\n sage: de1 = lambda z: diff(z[0],s) + z[1] - 1\n sage: de2 = lambda z: diff(z[1],s) - z[0] + 1\n sage: des = [de1([x(s),y(s)]),de2([x(s),y(s)])]\n sage: vars = [\"s\",\"x\",\"y\"]\n sage: desolve_system_strings(des,vars)\n [\"(1-'y(0))*sin(s)+('x(0)-1)*cos(s)+1\", \"('x(0)-1)*sin(s)+('y(0)-1)*cos(s)+1\"]\n\n ::\n\n sage: ics = [0,1,-1]\n sage: soln = desolve_system_strings(des,vars,ics); soln\n ['2*sin(s)+1', '1-2*cos(s)']\n\n ::\n\n sage: solnx, solny = map(SR, soln)\n sage: RR(solnx(s=3))\n 1.28224001611973\n\n ::\n\n sage: P1 = plot([solnx,solny],(0,1))\n sage: P2 = parametric_plot((solnx,solny),(0,1))\n\n Now type show(P1), show(P2) to view these.\n\n\n AUTHORS:\n\n - David Joyner (3-2006, 8-2007)\n \"\"\"\n d = len(des)\n dess = [de._maxima_init_() + \"=0\" for de in des]\n for i in range(d):\n cmd=\"de:\" + dess[int(i)] + \";\"\n maxima.eval(cmd)\n desstr = \"[\" + \",\".join(dess) + \"]\"\n d = len(vars)\n varss = list(\"'\" + vars[i] + \"(\" + vars[0] + \")\" for i in range(1,d))\n varstr = \"[\" + \",\".join(varss) + \"]\"\n if ics is not None:\n #d = len(ics) ## must be same as len(des)\n for i in range(1,d):\n ic = \"atvalue('\" + vars[i] + \"(\"+vars[0] + \"),\" + str(vars[0]) + \"=\" + str(ics[0]) + \",\" + str(ics[i]) + \")\"\n maxima.eval(ic)\n cmd = \"desolve(\" + desstr + \",\" + varstr + \");\"\n soln = maxima(cmd)\n return [f.rhs()._maxima_init_() for f in soln]\n\n@rename_keyword(deprecation=6094, method=\"algorithm\")\ndef eulers_method(f,x0,y0,h,x1,algorithm=\"table\"):\n r\"\"\"\n This implements Euler's method for finding numerically the\n solution of the 1st order ODE ``y' = f(x,y)``, ``y(a)=c``. The \"x\"\n column of the table increments from ``x0`` to ``x1`` by ``h`` (so\n ``(x1-x0)/h`` must be an integer). 
In the \"y\" column, the new\n y-value equals the old y-value plus the corresponding entry in the\n last column.\n\n *For pedagogical purposes only.*\n\n EXAMPLES::\n\n sage: from sage.calculus.desolvers import eulers_method\n sage: x,y = PolynomialRing(QQ,2,\"xy\").gens()\n sage: eulers_method(5*x+y-5,0,1,1/2,1)\n x y h*f(x,y)\n 0 1 -2\n 1/2 -1 -7/4\n 1 -11/4 -11/8\n\n ::\n\n sage: x,y = PolynomialRing(QQ,2,\"xy\").gens()\n sage: eulers_method(5*x+y-5,0,1,1/2,1,algorithm=\"none\")\n [[0, 1], [1/2, -1], [1, -11/4], [3/2, -33/8]]\n\n ::\n\n sage: RR = RealField(sci_not=0, prec=4, rnd='RNDU')\n sage: x,y = PolynomialRing(RR,2,\"xy\").gens()\n sage: eulers_method(5*x+y-5,0,1,1/2,1,algorithm=\"None\")\n [[0, 1], [1/2, -1.0], [1, -2.7], [3/2, -4.0]]\n\n ::\n\n sage: RR = RealField(sci_not=0, prec=4, rnd='RNDU')\n sage: x,y=PolynomialRing(RR,2,\"xy\").gens()\n sage: eulers_method(5*x+y-5,0,1,1/2,1)\n x y h*f(x,y)\n 0 1 -2.0\n 1/2 -1.0 -1.7\n 1 -2.7 -1.3\n\n ::\n\n sage: x,y=PolynomialRing(QQ,2,\"xy\").gens()\n sage: eulers_method(5*x+y-5,1,1,1/3,2)\n x y h*f(x,y)\n 1 1 1/3\n 4/3 4/3 1\n 5/3 7/3 17/9\n 2 38/9 83/27\n\n ::\n\n sage: eulers_method(5*x+y-5,0,1,1/2,1,algorithm=\"none\")\n [[0, 1], [1/2, -1], [1, -11/4], [3/2, -33/8]]\n\n ::\n\n sage: pts = eulers_method(5*x+y-5,0,1,1/2,1,algorithm=\"none\")\n sage: P1 = list_plot(pts)\n sage: P2 = line(pts)\n sage: (P1+P2).show()\n\n AUTHORS:\n\n - David Joyner\n \"\"\"\n if algorithm==\"table\":\n print(\"%10s %20s %25s\"%(\"x\",\"y\",\"h*f(x,y)\"))\n n=int((1.0)*(x1-x0)/h)\n x00=x0; y00=y0\n soln = [[x00,y00]]\n for i in range(n+1):\n if algorithm==\"table\":\n print(\"%10r %20r %20r\"%(x00,y00,h*f(x00,y00)))\n y00 = y00+h*f(x00,y00)\n x00=x00+h\n soln.append([x00,y00])\n if algorithm!=\"table\":\n return soln\n\n@rename_keyword(deprecation=6094, method=\"algorithm\")\ndef eulers_method_2x2(f,g, t0, x0, y0, h, t1,algorithm=\"table\"):\n r\"\"\"\n This implements Euler's method for finding numerically the\n solution of the 1st order system of two ODEs\n\n ``x' = f(t, x, y), x(t0)=x0.``\n\n ``y' = g(t, x, y), y(t0)=y0.``\n\n The \"t\" column of the table increments from `t_0` to `t_1` by `h`\n (so `\\\\frac{t_1-t_0}{h}` must be an integer). In the \"x\" column,\n the new x-value equals the old x-value plus the corresponding\n entry in the next (third) column. 
In the \"y\" column, the new\n y-value equals the old y-value plus the corresponding entry in the\n next (last) column.\n\n *For pedagogical purposes only.*\n\n EXAMPLES::\n\n sage: from sage.calculus.desolvers import eulers_method_2x2\n sage: t, x, y = PolynomialRing(QQ,3,\"txy\").gens()\n sage: f = x+y+t; g = x-y\n sage: eulers_method_2x2(f,g, 0, 0, 0, 1/3, 1,algorithm=\"none\")\n [[0, 0, 0], [1/3, 0, 0], [2/3, 1/9, 0], [1, 10/27, 1/27], [4/3, 68/81, 4/27]]\n\n ::\n\n sage: eulers_method_2x2(f,g, 0, 0, 0, 1/3, 1)\n t x h*f(t,x,y) y h*g(t,x,y)\n 0 0 0 0 0\n 1/3 0 1/9 0 0\n 2/3 1/9 7/27 0 1/27\n 1 10/27 38/81 1/27 1/9\n\n ::\n\n sage: RR = RealField(sci_not=0, prec=4, rnd='RNDU')\n sage: t,x,y=PolynomialRing(RR,3,\"txy\").gens()\n sage: f = x+y+t; g = x-y\n sage: eulers_method_2x2(f,g, 0, 0, 0, 1/3, 1)\n t x h*f(t,x,y) y h*g(t,x,y)\n 0 0 0.00 0 0.00\n 1/3 0.00 0.13 0.00 0.00\n 2/3 0.13 0.29 0.00 0.043\n 1 0.41 0.57 0.043 0.15\n\n To numerically approximate `y(1)`, where `(1+t^2)y''+y'-y=0`,\n `y(0)=1`, `y'(0)=-1`, using 4 steps of Euler's method, first\n convert to a system: `y_1' = y_2`, `y_1(0)=1`; `y_2' =\n \\\\frac{y_1-y_2}{1+t^2}`, `y_2(0)=-1`.::\n\n sage: RR = RealField(sci_not=0, prec=4, rnd='RNDU')\n sage: t, x, y=PolynomialRing(RR,3,\"txy\").gens()\n sage: f = y; g = (x-y)/(1+t^2)\n sage: eulers_method_2x2(f,g, 0, 1, -1, 1/4, 1)\n t x h*f(t,x,y) y h*g(t,x,y)\n 0 1 -0.25 -1 0.50\n 1/4 0.75 -0.12 -0.50 0.29\n 1/2 0.63 -0.054 -0.21 0.19\n 3/4 0.63 -0.0078 -0.031 0.11\n 1 0.63 0.020 0.079 0.071\n\n To numerically approximate y(1), where `y''+ty'+y=0`, `y(0)=1`, `y'(0)=0`::\n\n sage: t,x,y=PolynomialRing(RR,3,\"txy\").gens()\n sage: f = y; g = -x-y*t\n sage: eulers_method_2x2(f,g, 0, 1, 0, 1/4, 1)\n t x h*f(t,x,y) y h*g(t,x,y)\n 0 1 0.00 0 -0.25\n 1/4 1.0 -0.062 -0.25 -0.23\n 1/2 0.94 -0.11 -0.46 -0.17\n 3/4 0.88 -0.15 -0.62 -0.10\n 1 0.75 -0.17 -0.68 -0.015\n\n AUTHORS:\n\n - David Joyner\n \"\"\"\n if algorithm==\"table\":\n print(\"%10s %20s %25s %20s %20s\"%(\"t\", \"x\",\"h*f(t,x,y)\",\"y\", \"h*g(t,x,y)\"))\n n=int((1.0)*(t1-t0)/h)\n t00 = t0; x00 = x0; y00 = y0\n soln = [[t00,x00,y00]]\n for i in range(n+1):\n if algorithm==\"table\":\n print(\"%10r %20r %25r %20r %20r\"%(t00,x00,h*f(t00,x00,y00),y00,h*g(t00,x00,y00)))\n x01 = x00 + h*f(t00,x00,y00)\n y00 = y00 + h*g(t00,x00,y00)\n x00 = x01\n t00 = t00 + h\n soln.append([t00,x00,y00])\n if algorithm!=\"table\":\n return soln\n\ndef eulers_method_2x2_plot(f,g, t0, x0, y0, h, t1):\n r\"\"\"\n Plots solution of ODE\n\n This plots the soln in the rectangle ``(xrange[0],xrange[1])\n x (yrange[0],yrange[1])`` and plots using Euler's method the\n numerical solution of the 1st order ODEs `x' = f(t,x,y)`,\n `x(a)=x_0`, `y' = g(t,x,y)`, `y(a) = y_0`.\n\n *For pedagogical purposes only.*\n\n EXAMPLES::\n\n sage: from sage.calculus.desolvers import eulers_method_2x2_plot\n\n The following example plots the solution to\n `\\theta''+\\sin(\\theta)=0`, `\\theta(0)=\\frac 34`, `\\theta'(0) =\n 0`. 
Type ``P[0].show()`` to plot the solution,\n ``(P[0]+P[1]).show()`` to plot `(t,\\theta(t))` and\n `(t,\\theta'(t))`::\n\n sage: f = lambda z : z[2]; g = lambda z : -sin(z[1])\n sage: P = eulers_method_2x2_plot(f,g, 0.0, 0.75, 0.0, 0.1, 1.0)\n \"\"\"\n n=int((1.0)*(t1-t0)/h)\n t00 = t0; x00 = x0; y00 = y0\n soln = [[t00,x00,y00]]\n for i in range(n+1):\n x01 = x00 + h*f([t00,x00,y00])\n y00 = y00 + h*g([t00,x00,y00])\n x00 = x01\n t00 = t00 + h\n soln.append([t00,x00,y00])\n Q1 = line([[x[0],x[1]] for x in soln], rgbcolor=(1/4,1/8,3/4))\n Q2 = line([[x[0],x[2]] for x in soln], rgbcolor=(1/2,1/8,1/4))\n return [Q1,Q2]\n\ndef desolve_rk4_determine_bounds(ics,end_points=None):\n \"\"\"\n Used to determine bounds for numerical integration.\n\n - If end_points is None, the interval for integration is from ics[0]\n to ics[0]+10\n\n - If end_points is a or [a], the interval for integration is from min(ics[0],a)\n to max(ics[0],a)\n\n - If end_points is [a,b], the interval for integration is from min(ics[0],a)\n to max(ics[0],b)\n\n EXAMPLES::\n\n sage: from sage.calculus.desolvers import desolve_rk4_determine_bounds\n sage: desolve_rk4_determine_bounds([0,2],1)\n (0, 1)\n\n ::\n\n sage: desolve_rk4_determine_bounds([0,2])\n (0, 10)\n\n ::\n\n sage: desolve_rk4_determine_bounds([0,2],[-2])\n (-2, 0)\n\n ::\n\n sage: desolve_rk4_determine_bounds([0,2],[-2,4])\n (-2, 4)\n\n \"\"\"\n if end_points is None:\n return((ics[0],ics[0]+10))\n if not isinstance(end_points,list):\n end_points=[end_points]\n if len(end_points)==1:\n return (min(ics[0],end_points[0]),max(ics[0],end_points[0]))\n else:\n return (min(ics[0],end_points[0]),max(ics[0],end_points[1]))\n\n\ndef desolve_rk4(de, dvar, ics=None, ivar=None, end_points=None, step=0.1, output='list', **kwds):\n \"\"\"\n Solves numerically one first-order ordinary differential\n equation. See also ``ode_solver``.\n\n INPUT:\n\n input is similar to ``desolve`` command. The differential equation can be\n written in a form close to the plot_slope_field or desolve command\n\n - Variant 1 (function in two variables)\n\n - ``de`` - right hand side, i.e. 
the function `f(x,y)` from ODE `y'=f(x,y)`\n\n - ``dvar`` - dependent variable (symbolic variable declared by var)\n\n - Variant 2 (symbolic equation)\n\n - ``de`` - equation, including term with ``diff(y,x)``\n\n - ``dvar``` - dependent variable (declared as funciton of independent variable)\n\n - Other parameters\n\n - ``ivar`` - should be specified, if there are more variables or if the equation is autonomous\n\n - ``ics`` - initial conditions in the form [x0,y0]\n\n - ``end_points`` - the end points of the interval\n\n - if end_points is a or [a], we integrate on between min(ics[0],a) and max(ics[0],a)\n - if end_points is None, we use end_points=ics[0]+10\n\n - if end_points is [a,b] we integrate on between min(ics[0],a) and max(ics[0],b)\n\n - ``step`` - (optional, default:0.1) the length of the step (positive number)\n\n - ``output`` - (optional, default: 'list') one of 'list',\n 'plot', 'slope_field' (graph of the solution with slope field)\n\n OUTPUT:\n\n Returns a list of points, or plot produced by list_plot,\n optionally with slope field.\n\n\n EXAMPLES::\n\n sage: from sage.calculus.desolvers import desolve_rk4\n\n Variant 2 for input - more common in numerics::\n\n sage: x,y=var('x y')\n sage: desolve_rk4(x*y*(2-y),y,ics=[0,1],end_points=1,step=0.5)\n [[0, 1], [0.5, 1.12419127425], [1.0, 1.46159016229]]\n\n Variant 1 for input - we can pass ODE in the form used by\n desolve function In this example we integrate bakwards, since\n ``end_points < ics[0]``::\n\n sage: y=function('y',x)\n sage: desolve_rk4(diff(y,x)+y*(y-1) == x-2,y,ics=[1,1],step=0.5, end_points=0)\n [[0.0, 8.90425710896], [0.5, 1.90932794536], [1, 1]]\n\n Here we show how to plot simple pictures. For more advanced\n aplications use list_plot instead. To see the resulting picture\n use ``show(P)`` in Sage notebook. ::\n\n sage: x,y=var('x y')\n sage: P=desolve_rk4(y*(2-y),y,ics=[0,.1],ivar=x,output='slope_field',end_points=[-4,6],thickness=3)\n\n ALGORITHM:\n\n 4th order Runge-Kutta method. Wrapper for command ``rk`` in\n Maxima's dynamics package. 
Perhaps could be faster by using\n fast_float instead.\n\n AUTHORS:\n\n - Robert Marik (10-2009)\n \"\"\"\n if ics is None:\n raise ValueError(\"No initial conditions, specify with ics=[x0,y0].\")\n\n if ivar is None:\n ivars = de.variables()\n ivars = [t for t in ivars if t != dvar]\n if len(ivars) != 1:\n raise ValueError(\"Unable to determine independent variable, please specify.\")\n ivar = ivars[0]\n\n if not is_SymbolicVariable(dvar):\n from sage.calculus.var import var\n from sage.calculus.all import diff\n from sage.symbolic.relation import solve\n if is_SymbolicEquation(de):\n de = de.lhs() - de.rhs()\n dummy_dvar=var('dummy_dvar')\n # consider to add warning if the solution is not unique\n de=solve(de,diff(dvar,ivar),solution_dict=True)\n if len(de) != 1:\n raise NotImplementedError(\"Sorry, cannot find explicit formula for right-hand side of the ODE.\")\n de=de[0][diff(dvar,ivar)].subs(dvar==dummy_dvar)\n else:\n dummy_dvar=dvar\n\n step=abs(step)\n de0=de._maxima_()\n maxima(\"load('dynamics)\")\n lower_bound,upper_bound=desolve_rk4_determine_bounds(ics,end_points)\n sol_1, sol_2 = [],[]\n if lower_bound<ics[0]:\n cmd=\"rk(%s,%s,%s,[%s,%s,%s,%s])\\\n \"%(de0.str(),str(dummy_dvar),str(ics[1]),str(ivar),str(ics[0]),lower_bound,-step)\n sol_1=maxima(cmd).sage()\n sol_1.pop(0)\n sol_1.reverse()\n if upper_bound>ics[0]:\n cmd=\"rk(%s,%s,%s,[%s,%s,%s,%s])\\\n \"%(de0.str(),str(dummy_dvar),str(ics[1]),str(ivar),str(ics[0]),upper_bound,step)\n sol_2=maxima(cmd).sage()\n sol_2.pop(0)\n sol=sol_1\n sol.extend([[ics[0],ics[1]]])\n sol.extend(sol_2)\n\n if output=='list':\n return sol\n from sage.plot.plot import list_plot\n from sage.plot.plot_field import plot_slope_field\n R = list_plot(sol,plotjoined=True,**kwds)\n if output=='plot':\n return R\n if output=='slope_field':\n XMIN=sol[0][0]\n YMIN=sol[0][1]\n XMAX=XMIN\n YMAX=YMIN\n for s,t in sol:\n if s>XMAX:XMAX=s\n if s<XMIN:XMIN=s\n if t>YMAX:YMAX=t\n if t<YMIN:YMIN=t\n return plot_slope_field(de,(ivar,XMIN,XMAX),(dummy_dvar,YMIN,YMAX))+R\n\n raise ValueError(\"Option output should be 'list', 'plot' or 'slope_field'.\")\n\ndef desolve_system_rk4(des, vars, ics=None, ivar=None, end_points=None, step=0.1):\n r\"\"\"\n Solves numerically system of first-order ordinary differential\n equations using the 4th order Runge-Kutta method. Wrapper for\n Maxima command ``rk``. 
See also ``ode_solver``.\n\n INPUT:\n\n input is similar to desolve_system and desolve_rk4 commands\n\n - ``des`` - right hand sides of the system\n\n - ``vars`` - dependent variables\n\n - ``ivar`` - (optional) should be specified, if there are more variables or\n if the equation is autonomous and the independent variable is\n missing\n\n - ``ics`` - initial conditions in the form [x0,y01,y02,y03,....]\n\n - ``end_points`` - the end points of the interval\n\n - if end_points is a or [a], we integrate on between min(ics[0],a) and max(ics[0],a)\n - if end_points is None, we use end_points=ics[0]+10\n\n - if end_points is [a,b] we integrate on between min(ics[0],a) and max(ics[0],b)\n\n - ``step`` -- (optional, default: 0.1) the length of the step\n\n OUTPUT:\n\n Returns a list of points.\n\n EXAMPLES::\n\n sage: from sage.calculus.desolvers import desolve_system_rk4\n\n Lotka Volterra system::\n\n sage: from sage.calculus.desolvers import desolve_system_rk4\n sage: x,y,t=var('x y t')\n sage: P=desolve_system_rk4([x*(1-y),-y*(1-x)],[x,y],ics=[0,0.5,2],ivar=t,end_points=20)\n sage: Q=[ [i,j] for i,j,k in P]\n sage: LP=list_plot(Q)\n\n sage: Q=[ [j,k] for i,j,k in P]\n sage: LP=list_plot(Q)\n\n ALGORITHM:\n\n 4th order Runge-Kutta method. Wrapper for command ``rk`` in Maxima's\n dynamics package. Perhaps could be faster by using ``fast_float``\n instead.\n\n AUTHOR:\n\n - Robert Marik (10-2009)\n \"\"\"\n\n if ics is None:\n raise ValueError(\"No initial conditions, specify with ics=[x0,y01,y02,...].\")\n\n ivars = set([])\n\n for de in des:\n ivars = ivars.union(set(de.variables()))\n if ivar is None:\n ivars = ivars - set(vars)\n if len(ivars) != 1:\n raise ValueError(\"Unable to determine independent variable, please specify.\")\n ivar = list(ivars)[0]\n\n dess = [de._maxima_().str() for de in des]\n desstr = \"[\" + \",\".join(dess) + \"]\"\n varss = [varsi._maxima_().str() for varsi in vars]\n varstr = \"[\" + \",\".join(varss) + \"]\"\n x0=ics[0]\n icss = [ics[i]._maxima_().str() for i in range(1,len(ics))]\n icstr = \"[\" + \",\".join(icss) + \"]\"\n step=abs(step)\n\n maxima(\"load('dynamics)\")\n lower_bound,upper_bound=desolve_rk4_determine_bounds(ics,end_points)\n sol_1, sol_2 = [],[]\n if lower_bound<ics[0]:\n cmd=\"rk(%s,%s,%s,[%s,%s,%s,%s])\\\n \"%(desstr,varstr,icstr,str(ivar),str(x0),lower_bound,-step)\n sol_1=maxima(cmd).sage()\n sol_1.pop(0)\n sol_1.reverse()\n if upper_bound>ics[0]:\n cmd=\"rk(%s,%s,%s,[%s,%s,%s,%s])\\\n \"%(desstr,varstr,icstr,str(ivar),str(x0),upper_bound,step)\n sol_2=maxima(cmd).sage()\n sol_2.pop(0)\n sol=sol_1\n sol.append(ics)\n sol.extend(sol_2)\n\n return sol\n\ndef desolve_odeint(des, ics, times, dvars, ivar=None, compute_jac=False, args=()\n, rtol=None, atol=None, tcrit=None, h0=0.0, hmax=0.0, hmin=0.0, ixpr=0\n, mxstep=0, mxhnil=0, mxordn=12, mxords=5, printmessg=0):\n r\"\"\"\n Solves numerically a system of first-order ordinary differential equations\n using ``odeint`` from scipy.integrate module.\n\n INPUT:\n\n - ``des`` -- right hand sides of the system\n\n - ``ics`` -- initial conditions\n\n - ``times`` -- a sequence of time points in which the solution must be found\n\n - ``dvars`` -- dependent variables. ATTENTION: the order must be the same as\n in des, that means: d(dvars[i])/dt=des[i]\n\n - ``ivar`` -- independent variable, optional.\n\n - ``compute_jac`` -- boolean. If True, the Jacobian of des is computed and\n used during the integration of Stiff Systems. 
Default value is False.\n\n Other Parameters (taken from the documentation of odeint function from\n scipy.integrate module)\n\n - ``rtol``, ``atol`` : float\n The input parameters rtol and atol determine the error\n control performed by the solver. The solver will control the\n vector, e, of estimated local errors in y, according to an\n inequality of the form:\n\n max-norm of (e / ewt) <= 1\n\n where ewt is a vector of positive error weights computed as:\n\n ewt = rtol * abs(y) + atol\n\n rtol and atol can be either vectors the same length as y or scalars.\n\n - ``tcrit`` : array\n Vector of critical points (e.g. singularities) where integration\n care should be taken.\n\n - ``h0`` : float, (0: solver-determined)\n The step size to be attempted on the first step.\n\n - ``hmax`` : float, (0: solver-determined)\n The maximum absolute step size allowed.\n\n - ``hmin`` : float, (0: solver-determined)\n The minimum absolute step size allowed.\n\n - ``ixpr`` : boolean.\n Whether to generate extra printing at method switches.\n\n - ``mxstep`` : integer, (0: solver-determined)\n Maximum number of (internally defined) steps allowed for each\n integration point in t.\n\n - ``mxhnil`` : integer, (0: solver-determined)\n Maximum number of messages printed.\n\n - ``mxordn`` : integer, (0: solver-determined)\n Maximum order to be allowed for the nonstiff (Adams) method.\n\n - ``mxords`` : integer, (0: solver-determined)\n Maximum order to be allowed for the stiff (BDF) method.\n\n OUTPUT:\n\n Returns a list with the solution of the system at each time in times.\n\n EXAMPLES:\n\n Lotka Volterra Equations::\n\n sage: from sage.calculus.desolvers import desolve_odeint\n sage: x,y=var('x,y')\n sage: f=[x*(1-y),-y*(1-x)]\n sage: sol=desolve_odeint(f,[0.5,2],srange(0,10,0.1),[x,y])\n sage: p=line(zip(sol[:,0],sol[:,1]))\n sage: p.show()\n\n Lorenz Equations::\n\n sage: x,y,z=var('x,y,z')\n sage: # Next we define the parameters\n sage: sigma=10\n sage: rho=28\n sage: beta=8/3\n sage: # The Lorenz equations\n sage: lorenz=[sigma*(y-x),x*(rho-z)-y,x*y-beta*z]\n sage: # Time and initial conditions\n sage: times=srange(0,50.05,0.05)\n sage: ics=[0,1,1]\n sage: sol=desolve_odeint(lorenz,ics,times,[x,y,z],rtol=1e-13,atol=1e-14)\n\n One-dimensional Stiff system::\n\n sage: y= var('y')\n sage: epsilon=0.01\n sage: f=y^2*(1-y)\n sage: ic=epsilon\n sage: t=srange(0,2/epsilon,1)\n sage: sol=desolve_odeint(f,ic,t,y,rtol=1e-9,atol=1e-10,compute_jac=True)\n sage: p=points(zip(t,sol))\n sage: p.show()\n\n Another Stiff system with some optional parameters with no\n default value::\n\n sage: y1,y2,y3=var('y1,y2,y3')\n sage: f1=77.27*(y2+y1*(1-8.375*1e-6*y1-y2))\n sage: f2=1/77.27*(y3-(1+y1)*y2)\n sage: f3=0.16*(y1-y3)\n sage: f=[f1,f2,f3]\n sage: ci=[0.2,0.4,0.7]\n sage: t=srange(0,10,0.01)\n sage: v=[y1,y2,y3]\n sage: sol=desolve_odeint(f,ci,t,v,rtol=1e-3,atol=1e-4,h0=0.1,hmax=1,hmin=1e-4,mxstep=1000,mxords=17)\n\n AUTHOR:\n\n - Oriol Castejon (05-2010)\n \"\"\"\n\n from scipy.integrate import odeint\n from sage.ext.fast_eval import fast_float\n from sage.calculus.functions import jacobian\n\n if ivar==None:\n if len(dvars)==0 or len(dvars)==1:\n if len(dvars)==1:\n des=des[0]\n dvars=dvars[0]\n all_vars = set(des.variables())\n else:\n all_vars = set([])\n for de in des:\n all_vars.update(set(de.variables()))\n if is_SymbolicVariable(dvars):\n ivars = all_vars - set([dvars])\n else:\n ivars = all_vars - set(dvars)\n\n if len(ivars)==1:\n ivar = ivars.pop()\n elif not ivars:\n from sage.symbolic.ring import var\n try:\n 
safe_names = [ 't_' + str(dvar) for dvar in dvars ]\n except TypeError: # not iterable\n safe_names = [ 't_' + str(dvars) ]\n ivar = map(var, safe_names)\n else:\n raise ValueError(\"Unable to determine independent variable, please specify.\")\n\n # one-dimensional systems:\n if is_SymbolicVariable(dvars):\n func = fast_float(des,dvars,ivar)\n if not compute_jac:\n Dfun=None\n else:\n J = diff(des,dvars)\n J = fast_float(J,dvars,ivar)\n Dfun = lambda y,t: [J(y,t)]\n\n # n-dimensional systems:\n else:\n desc = []\n variabs = dvars[:]\n variabs.append(ivar)\n for de in des:\n desc.append(fast_float(de,*variabs))\n\n def func(y,t):\n v = list(y[:])\n v.append(t)\n return [dec(*v) for dec in desc]\n\n if not compute_jac:\n Dfun=None\n else:\n J = jacobian(des,dvars)\n J = [list(v) for v in J]\n J = fast_float(J,*variabs)\n def Dfun(y,t):\n v = list(y[:])\n v.append(t)\n return [[element(*v) for element in row] for row in J]\n\n\n sol=odeint(func, ics, times, args=args, Dfun=Dfun, rtol=rtol, atol=atol,\n tcrit=tcrit, h0=h0, hmax=hmax, hmin=hmin, ixpr=ixpr, mxstep=mxstep,\n mxhnil=mxhnil, mxordn=mxordn, mxords=mxords, printmessg=printmessg)\n\n return sol\n"
] |
[
[
"scipy.integrate.odeint"
]
] |
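The apis column of the record above reduces the whole Sage `desolvers` file to one external dependency, `scipy.integrate.odeint`, which `desolve_odeint` wraps after compiling the symbolic right-hand sides with `fast_float`. A minimal, Sage-free sketch of the same call pattern follows, using the Lotka-Volterra system from the docstring; the plain-Python `rhs`/`jac` helpers and the tolerance values are illustrative assumptions, not part of the source file.

```python
# Sketch of the scipy.integrate.odeint call that desolve_odeint wraps.
# The symbolic machinery (fast_float, jacobian) is replaced by plain Python
# callables; values mirror the Lotka-Volterra example in the docstring.
import numpy as np
from scipy.integrate import odeint

def rhs(y, t):
    # dx/dt = x*(1 - y),  dy/dt = -y*(1 - x)
    x, v = y
    return [x * (1.0 - v), -v * (1.0 - x)]

def jac(y, t):
    # Optional analytic Jacobian, analogous to the compute_jac=True branch.
    x, v = y
    return [[1.0 - v, -x],
            [v, -(1.0 - x)]]

times = np.arange(0.0, 10.0, 0.1)   # same role as srange(0, 10, 0.1)
sol = odeint(rhs, [0.5, 2.0], times, Dfun=jac, rtol=1e-9, atol=1e-10)
print(sol.shape)                    # (len(times), 2): one row per time point
```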
hubertjb/dynamic-spatial-filtering
|
[
"4580d60c06cd926b34470b8d05d4d72f8e2fd58c"
] |
[
"datasets.py"
] |
[
"\"\"\"Dataset-related functions and classes.\n\nInspired by `mne.datasets.sleep_physionet`.\n\"\"\"\n\nimport os\nimport os.path as op\n\nimport mne\nimport wfdb\nimport numpy as np\nimport pandas as pd\nfrom mne.datasets.utils import _get_path\nfrom mne.datasets.sleep_physionet._utils import _fetch_one\nfrom braindecode.datasets import BaseDataset, BaseConcatDataset\nfrom braindecode.datautil.preprocess import _preprocess\nfrom joblib import Parallel, delayed\n\n\nPC18_DIR = op.join(op.dirname(__file__), 'data', 'pc18')\nPC18_RECORDS = op.join(PC18_DIR, 'sleep_records.csv')\nPC18_INFO = op.join(PC18_DIR, 'age-sex.csv')\nPC18_URL = 'https://physionet.org/files/challenge-2018/1.0.0/'\nPC18_SHA1_TRAINING = op.join(PC18_DIR, 'training_SHA1SUMS')\nPC18_SHA1_TEST = op.join(PC18_DIR, 'test_SHA1SUMS')\n\n\ndef update_pc18_sleep_records(fname=PC18_RECORDS):\n \"\"\"Create CSV file with information about available PC18 recordings.\n \"\"\"\n # Load and massage the checksums.\n sha_train_df = pd.read_csv(PC18_SHA1_TRAINING, sep=' ', header=None,\n names=['sha', 'fname'], engine='python')\n sha_test_df = pd.read_csv(PC18_SHA1_TEST, sep=' ', header=None,\n names=['sha', 'fname'], engine='python')\n sha_train_df['Split'] = 'training'\n sha_test_df['Split'] = 'test'\n sha_df = pd.concat([sha_train_df, sha_test_df], axis=0, ignore_index=True)\n select_records = ((sha_df.fname.str.startswith('tr') |\n sha_df.fname.str.startswith('te')) &\n ~sha_df.fname.str.endswith('arousal.mat'))\n sha_df = sha_df[select_records]\n sha_df['Record'] = sha_df['fname'].str.split('/', expand=True)[0]\n sha_df['fname'] = sha_df[['Split', 'fname']].agg('/'.join, axis=1)\n\n # Load and massage the data.\n data = pd.read_csv(PC18_INFO)\n\n data = data.reset_index().rename({'index': 'Subject'}, axis=1)\n data['Sex'] = data['Sex'].map(\n {'F': 'female', 'M': 'male', 'm': 'male'}).astype('category')\n data = sha_df.merge(data, on='Record')\n\n data['Record type'] = data['fname'].str.split('.', expand=True)[1].map(\n {'hea': 'Header', 'mat': 'PSG', 'arousal': 'Arousal'}).astype(\n 'category')\n data = data[['Subject', 'Record', 'Record type', 'Split', 'Age', 'Sex',\n 'sha', 'fname']].sort_values(by='Subject')\n\n # Save the data.\n data.to_csv(fname, index=False)\n\n\ndef _data_path(path=None, force_update=False, update_path=None, verbose=None):\n \"\"\"Get path to local copy of PC18 dataset.\n \"\"\"\n key = 'PC18_DATASET_PATH'\n name = 'PC18_DATASET_SLEEP'\n path = _get_path(path, key, name)\n subdirs = os.listdir(path)\n if 'training' in subdirs or 'test' in subdirs: # the specified path is\n # already at the training and test folders level\n return path\n else:\n return op.join(path, 'pc18-sleep-data')\n\n\ndef fetch_pc18_data(subjects, path=None, force_update=False, update_path=None,\n base_url=PC18_URL, verbose=None):\n \"\"\"Get paths to local copies of PhysioNet Challenge 2018 dataset files.\n\n This will fetch data from the publicly available PhysioNet Computing in\n Cardiology Challenge 2018 dataset on sleep arousal detection [1]_ [2]_.\n This corresponds to 1983 recordings from individual subjects with\n (suspected) sleep apnea. The dataset is separated into a training set with\n 994 recordings for which arousal annotation are available and a test set\n with 989 recordings for which the labels have not been revealed. 
Across the\n entire dataset, mean age is 55 years old and 65% of recordings are from\n male subjects.\n\n More information can be found on the\n `physionet website <https://physionet.org/content/challenge-2018/1.0.0/>`_.\n\n Parameters\n ----------\n subjects : list of int\n The subjects to use. Can be in the range of 0-1982 (inclusive). Test\n recordings are 0-988, while training recordings are 989-1982.\n path : None | str\n Location of where to look for the PC18 data storing location. If None,\n the environment variable or config parameter ``PC18_DATASET_PATH``\n is used. If it doesn't exist, the \"~/mne_data\" directory is used. If\n the dataset is not found under the given path, the data will be\n automatically downloaded to the specified folder.\n force_update : bool\n Force update of the dataset even if a local copy exists.\n update_path : bool | None\n If True, set the PC18_DATASET_PATH in mne-python config to the given\n path. If None, the user is prompted.\n base_url : str\n The URL root.\n %(verbose)s\n\n Returns\n -------\n paths : list\n List of local data paths of the given type.\n\n References\n ----------\n .. [1] Mohammad M Ghassemi, Benjamin E Moody, Li-wei H Lehman, Christopher\n Song, Qiao Li, Haoqi Sun, Roger G Mark, M Brandon Westover, Gari D\n Clifford. You Snooze, You Win: the PhysioNet/Computing in Cardiology\n Challenge 2018.\n .. [2] Goldberger, A., Amaral, L., Glass, L., Hausdorff, J., Ivanov, P. C.,\n Mark, R., ... & Stanley, H. E. (2000). PhysioBank, PhysioToolkit, and\n PhysioNet: Components of a new research resource for complex physiologic\n signals. Circulation [Online]. 101 (23), pp. e215–e220.)\n \"\"\"\n records = pd.read_csv(PC18_RECORDS)\n psg_records = records[records['Record type'] == 'PSG']\n hea_records = records[records['Record type'] == 'Header']\n arousal_records = records[records['Record type'] == 'Arousal']\n\n path = _data_path(path=path, update_path=update_path)\n params = [path, force_update, base_url]\n\n fnames = []\n for subject in subjects:\n for idx in np.where(psg_records['Subject'] == subject)[0]:\n psg_fname = _fetch_one(psg_records['fname'].iloc[idx],\n psg_records['sha'].iloc[idx], *params)\n hea_fname = _fetch_one(hea_records['fname'].iloc[idx],\n hea_records['sha'].iloc[idx], *params)\n if psg_records['Split'].iloc[idx] == 'training':\n train_idx = np.where(\n arousal_records['Subject'] == subject)[0][0]\n arousal_fname = _fetch_one(\n arousal_records['fname'].iloc[train_idx],\n arousal_records['sha'].iloc[train_idx], *params)\n else:\n arousal_fname = None\n fnames.append([psg_fname, hea_fname, arousal_fname])\n\n return fnames\n\n\ndef convert_wfdb_anns_to_mne_annotations(annots):\n \"\"\"Convert wfdb.io.Annotation format to MNE's.\n\n Parameters\n ----------\n annots : wfdb.io.Annotation\n Annotation object obtained by e.g. 
loading an annotation file with\n wfdb.rdann().\n\n Returns\n -------\n mne.Annotations :\n MNE Annotations object.\n \"\"\"\n ann_chs = set(annots.chan)\n onsets = annots.sample / annots.fs\n new_onset, new_duration, new_description = list(), list(), list()\n for ch in ann_chs:\n mask = annots.chan == ch\n ch_onsets = onsets[mask]\n ch_descs = np.array(annots.aux_note)[mask]\n\n # Events with beginning and end, defined by '(event' and 'event)'\n if all([(i.startswith('(') or i.endswith(')')) for i in ch_descs]):\n pass\n else: # Sleep stage-like annotations\n ch_durations = np.concatenate([np.diff(ch_onsets), [30]])\n assert all(ch_durations > 0), 'Negative duration'\n new_onset.extend(ch_onsets)\n new_duration.extend(ch_durations)\n new_description.extend(ch_descs)\n\n mne_annots = mne.Annotations(\n new_onset, new_duration, new_description, orig_time=None)\n\n return mne_annots\n\n\nclass PC18(BaseConcatDataset):\n \"\"\"Physionet Challenge 2018 polysomnography dataset.\n\n Sleep dataset from https://physionet.org/content/challenge-2018/1.0.0/.\n Contains overnight recordings from 1983 healthy subjects.\n\n See `fetch_pc18_data` for a more complete description.\n\n Parameters\n ----------\n subject_ids: list(int) | str | None\n (list of) int of subject(s) to be loaded. If None, load all available\n subjects. If 'training', load all training recordings. If 'test', load\n all test recordings.\n path : None | str\n Location of where to look for the PC18 data storing location. If None,\n the environment variable or config parameter ``MNE_DATASETS_PC18_PATH``\n is used. If it doesn't exist, the \"~/mne_data\" directory is used. If\n the dataset is not found under the given path, the data will be\n automatically downloaded to the specified folder.\n load_eeg_only: bool\n If True, only load the EEG channels and discard the others (EOG, EMG,\n temperature, respiration) to avoid resampling the other signals.\n preproc : list(Preprocessor) | None\n List of preprocessors to apply to each file individually. This way the\n data can e.g., be downsampled (temporally and spatially) to limit the\n memory usage of the entire Dataset object. This also enables applying\n preprocessing in parallel over the recordings.\n windower : callable | None\n Function to split the raw data into windows. 
If provided, windowing is\n integrated into the loading process (after preprocessing) such that\n memory usage is minized while allowing parallelization.\n n_jobs : int\n Number of parallel processes.\n \"\"\"\n def __init__(self, subject_ids=None, path=None, load_eeg_only=True,\n preproc=None, windower=None, n_jobs=1):\n if subject_ids is None:\n subject_ids = range(1983)\n elif subject_ids == 'training':\n subject_ids = range(989, 1983)\n elif subject_ids == 'test':\n subject_ids = range(989)\n paths = fetch_pc18_data(subject_ids, path=path)\n\n self.info_df = pd.read_csv(PC18_INFO)\n\n if n_jobs == 1:\n all_base_ds = [self._load_raw(\n subject_id, p[0], p[2], load_eeg_only=load_eeg_only,\n preproc=preproc, windower=windower)\n for subject_id, p in zip(subject_ids, paths)]\n else:\n all_base_ds = Parallel(n_jobs=n_jobs)(delayed(self._load_raw)(\n subject_id, p[0], p[2], load_eeg_only=load_eeg_only,\n preproc=preproc, windower=windower)\n for subject_id, p in zip(subject_ids, paths))\n super().__init__(all_base_ds)\n\n def _load_raw(self, subj_nb, raw_fname, arousal_fname, load_eeg_only,\n preproc, windower):\n channel_types = ['eeg'] * 7\n if load_eeg_only:\n channels = list(range(7))\n else:\n channel_types += ['emg', 'misc', 'misc', 'misc', 'misc', 'ecg']\n channels = None\n\n # Load raw signals and header\n record = wfdb.io.rdrecord(op.splitext(raw_fname)[0], channels=channels)\n\n # Convert to right units for MNE (EEG should be in V)\n data = record.p_signal.T\n data[np.array(record.units) == 'uV'] /= 1e6\n data[np.array(record.units) == 'mV'] /= 1e3\n info = mne.create_info(record.sig_name, record.fs, channel_types)\n out = mne.io.RawArray(data, info)\n\n # Extract annotations\n if arousal_fname is not None:\n annots = wfdb.rdann(\n op.splitext(raw_fname)[0], 'arousal', sampfrom=0, sampto=None,\n shift_samps=False, return_label_elements=['symbol'],\n summarize_labels=False)\n mne_annots = convert_wfdb_anns_to_mne_annotations(annots)\n out.set_annotations(mne_annots)\n\n record_name = op.splitext(op.basename(raw_fname))[0]\n record_info = self.info_df[\n self.info_df['Record'] == record_name].iloc[0]\n if record_info['Record'].startswith('tr'):\n split = 'training'\n elif record_info['Record'].startswith('te'):\n split = 'test'\n else:\n split = 'unknown'\n\n desc = pd.Series({\n 'subject': subj_nb,\n 'record': record_info['Record'],\n 'split': split,\n 'age': record_info['Age'],\n 'sex': record_info['Sex']\n }, name='')\n\n if preproc is not None:\n _preprocess(out, preproc)\n\n out = BaseDataset(out, desc)\n\n if windower is not None:\n out = windower(out)\n out.windows.load_data()\n\n return out\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"pandas.Series",
"numpy.diff",
"numpy.array",
"numpy.where"
]
] |
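The pandas/numpy calls tracked for `datasets.py` come mostly from `update_pc18_sleep_records`, which parses the PhysioNet SHA1SUMS listings with `read_csv`, derives the record id from the checksummed path, and merges the result with the age/sex table. A small self-contained sketch of that pattern, assuming tiny in-memory tables in place of the real `PC18_SHA1_TRAINING` and `PC18_INFO` files (the record name and checksums below are made up for illustration).

```python
# Sketch of the checksum-parsing/merging pattern in update_pc18_sleep_records,
# using in-memory data instead of the real PC18 SHA1SUMS / age-sex CSV files.
import io
import pandas as pd

sha_text = "abc123 tr03-0005/tr03-0005.mat\ndef456 tr03-0005/tr03-0005.hea\n"
sha_df = pd.read_csv(io.StringIO(sha_text), sep=' ', header=None,
                     names=['sha', 'fname'], engine='python')
sha_df['Split'] = 'training'
# The record id is the directory component of the checksummed path.
sha_df['Record'] = sha_df['fname'].str.split('/', expand=True)[0]

info = pd.DataFrame({'Record': ['tr03-0005'], 'Age': [55], 'Sex': ['M']})
info['Sex'] = info['Sex'].map({'F': 'female', 'M': 'male'}).astype('category')

merged = sha_df.merge(info, on='Record')
print(merged[['Record', 'Split', 'Age', 'Sex', 'fname']])
```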
intersun/LightningDOT
|
[
"5f2880f69ba87b8701ab89348d70ebb11432578c"
] |
[
"uniter_model/train_vcr.py"
] |
[
"# coding=utf-8\n# copied from hugginface github\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc.\n# team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"BERT pre-training runner.\"\"\"\nimport argparse\nimport json\nimport os\nfrom os.path import exists, join\nimport random\nfrom time import time\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch.nn.utils import clip_grad_norm_\nfrom torch.optim import Adam, Adamax\nfrom torch.utils.data import DataLoader, ConcatDataset\n\nfrom apex import amp\nfrom horovod import torch as hvd\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom data import (DistributedTokenBucketSampler,\n DetectFeatLmdb, VcrDataset, VcrEvalDataset,\n vcr_collate, vcr_eval_collate,\n PrefetchLoader)\nfrom model import BertForVisualCommonsenseReasoning\nfrom optim import warmup_linear, noam_schedule, vqa_schedule, AdamW\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom utils.logger import LOGGER, TB_LOGGER, RunningMeter, add_log_to_file\nfrom utils.distributed import (all_reduce_and_rescale_tensors, all_gather_list,\n broadcast_tensors)\nfrom utils.save import ModelSaver, save_training_meta\nfrom utils.misc import NoOp, parse_with_config\nNUM_SPECIAL_TOKENS = 81\n\n\ndef load_img_feat(dir_list, path2imgdir, opts):\n dir_ = dir_list.split(\";\")\n assert len(dir_) <= 2, \"More than two img_dirs found\"\n img_dir_gt, img_dir = None, None\n gt_dir_path, dir_path = \"\", \"\"\n for d in dir_:\n if \"gt\" in d:\n gt_dir_path = d\n else:\n dir_path = d\n if gt_dir_path != \"\":\n img_dir_gt = path2imgdir.get(gt_dir_path, None)\n if img_dir_gt is None:\n img_dir_gt = DetectFeatLmdb(gt_dir_path, -1,\n opts.max_bb, opts.min_bb, 100,\n opts.compressed_db)\n path2imgdir[gt_dir_path] = img_dir_gt\n if dir_path != \"\":\n img_dir = path2imgdir.get(dir_path, None)\n if img_dir is None:\n img_dir = DetectFeatLmdb(dir_path, opts.conf_th,\n opts.max_bb, opts.min_bb, opts.num_bb,\n opts.compressed_db)\n path2imgdir[dir_path] = img_dir\n return img_dir, img_dir_gt, path2imgdir\n\n\ndef main(opts):\n hvd.init()\n n_gpu = hvd.size()\n device = torch.device(\"cuda\", hvd.local_rank())\n torch.cuda.set_device(hvd.local_rank())\n rank = hvd.rank()\n opts.rank = rank\n LOGGER.info(\"device: {} n_gpu: {}, rank: {}, \"\n \"16-bits training: {}\".format(\n device, n_gpu, hvd.rank(), opts.fp16))\n\n if opts.gradient_accumulation_steps < 1:\n raise ValueError(\"Invalid gradient_accumulation_steps parameter: {}, \"\n \"should be >= 1\".format(\n opts.gradient_accumulation_steps))\n\n random.seed(opts.seed)\n np.random.seed(opts.seed)\n torch.manual_seed(opts.seed)\n if n_gpu > 0:\n torch.cuda.manual_seed_all(opts.seed)\n\n # train_examples = None\n LOGGER.info(f\"Loading Train Dataset {opts.train_txt_db}, \"\n f\"{opts.train_img_dir}\")\n\n # load DBs and image dirs\n train_txt_dbs = opts.train_txt_db.split(':')\n train_img_dirs = opts.train_img_dir.split(':')\n 
path2imgdir = {}\n train_datasets = []\n for db, dir_list in zip(train_txt_dbs, train_img_dirs):\n img_dir, img_dir_gt, path2imgdir = load_img_feat(\n dir_list, path2imgdir, opts)\n train_datasets.append(VcrDataset(opts.mask_prob, db, img_dir_gt,\n img_dir,\n opts.max_txt_len, task=\"qa\"))\n train_datasets.append(VcrDataset(opts.mask_prob, db, img_dir_gt,\n img_dir,\n opts.max_txt_len, task=\"qar\"))\n train_dataset = ConcatDataset(train_datasets)\n train_lens = [l for dset in train_datasets for l in dset.lens]\n val_img_dir, val_img_dir_gt, path2imgdir = load_img_feat(\n opts.val_img_dir, path2imgdir, opts)\n val_dataset = VcrEvalDataset(\"val\", opts.val_txt_db,\n val_img_dir_gt, val_img_dir,\n max_txt_len=-1)\n val_final_dataset = VcrEvalDataset(\"test\", opts.val_txt_db,\n val_img_dir_gt, val_img_dir,\n max_txt_len=-1)\n\n # Prepare model\n train_txt_db = train_txt_dbs[0]\n emb_file = f'{train_txt_db}/embedding.pt'\n\n if opts.checkpoint and opts.checkpoint_from == \"pretrain\":\n if opts.checkpoint == 'google-bert':\n checkpoint = None\n else:\n checkpoint = torch.load(opts.checkpoint)\n else:\n checkpoint = {}\n bert_model = json.load(open(f'{train_txt_db}/meta.json'))['bert']\n if 'bert' not in bert_model:\n bert_model = 'bert-large-cased' # quick hack for glove exp\n model = BertForVisualCommonsenseReasoning.from_pretrained(\n bert_model, img_dim=2048, obj_cls=False,\n state_dict=checkpoint)\n model.init_type_embedding()\n model.init_word_embedding(NUM_SPECIAL_TOKENS)\n if opts.checkpoint_from == \"vcr\":\n checkpoint = torch.load(opts.checkpoint)\n state_dict = checkpoint.get('model_state', checkpoint)\n matched_state_dict = {}\n unexpected_keys = set()\n missing_keys = set()\n for name, param in model.named_parameters():\n missing_keys.add(name)\n for key, data in state_dict.items():\n if key in missing_keys:\n matched_state_dict[key] = data\n missing_keys.remove(key)\n else:\n unexpected_keys.add(key)\n print(\"Unexpected_keys:\", list(unexpected_keys))\n print(\"Missing_keys:\", list(missing_keys))\n model.load_state_dict(matched_state_dict, strict=False)\n if opts.cut_bert != -1:\n # cut some layers of BERT\n model.bert.encoder.layer = torch.nn.ModuleList(\n model.bert.encoder.layer[:opts.cut_bert])\n if exists(emb_file) and not opts.checkpoint:\n glove = torch.load(f'{train_txt_db}/embedding.pt')\n vsize = glove.size(0)\n hid_size = model.config.hidden_size\n model.bert.embeddings.word_embeddings = torch.nn.Embedding(\n vsize, hid_size)\n mul_ = hid_size // 300 + 1\n model.bert.embeddings.word_embeddings.weight.data = glove.repeat(\n 1, mul_)[:, :hid_size]\n LOGGER.info('using GloVe for BERT')\n del checkpoint\n for name, module in model.named_modules():\n # we might want to tune dropout for smaller dataset\n if isinstance(module, torch.nn.Dropout):\n if module.p != opts.dropout:\n module.p = opts.dropout\n LOGGER.info(f'{name} set to {opts.dropout}')\n model.to(device)\n if rank != -1:\n # make sure every process has same model parameters in the beginning\n broadcast_tensors([p.data for p in model.parameters()], 0)\n\n # Prepare optimizer\n param_optimizer = list(model.named_parameters())\n no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n optimizer_grouped_parameters = [\n {'params': [p for n, p in param_optimizer\n if not any(nd in n for nd in no_decay)],\n 'weight_decay': opts.weight_decay},\n {'params': [p for n, p in param_optimizer\n if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0}\n ]\n\n if opts.optim == 'adam':\n OptimCls = Adam\n elif 
opts.optim == 'adamax':\n OptimCls = Adamax\n elif opts.optim == 'adamw':\n OptimCls = AdamW\n else:\n raise ValueError('invalid optimizer')\n optimizer = OptimCls(optimizer_grouped_parameters,\n lr=opts.learning_rate, betas=opts.betas)\n model, optimizer = amp.initialize(model, optimizer,\n enabled=opts.fp16, opt_level='O2')\n\n train_sampler = DistributedTokenBucketSampler(\n n_gpu, rank, train_lens, bucket_size=8192,\n batch_size=opts.train_batch_size, droplast=True)\n val_sampler = DistributedSampler(\n val_dataset, num_replicas=n_gpu, rank=rank)\n val_final_sampler = DistributedSampler(\n val_final_dataset, num_replicas=n_gpu, rank=rank)\n train_dataloader = DataLoader(train_dataset,\n batch_sampler=train_sampler,\n num_workers=opts.n_workers,\n pin_memory=opts.pin_mem,\n collate_fn=vcr_collate)\n train_dataloader = PrefetchLoader(train_dataloader)\n val_dataloader = DataLoader(val_dataset,\n batch_size=opts.val_batch_size*3,\n sampler=val_sampler,\n num_workers=opts.n_workers,\n pin_memory=opts.pin_mem,\n collate_fn=vcr_eval_collate)\n val_final_dataloader = DataLoader(val_final_dataset,\n batch_size=opts.val_batch_size,\n sampler=val_final_sampler,\n num_workers=opts.n_workers,\n pin_memory=opts.pin_mem,\n collate_fn=vcr_eval_collate)\n val_dataloader = PrefetchLoader(val_dataloader)\n val_final_dataloader = PrefetchLoader(val_final_dataloader)\n\n global_step = 0\n if rank == 0:\n save_training_meta(opts)\n TB_LOGGER.create(join(opts.output_dir, 'log'))\n pbar = tqdm(total=opts.num_train_steps)\n model_saver = ModelSaver(join(opts.output_dir, 'ckpt'))\n os.makedirs(join(opts.output_dir, 'results')) # store VQA predictions\n add_log_to_file(join(opts.output_dir, 'log', 'log.txt'))\n else:\n LOGGER.disabled = True\n pbar = NoOp()\n model_saver = NoOp()\n\n LOGGER.info(f\"***** Running training with {n_gpu} GPUs *****\")\n LOGGER.info(\" Num examples = %d\", len(train_dataset))\n LOGGER.info(\" Batch size = %d\", opts.train_batch_size)\n LOGGER.info(\" Accumulate steps = %d\", opts.gradient_accumulation_steps)\n LOGGER.info(\" Num steps = %d\", opts.num_train_steps)\n\n running_vcr_loss = RunningMeter('vcr_loss')\n running_obj_loss = RunningMeter('obj_cls_loss')\n running_loss = RunningMeter('loss')\n model.train()\n n_examples = 0\n n_epoch = 0\n start = time()\n # quick hack for amp delay_unscale bug\n optimizer.zero_grad()\n optimizer.step()\n while True:\n for step, batch in enumerate(train_dataloader):\n *_, targets = batch\n n_examples += targets.size(0)\n\n vcr_loss, obj_cls_loss = model(*batch, compute_loss=True)\n # loss = loss.mean()\n loss = vcr_loss + obj_cls_loss\n delay_unscale = (step+1) % opts.gradient_accumulation_steps != 0\n with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale\n ) as scaled_loss:\n scaled_loss.backward()\n if not delay_unscale:\n # gather gradients from every processes\n # do this before unscaling to make sure every process uses\n # the same gradient scale\n grads = [p.grad.data for p in model.parameters()\n if p.requires_grad and p.grad is not None]\n all_reduce_and_rescale_tensors(grads, float(1))\n\n running_loss(loss.item())\n running_vcr_loss(vcr_loss.item())\n running_obj_loss(obj_cls_loss.item())\n\n if (step + 1) % opts.gradient_accumulation_steps == 0:\n global_step += 1\n\n # learning rate scheduling\n if opts.decay == 'linear':\n lr_this_step = opts.learning_rate * warmup_linear(\n global_step, opts.warmup_steps, opts.num_train_steps)\n elif opts.decay == 'invsqrt':\n lr_this_step = opts.learning_rate * noam_schedule(\n 
global_step, opts.warmup_steps)\n elif opts.decay == 'constant':\n lr_this_step = opts.learning_rate\n elif opts.decay == 'vqa':\n lr_this_step = opts.learning_rate * vqa_schedule(\n global_step, opts.warm_int, opts.decay_int,\n opts.decay_st, opts.decay_rate)\n if lr_this_step < 0:\n # save guard for possible miscalculation of train steps\n lr_this_step = 1e-8\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr_this_step\n TB_LOGGER.add_scalar('lr', lr_this_step, global_step)\n\n # log loss\n losses = all_gather_list(running_loss)\n running_loss = RunningMeter(\n 'loss', sum(l.val for l in losses)/len(losses))\n TB_LOGGER.add_scalar('loss', running_loss.val, global_step)\n\n vcr_losses = all_gather_list(running_vcr_loss)\n running_vcr_loss = RunningMeter(\n 'vcr_loss', sum(l.val for l in vcr_losses)/len(vcr_losses))\n TB_LOGGER.add_scalar('vcr_loss', running_vcr_loss.val,\n global_step)\n\n obj_losses = all_gather_list(running_obj_loss)\n running_obj_loss = RunningMeter(\n 'obj_cls_loss',\n sum(l.val for l in obj_losses)/len(obj_losses))\n TB_LOGGER.add_scalar('obj_cls_loss', running_obj_loss.val,\n global_step)\n TB_LOGGER.step()\n\n # update model params\n if opts.grad_norm != -1:\n grad_norm = clip_grad_norm_(amp.master_params(optimizer),\n opts.grad_norm)\n TB_LOGGER.add_scalar('grad_norm', grad_norm, global_step)\n optimizer.step()\n optimizer.zero_grad()\n pbar.update(1)\n\n if global_step % 5 == 0:\n torch.cuda.empty_cache()\n if global_step % 100 == 0:\n # monitor training throughput\n tot_ex = sum(all_gather_list(n_examples))\n ex_per_sec = int(tot_ex / (time()-start))\n LOGGER.info(f'{tot_ex} examples trained at '\n f'{ex_per_sec} ex/s')\n TB_LOGGER.add_scalar('perf/ex_per_s',\n ex_per_sec, global_step)\n if global_step % opts.valid_steps == 0:\n val_log, results = validate(\n model, val_dataloader)\n TB_LOGGER.log_scaler_dict(val_log)\n model_saver.save(model, global_step)\n if global_step >= opts.num_train_steps:\n break\n if global_step >= opts.num_train_steps:\n break\n n_epoch += 1\n LOGGER.info(f\"finished {n_epoch} epochs\")\n val_log, results = validate(\n model, val_final_dataloader)\n with open(f'{opts.output_dir}/results/'\n f'results_{global_step}_'\n f'rank{rank}.json', 'w') as f:\n json.dump(results, f)\n TB_LOGGER.log_scaler_dict(val_log)\n model_saver.save(model, f'{global_step}_final')\n\n\ndef compute_accuracies(out_qa, labels_qa, out_qar, labels_qar):\n outputs_qa = out_qa.max(dim=-1)[1]\n outputs_qar = out_qar.max(dim=-1)[1]\n matched_qa = outputs_qa.squeeze() == labels_qa.squeeze()\n matched_qar = outputs_qar.squeeze() == labels_qar.squeeze()\n matched_joined = matched_qa & matched_qar\n n_correct_qa = matched_qa.sum().item()\n n_correct_qar = matched_qar.sum().item()\n n_correct_joined = matched_joined.sum().item()\n return n_correct_qa, n_correct_qar, n_correct_joined\n\n\[email protected]_grad()\ndef validate(model, val_loader):\n if hvd.rank() == 0:\n val_pbar = tqdm(total=len(val_loader))\n else:\n val_pbar = NoOp()\n LOGGER.info(f\"start running evaluation ...\")\n model.eval()\n val_qa_loss, val_qar_loss = 0, 0\n tot_qa_score, tot_qar_score, tot_score = 0, 0, 0\n n_ex = 0\n st = time()\n results = {}\n for i, batch in enumerate(val_loader):\n qids, *inputs, qa_targets, qar_targets, _ = batch\n scores = model(\n *inputs, targets=None, compute_loss=False)\n scores = scores.view(len(qids), -1)\n vcr_qa_loss = F.cross_entropy(\n scores[:, :4], qa_targets.squeeze(-1), reduction=\"sum\")\n if scores.shape[1] > 8:\n qar_index = 
[4+answer_ind.item()*4+i for answer_ind in qa_targets\n for i in range(4)]\n qar_scores = scores[:, qar_index]\n else:\n qar_scores = scores[:, 4:]\n vcr_qar_loss = F.cross_entropy(\n qar_scores, qar_targets.squeeze(-1), reduction=\"sum\")\n val_qa_loss += vcr_qa_loss.item()\n val_qar_loss += vcr_qar_loss.item()\n curr_qa_score, curr_qar_score, curr_score = compute_accuracies(\n scores[:, :4], qa_targets, qar_scores, qar_targets)\n tot_qar_score += curr_qar_score\n tot_qa_score += curr_qa_score\n tot_score += curr_score\n for qid, score in zip(qids, scores):\n results[qid] = score.cpu().tolist()\n n_ex += len(qids)\n val_pbar.update(1)\n val_qa_loss = sum(all_gather_list(val_qa_loss))\n val_qar_loss = sum(all_gather_list(val_qar_loss))\n tot_qa_score = sum(all_gather_list(tot_qa_score))\n tot_qar_score = sum(all_gather_list(tot_qar_score))\n tot_score = sum(all_gather_list(tot_score))\n n_ex = sum(all_gather_list(n_ex))\n tot_time = time()-st\n val_qa_loss /= n_ex\n val_qar_loss /= n_ex\n val_qa_acc = tot_qa_score / n_ex\n val_qar_acc = tot_qar_score / n_ex\n val_acc = tot_score / n_ex\n val_log = {f'valid/vcr_qa_loss': val_qa_loss,\n f'valid/vcr_qar_loss': val_qar_loss,\n f'valid/acc_qa': val_qa_acc,\n f'valid/acc_qar': val_qar_acc,\n f'valid/acc': val_acc,\n f'valid/ex_per_s': n_ex/tot_time}\n model.train()\n LOGGER.info(f\"validation finished in {int(tot_time)} seconds, \"\n f\"score_qa: {val_qa_acc*100:.2f} \"\n f\"score_qar: {val_qar_acc*100:.2f} \"\n f\"score: {val_acc*100:.2f} \")\n return val_log, results\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # Required parameters\n parser.add_argument(\"--task\",\n default=\"qa\", type=str,\n choices=['qa', 'qar'],\n help=\"VCR tasks: qa or qar\")\n parser.add_argument(\"--train_txt_db\",\n default=None, type=str,\n help=\"The input train corpus. (LMDB)\")\n parser.add_argument(\"--train_img_dir\",\n default=None, type=str,\n help=\"The input train images.\")\n parser.add_argument(\"--val_txt_db\",\n default=None, type=str,\n help=\"The input validation corpus. 
(LMDB)\")\n parser.add_argument(\"--val_img_dir\",\n default=None, type=str,\n help=\"The input validation images.\")\n parser.add_argument('--img_format', default='npz',\n choices=['npz', 'lmdb', 'lmdb-compress'],\n help='format of image feature')\n parser.add_argument(\"--checkpoint\",\n default=None, type=str,\n help=\"pretrained model (can take 'google-bert') \")\n parser.add_argument(\"--checkpoint_from\",\n default='pretrain', type=str,\n choices=['pretrain', 'vcr'],\n help=\"which setting is checkpoint from\")\n parser.add_argument(\"--cut_bert\", default=-1, type=int,\n help=\"reduce BERT layers (-1 for original depth)\")\n\n parser.add_argument(\n \"--output_dir\", default=None, type=str,\n help=\"The output directory where the model checkpoints will be \"\n \"written.\")\n\n # Prepro parameters\n parser.add_argument('--max_txt_len', type=int, default=60,\n help='max number of tokens in text (BERT BPE)')\n parser.add_argument('--conf_th', type=float, default=0.2,\n help='threshold for dynamic bounding boxes '\n '(-1 for fixed)')\n parser.add_argument('--max_bb', type=int, default=100,\n help='max number of bounding boxes')\n parser.add_argument('--min_bb', type=int, default=10,\n help='min number of bounding boxes')\n parser.add_argument('--num_bb', type=int, default=36,\n help='static number of bounding boxes')\n\n # training parameters\n parser.add_argument(\"--train_batch_size\",\n default=4096, type=int,\n help=\"Total batch size for training. \"\n \"(batch by tokens)\")\n parser.add_argument(\"--val_batch_size\",\n default=4096, type=int,\n help=\"Total batch size for validation. \"\n \"(batch by tokens)\")\n parser.add_argument('--gradient_accumulation_steps',\n type=int,\n default=16,\n help=\"Number of updates steps to accumualte before \"\n \"performing a backward/update pass.\")\n parser.add_argument(\"--learning_rate\",\n default=3e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--valid_steps\",\n default=1000,\n type=int,\n help=\"Run validation every X steps\")\n parser.add_argument(\"--num_train_steps\",\n default=100000,\n type=int,\n help=\"Total number of training updates to perform.\")\n parser.add_argument('--mask_prob', default=0.15, type=float,\n help='probability to mask in MRC training')\n parser.add_argument(\"--optim\", default='adam',\n choices=['adam', 'adamax', 'adamw'],\n help=\"optimizer\")\n parser.add_argument(\"--betas\", default=[0.9, 0.98], nargs='+',\n help=\"beta for adam optimizer\")\n parser.add_argument(\"--decay\", default='linear',\n choices=['linear', 'invsqrt', 'constant', 'vqa'],\n help=\"learning rate decay method\")\n parser.add_argument(\"--decay_int\", default=2000, type=int,\n help=\"interval between VQA lr decy\")\n parser.add_argument(\"--warm_int\", default=2000, type=int,\n help=\"interval for VQA lr warmup\")\n parser.add_argument(\"--decay_st\", default=20000, type=int,\n help=\"when to start decay\")\n parser.add_argument(\"--decay_rate\", default=0.2, type=float,\n help=\"ratio of lr decay\")\n parser.add_argument(\"--dropout\",\n default=0.1,\n type=float,\n help=\"tune dropout regularization\")\n parser.add_argument(\"--weight_decay\",\n default=0.0,\n type=float,\n help=\"weight decay (L2) regularization\")\n parser.add_argument(\"--grad_norm\",\n default=0.25,\n type=float,\n help=\"gradient clipping (-1 for no clipping)\")\n parser.add_argument(\"--warmup_steps\",\n default=4000,\n type=int,\n help=\"Number of training steps to perform linear \"\n \"learning rate warmup for. 
(invsqrt decay)\")\n\n # device parameters\n parser.add_argument('--seed',\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument('--fp16',\n action='store_true',\n help=\"Whether to use 16-bit float precision instead \"\n \"of 32-bit\")\n parser.add_argument('--n_workers', type=int, default=4,\n help=\"number of data workers\")\n parser.add_argument('--pin_mem', action='store_true',\n help=\"pin memory\")\n\n # can use config files\n parser.add_argument('--config', help='JSON config files')\n\n args = parse_with_config(parser)\n\n if exists(args.output_dir) and os.listdir(args.output_dir):\n raise ValueError(\"Output directory ({}) already exists and is not \"\n \"empty.\".format(args.output_dir))\n\n # options safe guard\n # TODO\n\n if args.conf_th == -1:\n assert args.max_bb + args.max_txt_len + 2 <= 512\n else:\n assert args.num_bb + args.max_txt_len + 2 <= 512\n\n main(args)\n"
] |
[
[
"torch.utils.data.distributed.DistributedSampler",
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.nn.ModuleList",
"torch.utils.data.DataLoader",
"torch.cuda.empty_cache",
"torch.nn.Embedding",
"torch.utils.data.ConcatDataset",
"torch.no_grad",
"torch.cuda.manual_seed_all"
]
] |
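The API list above names the standard PyTorch/NumPy reproducibility calls alongside the parser's `--seed` option (default 42). A minimal sketch of how those three seeding calls are commonly combined; the exact placement inside the original training script is an assumption.

```python
import numpy as np
import torch

seed = 42                          # mirrors the parser's --seed default
np.random.seed(seed)               # numpy.random.seed from the API list
torch.manual_seed(seed)            # torch.manual_seed
torch.cuda.manual_seed_all(seed)   # silently ignored when CUDA is unavailable
```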
erslog/QGrain
|
[
"9644415c73a929bbdd30d7eb4c3fa861401a5ea4"
] |
[
"QGrain/algorithms.py"
] |
[
"import weakref\nfrom enum import Enum, unique\nfrom threading import Lock\nfrom typing import Callable, Dict, Iterable, List, Tuple\n\nimport numpy as np\nfrom scipy.special import gamma\n\nINFINITESIMAL = 1e-100\nFRACTION_PARAM_NAME = \"f\"\nNAME_KEY = \"Name\"\nBOUNDS_KEY = \"Bounds\"\nDEFAULT_VALUE_KEY = \"Default\"\nLOCATION_KEY = \"Location\"\nCOMPONENT_INDEX_KEY = \"ComponentIndex\"\nPARAM_INDEX_KEY = \"ParamIndex\"\n\n\n@unique\nclass DistributionType(Enum):\n Normal = 0\n Weibull = 1\n GeneralWeibull = 2\n\n\ndef check_component_number(component_number: int):\n # Check the validity of `component_number`\n if type(component_number) != int:\n raise TypeError(component_number)\n elif component_number < 1:\n raise ValueError(component_number)\n\ndef get_param_count(distribution_type: DistributionType) -> int:\n if distribution_type == DistributionType.Normal:\n return 2\n elif distribution_type == DistributionType.Weibull:\n return 2\n elif distribution_type == DistributionType.GeneralWeibull:\n return 3\n else:\n raise NotImplementedError(distribution_type)\n\ndef get_param_names(distribution_type: DistributionType) -> Tuple[str]:\n if distribution_type == DistributionType.Normal:\n return (\"mu\", \"sigma\")\n elif distribution_type == DistributionType.Weibull:\n return (\"beta\", \"eta\")\n elif distribution_type == DistributionType.GeneralWeibull:\n return (\"mu\", \"beta\", \"eta\")\n else:\n raise NotImplementedError(distribution_type)\n\ndef get_base_func_name(distribution_type: DistributionType) -> str:\n if distribution_type == DistributionType.Normal:\n return \"normal\"\n elif distribution_type == DistributionType.Weibull:\n return \"weibull\"\n elif distribution_type == DistributionType.GeneralWeibull:\n return \"gen_weibull\"\n else:\n raise NotImplementedError(distribution_type)\n\ndef get_param_bounds(distribution_type: DistributionType) -> Tuple[Tuple[float, float]]:\n if distribution_type == DistributionType.Normal:\n return ((INFINITESIMAL, None), (INFINITESIMAL, None))\n elif distribution_type == DistributionType.Weibull:\n return ((INFINITESIMAL, None), (INFINITESIMAL, None))\n elif distribution_type == DistributionType.GeneralWeibull:\n return ((INFINITESIMAL, None), (INFINITESIMAL, None), (INFINITESIMAL, None))\n else:\n raise NotImplementedError(distribution_type)\n\n# in order to obtain better performance,\n# the params of components should be different\ndef get_param_defaults(distribution_type: DistributionType, component_number: int) -> Tuple[Tuple]:\n check_component_number(component_number)\n if distribution_type == DistributionType.Normal:\n return tuple(((i*10, 2+i) for i in range(1, component_number+1)))\n elif distribution_type == DistributionType.Weibull:\n return tuple(((10+i, (i+1)*15) for i in range(1, component_number+1)))\n elif distribution_type == DistributionType.GeneralWeibull:\n return tuple(((0, 2+i, i*10) for i in range(1, component_number+1)))\n else:\n raise NotImplementedError(distribution_type)\n\ndef get_params(distribution_type: DistributionType, component_number: int) -> List[Dict]:\n check_component_number(component_number)\n params = []\n param_count = get_param_count(distribution_type)\n param_names = get_param_names(distribution_type)\n param_bounds = get_param_bounds(distribution_type)\n param_defaults = get_param_defaults(distribution_type, component_number)\n # generate params for all components\n for component_index, component_defaults in enumerate(param_defaults):\n for param_index, name, bounds, defalut in 
zip(range(param_count), param_names, param_bounds, component_defaults):\n params.append({NAME_KEY: name+str(component_index+1), BOUNDS_KEY: bounds,\n DEFAULT_VALUE_KEY: defalut, COMPONENT_INDEX_KEY: component_index,\n PARAM_INDEX_KEY: param_index, LOCATION_KEY: component_index*param_count+param_index})\n # generate fractions for front n-1 components\n for component_index in range(component_number-1):\n # the fraction of each distribution\n params.append({NAME_KEY: FRACTION_PARAM_NAME+str(component_index+1), BOUNDS_KEY: (0, 1),\n DEFAULT_VALUE_KEY: 1/component_number, COMPONENT_INDEX_KEY: component_index,\n LOCATION_KEY: component_number*param_count + component_index})\n sort_params_by_location_in_place(params)\n return params\n\ndef sort_params_by_location_in_place(params: List[Dict]):\n params.sort(key=lambda element: element[LOCATION_KEY])\n\ndef get_bounds(params: List[Dict]) -> Tuple[Tuple]:\n bounds = []\n for param in params:\n bounds.append(param[BOUNDS_KEY])\n return tuple(bounds)\n\ndef get_constrains(component_number: int) -> Tuple[Dict]:\n if component_number == 1:\n return ()\n elif component_number > 1:\n return ({'type': 'ineq', 'fun': lambda args: 1 - np.sum(args[1-component_number:]) + INFINITESIMAL})\n else:\n raise ValueError(component_number)\n\ndef get_defaults(params: List[Dict]) -> Tuple[float]:\n defaults = []\n for param in params:\n defaults.append(param[DEFAULT_VALUE_KEY])\n return tuple(defaults)\n\ndef get_lambda_str(distribution_type: DistributionType, component_number:int) -> str:\n base_func_name = get_base_func_name(distribution_type)\n param_count = get_param_count(distribution_type)\n param_names = get_param_names(distribution_type)\n if component_number == 1:\n return \"lambda x, {0}: {1}(x, {0})\".format(\", \".join(param_names), base_func_name)\n elif component_number > 1:\n parameter_list = \", \".join([\"x\"] + [name+str(i+1) for i in range(component_number) for name in param_names] + [FRACTION_PARAM_NAME+str(i+1) for i in range(component_number-1)])\n # \" + \" to connect each sub-function\n # the previous sub-function str list means the m-1 sub-functions with n params `fj * base_func(x, param_1_j, ..., param_i_j, ..., param_n_j)`\n # the last sub-function str which represents `(1-f_1-...-f_j-...-f_m-1) * base_func(x, param_1_j, ..., param_i_j, ..., param_n_j)`\n previous_format_str = \"{0}{1}*{2}(x, \" + \", \".join([\"{\"+str(i+3)+\"}{1}\" for i in range(param_count)]) + \")\"\n previous_sub_func_strs = [previous_format_str.format(FRACTION_PARAM_NAME, i+1, base_func_name, *param_names) for i in range(component_number-1)]\n last_format_str = \"({0})*{1}(x, \" + \", \".join([\"{\"+str(i+3)+\"}{2}\" for i in range(param_count)]) + \")\"\n last_sub_func_str = last_format_str.format(\"-\".join([\"1\"]+[\"f{0}\".format(i+1) for i in range(component_number-1)]), base_func_name, component_number, *param_names)\n expression = \" + \".join(previous_sub_func_strs + [last_sub_func_str])\n lambda_string = \"lambda {0}: {1}\".format(parameter_list, expression)\n return lambda_string\n else:\n raise ValueError(component_number)\n\n# prcess the raw params list to make it easy to use\ndef process_params(distribution_type: DistributionType, component_number: int, fitted_params: Iterable) -> Tuple[Tuple[Tuple, float]]:\n param_count = get_param_count(distribution_type)\n if component_number == 1:\n assert len(fitted_params) == param_count\n return ((tuple(fitted_params), 1.0),)\n elif component_number > 1:\n assert len(fitted_params) == (param_count+1) * 
component_number - 1\n expanded = list(fitted_params) + [1.0-sum(fitted_params[component_number*param_count:])]\n return tuple(((tuple(expanded[i*param_count:(i+1)*param_count]), expanded[component_number*param_count+i]) for i in range(component_number)))\n else:\n raise ValueError(component_number)\n\n# the pdf function of Normal distribution\ndef normal(x, mu, sigma):\n if sigma <= 0.0:\n return np.zeros_like(x, dtype=np.float64)\n else:\n return 1/(sigma*np.sqrt(2*np.pi))*np.exp(-np.square(x-mu)/(2*np.square(sigma)))\n\ndef double_normal(x, mu1, sigma1, mu2, sigma2, f1):\n return f1 * normal(x, mu1, sigma1) + (1-f1) * normal(x, mu2, sigma2)\n\ndef triple_normal(x, mu1, sigma1, mu2, sigma2, mu3, sigma3, f1, f2):\n return f1 * normal(x, mu1, sigma1) + f2 * normal(x, mu2, sigma2) + (1-f1-f2) * normal(x, mu3, sigma3)\n\ndef quadruple_normal(x, mu1, sigma1, mu2, sigma2, mu3, sigma3, mu4, sigma4, f1, f2, f3):\n return f1 * normal(x, mu1, sigma1) + f2 * normal(x, mu2, sigma2) + f3 * normal(x, mu3, sigma3) + (1-f1-f2-f3) * normal(x, mu4, sigma4)\n\ndef normal_mean(mu, sigma):\n if sigma <= 0.0:\n return np.nan\n else:\n return mu\n\ndef normal_median(mu, sigma):\n if sigma <= 0.0:\n return np.nan\n else:\n return mu\n\ndef normal_mode(mu, sigma):\n if sigma <= 0.0:\n return np.nan\n else:\n return mu\n\ndef normal_standard_deviation(mu, sigma):\n if sigma <= 0.0:\n return np.nan\n else:\n return sigma\n\ndef normal_variance(mu, sigma):\n if sigma <= 0.0:\n return np.nan\n else:\n return sigma**2\n\ndef normal_skewness(mu, sigma):\n if sigma <= 0.0:\n return np.nan\n else:\n return 0.0\n\ndef normal_kurtosis(mu, sigma):\n if sigma <= 0.0:\n return np.nan\n else:\n return 0.0\n\n\n# The pdf function of Weibull distribution\ndef weibull(x, beta, eta):\n results = np.zeros_like(x, dtype=np.float64)\n if beta <= 0.0 or eta <= 0.0:\n return results\n else:\n non_zero = np.greater(x, 0.0)\n results[non_zero] = (beta/eta) * (x[non_zero]/eta)**(beta-1) * np.exp(-(x[non_zero]/eta)**beta)\n return results\n # return (beta/eta) * (x/eta)**(beta-1) * np.exp(-(x/eta)**beta)\n\ndef double_weibull(x, beta1, eta1, beta2, eta2, f):\n return f * weibull(x, beta1, eta1) + (1-f) * weibull(x, beta2, eta2)\n\ndef triple_weibull(x, beta1, eta1, beta2, eta2, beta3, eta3, f1, f2):\n return f1 * weibull(x, beta1, eta1) + f2 * weibull(x, beta2, eta2) + (1-f1-f2) * weibull(x, beta3, eta3)\n\ndef quadruple_weibull(x, beta1, eta1, beta2, eta2, beta3, eta3, beta4, eta4, f1, f2, f3):\n return f1 * weibull(x, beta1, eta1) + f2 * weibull(x, beta2, eta2) + f3 * weibull(x, beta3, eta3) + (1-f1-f2-f3) * weibull(x, beta4, eta4)\n\ndef weibull_mean(beta, eta):\n if beta <= 0.0 or eta <= 0.0:\n return np.nan\n else:\n return eta*gamma(1/beta+1)\n\ndef weibull_median(beta, eta):\n if beta <= 0.0 or eta <= 0.0:\n return np.nan\n else:\n return eta*(np.log(2)**(1/beta))\n\ndef weibull_mode(beta, eta):\n if beta <= 0.0 or eta <= 0.0:\n return np.nan\n elif beta <= 1:\n return 0.0\n else:\n return eta*(1-1/beta)**(1/beta)\n\ndef weibull_standard_deviation(beta, eta):\n if beta <= 0.0 or eta <= 0.0:\n return np.nan\n else:\n return eta*np.sqrt(gamma(2/beta+1) - gamma(1/beta+1)**2)\n\ndef weibull_variance(beta, eta):\n if beta <= 0.0 or eta <= 0.0:\n return np.nan\n else:\n return (eta**2)*(gamma(2/beta+1)-gamma(1/beta+1)**2)\n\ndef weibull_skewness(beta, eta):\n if beta <= 0.0 or eta <= 0.0:\n return np.nan\n else:\n return (2*gamma(1/beta+1)**3 - 3*gamma(2/beta+1)*gamma(1/beta+1) + gamma(3/beta+1)) / 
(gamma(2/beta+1)-gamma(1/beta+1)**2)**(3/2)\n\ndef weibull_kurtosis(beta, eta):\n if beta <= 0.0 or eta <= 0.0:\n return np.nan\n else:\n return (-3*gamma(1/beta+1)**4 + 6*gamma(2/beta+1)*gamma(1/beta+1)**2 - 4*gamma(3/beta+1)*gamma(1/beta+1) + gamma(4/beta+1)) / (gamma(2/beta+1)-gamma(1/beta+1)**2)**2\n\n\ndef gen_weibull(x, mu, beta, eta):\n return weibull(x-mu, beta, eta)\n\ndef double_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, f):\n return f * gen_weibull(x, mu1, beta1, eta1) + (1-f) * gen_weibull(x, mu2, beta2, eta2)\n\ndef triple_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, mu3, beta3, eta3, f1, f2):\n return f1 * gen_weibull(x, mu1, beta1, eta1) + f2 * gen_weibull(x, mu2, beta2, eta2) + (1-f1-f2)*gen_weibull(x, mu3, beta3, eta3)\n\ndef quadruple_gen_weibull(x, mu1, beta1, eta1, mu2, beta2, eta2, mu3, beta3, eta3, mu4, beta4, eta4, f1, f2, f3):\n return f1 * gen_weibull(x, mu1, beta1, eta1) + f2 * gen_weibull(x, mu2, beta2, eta2) + f3 * gen_weibull(x, mu3, beta3, eta3) + (1-f1-f2-f3) * gen_weibull(x, mu4, beta4, eta4)\n\ndef gen_weibull_mean(mu, beta, eta):\n return weibull_mean(beta, eta) + mu\n\ndef gen_weibull_median(mu, beta, eta):\n return weibull_median(beta, eta) + mu\n\ndef gen_weibull_mode(mu, beta, eta):\n return weibull_mode(beta, eta) + mu\n\ndef gen_weibull_standard_deviation(mu, beta, eta):\n return weibull_standard_deviation(beta, eta)\n\ndef gen_weibull_variance(mu, beta, eta):\n return weibull_variance(beta, eta)\n\ndef gen_weibull_skewness(mu, beta, eta):\n return weibull_skewness(beta, eta)\n\ndef gen_weibull_kurtosis(mu, beta, eta):\n return weibull_kurtosis(beta, eta)\n\n\ndef get_single_func(distribution_type: DistributionType) -> Callable:\n if distribution_type == DistributionType.Normal:\n return normal\n elif distribution_type == DistributionType.Weibull:\n return weibull\n elif distribution_type == DistributionType.GeneralWeibull:\n return gen_weibull\n else:\n raise NotImplementedError(distribution_type)\n\ndef get_param_by_mean(distribution_type: DistributionType, component_number: int, mean_values: Iterable):\n assert len(mean_values) == component_number\n param_count = get_param_count(distribution_type)\n func_params = get_params(distribution_type, component_number)\n param_values = list(get_defaults(func_params))\n if distribution_type == DistributionType.Normal:\n for i in range(component_number):\n # for normal distribution\n # only change the loaction param (first param of each component)\n param_values[i*param_count] = mean_values[i]\n elif distribution_type == DistributionType.Weibull:\n for i in range(component_number):\n\n beta = param_values[i*param_count]\n param_values[i*param_count+1] = mean_values[i] / gamma(1/beta+1)\n elif distribution_type == DistributionType.GeneralWeibull:\n for i in range(component_number):\n mu = param_values[i*param_count]\n beta = param_values[i*param_count+1]\n param_values[i*param_count+2] = (mean_values[i]-mu) / gamma(1/beta+1)\n else:\n raise NotImplementedError(distribution_type)\n return tuple(param_values)\n\n\nclass AlgorithmData:\n __cache = weakref.WeakValueDictionary()\n __cache_lock = Lock()\n\n def __init__(self, distribution_type: DistributionType, component_number: int):\n check_component_number(component_number)\n self.__distribution_type = distribution_type\n self.__component_number = component_number\n self.__param_count = get_param_count(self.distribution_type)\n self.__param_names = get_param_names(self.distribution_type)\n self.__single_func = get_single_func(distribution_type)\n 
self.__lambda_str = get_lambda_str(distribution_type, component_number)\n self.__mixed_func = self.__get_func_by_lambda_str(self.__lambda_str)\n self.__func_params = get_params(distribution_type, component_number)\n self.__bounds = get_bounds(self.__func_params)\n self.__defaults = get_defaults(self.__func_params)\n self.__constrains = get_constrains(component_number)\n self.__get_statistic_func()\n\n def __get_func_by_lambda_str(self, lambda_str: str) -> Callable:\n local_params = {\"__tempMixedFunc\": None}\n exec(\"__tempMixedFunc=\" + lambda_str, None, local_params)\n mixed_func = local_params[\"__tempMixedFunc\"]\n return mixed_func\n\n def __get_statistic_func(self):\n if self.distribution_type == DistributionType.Normal:\n self.__mean = normal_mean\n self.__median = normal_median\n self.__mode = normal_mode\n self.__standard_deviation = normal_standard_deviation\n self.__variance = normal_variance\n self.__skewness = normal_skewness\n self.__kurtosis = normal_kurtosis\n elif self.distribution_type == DistributionType.Weibull:\n self.__mean = weibull_mean\n self.__median = weibull_median\n self.__mode = weibull_mode\n self.__standard_deviation = weibull_standard_deviation\n self.__variance = weibull_variance\n self.__skewness = weibull_skewness\n self.__kurtosis = weibull_kurtosis\n elif self.distribution_type == DistributionType.GeneralWeibull:\n self.__mean = gen_weibull_mean\n self.__median = gen_weibull_median\n self.__mode = gen_weibull_mode\n self.__standard_deviation = gen_weibull_standard_deviation\n self.__variance = gen_weibull_variance\n self.__skewness = gen_weibull_skewness\n self.__kurtosis = gen_weibull_kurtosis\n else:\n raise NotImplementedError(self.distribution_type)\n\n @property\n def distribution_type(self) -> DistributionType:\n return self.__distribution_type\n\n @property\n def component_number(self) -> int:\n return self.__component_number\n\n @property\n def param_count(self) -> int:\n return self.__param_count\n\n @property\n def param_names(self) -> Tuple[str]:\n return self.__param_names\n\n @property\n def single_func(self) -> Callable:\n return self.__single_func\n\n @property\n def mixed_func(self) -> Callable:\n return self.__mixed_func\n\n @property\n def bounds(self) -> Tuple[Tuple]:\n return self.__bounds\n\n @property\n def defaults(self) -> Tuple[float]:\n return self.__defaults\n\n @property\n def constrains(self) -> Tuple[Dict]:\n return self.__constrains\n\n @property\n def mean(self) -> Callable:\n return self.__mean\n\n @property\n def median(self) -> Callable:\n return self.__median\n\n @property\n def mode(self) -> Callable:\n return self.__mode\n\n @property\n def variance(self) -> Callable:\n return self.__variance\n\n @property\n def standard_deviation(self) -> Callable:\n return self.__standard_deviation\n\n @property\n def skewness(self) -> Callable:\n return self.__skewness\n\n @property\n def kurtosis(self) -> Callable:\n return self.__kurtosis\n\n @classmethod\n def get_algorithm_data(cls, distribution_type: DistributionType,\n component_number: int):\n cls.__cache_lock.acquire()\n key = (distribution_type, component_number)\n if key in cls.__cache:\n data = cls.__cache[key]\n else:\n data = AlgorithmData(distribution_type, component_number)\n cls.__cache[key] = data\n cls.__cache_lock.release()\n return data\n\n def process_params(self, fitted_params: Iterable, x_offset: float) -> Tuple[Tuple[Tuple, float]]:\n params_copy = np.array(fitted_params)\n param_count = get_param_count(self.distribution_type)\n if self.distribution_type 
== DistributionType.Normal or self.distribution_type == DistributionType.GeneralWeibull:\n for i in range(self.component_number):\n params_copy[i*param_count] += x_offset\n return process_params(self.distribution_type, self.component_number, params_copy)\n\n def get_param_by_mean(self, mean_values: Iterable):\n return get_param_by_mean(self.distribution_type, self.component_number, mean_values)\n\n\nif __name__ == \"__main__\":\n # test the generating speed of algorithm data\n import time\n import sys\n start_uncached = time.time()\n data_list_uncached = []\n for i in range(10000):\n for component_number in range(3, 11):\n data = AlgorithmData(DistributionType.GeneralWeibull, component_number)\n data_list_uncached.append(data)\n end_uncached = time.time()\n print(\"Uncached time spent:\", end_uncached-start_uncached, \"s\")\n\n start_cached = time.time()\n data_list_cached = []\n for i in range(10000):\n for component_number in range(3, 11):\n data = AlgorithmData.get_algorithm_data(DistributionType.GeneralWeibull, component_number)\n data_list_cached.append(data)\n end_cached = time.time()\n print(\"Cached time spent:\", end_cached-start_cached, \"s\")\n"
] |
[
[
"numpy.square",
"numpy.log",
"scipy.special.gamma",
"numpy.greater",
"numpy.sqrt",
"numpy.zeros_like",
"numpy.array",
"numpy.exp",
"numpy.sum"
]
] |
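A minimal standalone sketch of the Weibull machinery in `QGrain/algorithms.py` above: the `weibull` pdf, a two-component mixture of the same shape `double_weibull` produces, and the `weibull_mean` formula `eta * gamma(1/beta + 1)`. The sample parameters are made up.

```python
import numpy as np
from scipy.special import gamma

def weibull(x, beta, eta):
    # pdf of the two-parameter Weibull, as defined in QGrain/algorithms.py
    out = np.zeros_like(x, dtype=np.float64)
    if beta <= 0.0 or eta <= 0.0:
        return out
    nz = np.greater(x, 0.0)
    out[nz] = (beta / eta) * (x[nz] / eta) ** (beta - 1) * np.exp(-(x[nz] / eta) ** beta)
    return out

x = np.linspace(0.0, 100.0, 201)
# f1 * weibull(...) + (1 - f1) * weibull(...): same form as double_weibull(x, 3, 20, 2.5, 45, 0.4)
mixture = 0.4 * weibull(x, 3.0, 20.0) + 0.6 * weibull(x, 2.5, 45.0)
mean_first = 20.0 * gamma(1 / 3.0 + 1)   # weibull_mean(beta=3, eta=20)
```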
kirillProkofiev/deep-object-reid
|
[
"2abc96ec49bc0005ed556e203925354fdf12165c"
] |
[
"torchreid/models/osnet_ain.py"
] |
[
"from __future__ import division, absolute_import\n\nimport warnings\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torchreid.losses import AngleSimpleLinear\nfrom torchreid.ops import Dropout, HSwish, GumbelSigmoid, LocalContrastNormalization\n\n\n__all__ = ['osnet_ain_x1_0', 'osnet_ain2_x1_0']\n\npretrained_urls = {\n 'osnet_ain_x1_0': 'https://drive.google.com/uc?id=1-CaioD9NaqbHK_kzSMW8VE4_3KcsRjEo'\n}\n\n\n##########\n# Basic layers\n##########\n\nclass ConvLayer(nn.Module):\n \"\"\"Convolution layer (conv + bn + relu).\"\"\"\n\n def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n groups=1,\n IN=False\n ):\n super(ConvLayer, self).__init__()\n\n self.conv = nn.Conv2d(\n in_channels,\n out_channels,\n kernel_size,\n stride=stride,\n padding=padding,\n bias=False,\n groups=groups\n )\n if IN:\n self.bn = nn.InstanceNorm2d(out_channels, affine=True)\n else:\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n return self.relu(x)\n\n\nclass Conv1x1(nn.Module):\n \"\"\"1x1 convolution + bn + relu.\"\"\"\n\n def __init__(self, in_channels, out_channels, stride=1, groups=1, out_fn=nn.ReLU, use_in=False):\n super(Conv1x1, self).__init__()\n\n self.conv = nn.Conv2d(\n in_channels,\n out_channels,\n 1,\n stride=stride,\n padding=0,\n bias=False,\n groups=groups\n )\n self.bn = nn.InstanceNorm2d(out_channels, affine=True) if use_in else nn.BatchNorm2d(out_channels)\n self.out_fn = out_fn() if out_fn is not None else None\n\n def forward(self, x):\n y = self.conv(x)\n y = self.bn(y)\n y = self.out_fn(y) if self.out_fn is not None else y\n return y\n\n\nclass Conv1x1Linear(nn.Module):\n \"\"\"1x1 convolution + bn (w/o non-linearity).\"\"\"\n\n def __init__(self, in_channels, out_channels, stride=1, bn=True):\n super(Conv1x1Linear, self).__init__()\n self.conv = nn.Conv2d(\n in_channels, out_channels, 1, stride=stride, padding=0, bias=False\n )\n self.bn = None\n if bn:\n self.bn = nn.BatchNorm2d(out_channels)\n\n def forward(self, x):\n x = self.conv(x)\n if self.bn is not None:\n x = self.bn(x)\n return x\n\n\nclass Conv3x3(nn.Module):\n \"\"\"3x3 convolution + bn + relu.\"\"\"\n\n def __init__(self, in_channels, out_channels, stride=1, groups=1, out_fn=nn.ReLU):\n super(Conv3x3, self).__init__()\n self.conv = nn.Conv2d(\n in_channels,\n out_channels,\n 3,\n stride=stride,\n padding=1,\n bias=False,\n groups=groups\n )\n self.bn = nn.BatchNorm2d(out_channels)\n self.out_fn = out_fn() if out_fn is not None else None\n\n def forward(self, x):\n y = self.conv(x)\n y = self.bn(y)\n y = self.out_fn(y) if self.out_fn is not None else y\n return y\n\n\nclass LightConv3x3(nn.Module):\n \"\"\"Lightweight 3x3 convolution.\n\n 1x1 (linear) + dw 3x3 (nonlinear).\n \"\"\"\n\n def __init__(self, in_channels, out_channels):\n super(LightConv3x3, self).__init__()\n self.conv1 = nn.Conv2d(\n in_channels, out_channels, 1, stride=1, padding=0, bias=False\n )\n self.conv2 = nn.Conv2d(\n out_channels,\n out_channels,\n 3,\n stride=1,\n padding=1,\n bias=False,\n groups=out_channels\n )\n self.bn = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.bn(x)\n return self.relu(x)\n\n\nclass LightConvStream(nn.Module):\n \"\"\"Lightweight convolution stream.\"\"\"\n\n def __init__(self, in_channels, out_channels, depth):\n super(LightConvStream, 
self).__init__()\n assert depth >= 1, 'depth must be equal to or larger than 1, but got {}'.format(\n depth\n )\n layers = []\n layers += [LightConv3x3(in_channels, out_channels)]\n for i in range(depth - 1):\n layers += [LightConv3x3(out_channels, out_channels)]\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layers(x)\n\n\n##########\n# Attention modules\n##########\n\nclass ResidualAttention(nn.Module):\n def __init__(self, in_channels, gumbel=True, reduction=4.0, residual=True):\n super(ResidualAttention, self).__init__()\n\n self.residual = residual\n\n internal_channels = int(in_channels / reduction)\n self.spatial_attention = nn.Sequential(\n Conv1x1(in_channels, internal_channels, out_fn=None),\n HSwish(),\n Conv3x3(internal_channels, internal_channels, groups=internal_channels, out_fn=None),\n HSwish(),\n Conv1x1(internal_channels, 1, out_fn=None),\n GumbelSigmoid(scale=5.0) if gumbel else nn.Sigmoid()\n )\n\n def forward(self, x, return_mask=False):\n soft_mask = self.spatial_attention(x)\n out = (1.0 + soft_mask) * x if self.residual else soft_mask * x\n\n if return_mask:\n return out, soft_mask\n else:\n return out\n\n\nclass AttributeAttention(nn.Module):\n def __init__(self, main_num_features, attr_num_feature, out_num_features):\n super(AttributeAttention, self).__init__()\n\n self.gate = nn.Sequential(\n nn.Linear(attr_num_feature, main_num_features),\n nn.BatchNorm1d(main_num_features),\n nn.Sigmoid()\n )\n self.fc = nn.Sequential(\n nn.Linear(main_num_features, out_num_features),\n nn.BatchNorm1d(out_num_features)\n )\n\n def forward(self, x, attr):\n return self.fc(x * self.gate(attr))\n\n\n##########\n# Building blocks for omni-scale feature learning\n##########\n\nclass LCTGate(nn.Module):\n def __init__(self, channels, groups=16):\n super(LCTGate, self).__init__()\n\n assert channels > 0\n assert groups > 0\n self.gn = nn.GroupNorm(groups, channels, affine=True)\n\n self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n self.gate_activation = nn.Sigmoid()\n\n def init_params(self):\n nn.init.zeros_(self.gn.weight)\n nn.init.ones_(self.gn.bias)\n\n def forward(self, x):\n y = self.global_avgpool(x)\n y = self.gn(y)\n y = self.gate_activation(y)\n out = y * x\n\n return out\n\n\nclass ChannelGate(nn.Module):\n \"\"\"A mini-network that generates channel-wise gates conditioned on input tensor.\"\"\"\n\n def __init__(\n self,\n in_channels,\n num_gates=None,\n return_gates=False,\n gate_activation='sigmoid',\n reduction=16,\n layer_norm=False\n ):\n super(ChannelGate, self).__init__()\n if num_gates is None:\n num_gates = in_channels\n self.return_gates = return_gates\n self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc1 = nn.Conv2d(\n in_channels,\n in_channels // reduction,\n kernel_size=1,\n bias=True,\n padding=0\n )\n self.norm1 = None\n if layer_norm:\n self.norm1 = nn.LayerNorm((in_channels // reduction, 1, 1))\n self.relu = nn.ReLU()\n self.fc2 = nn.Conv2d(\n in_channels // reduction,\n num_gates,\n kernel_size=1,\n bias=True,\n padding=0\n )\n if gate_activation == 'sigmoid':\n self.gate_activation = nn.Sigmoid()\n elif gate_activation == 'relu':\n self.gate_activation = nn.ReLU()\n elif gate_activation == 'linear':\n self.gate_activation = None\n else:\n raise RuntimeError(\"Unknown gate activation: {}\".format(gate_activation))\n\n def forward(self, x):\n input = x\n x = self.global_avgpool(x)\n x = self.fc1(x)\n if self.norm1 is not None:\n x = self.norm1(x)\n x = self.relu(x)\n x = self.fc2(x)\n if self.gate_activation is not 
None:\n x = self.gate_activation(x)\n if self.return_gates:\n return x\n return input * x\n\n\nclass OSBlock(nn.Module):\n \"\"\"Omni-scale feature learning block.\"\"\"\n\n def __init__(self, in_channels, out_channels, channel_gate, reduction=4, T=4, dropout_cfg=None, **kwargs):\n super(OSBlock, self).__init__()\n assert T >= 1\n assert out_channels >= reduction and out_channels % reduction == 0\n mid_channels = out_channels // reduction\n\n self.conv1 = Conv1x1(in_channels, mid_channels)\n self.conv2 = nn.ModuleList()\n for t in range(1, T + 1):\n self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]\n self.gate = channel_gate(mid_channels)\n self.conv3 = Conv1x1Linear(mid_channels, out_channels)\n\n self.downsample = None\n if in_channels != out_channels:\n self.downsample = Conv1x1Linear(in_channels, out_channels)\n\n self.dropout = None\n if dropout_cfg is not None:\n self.dropout = Dropout(**dropout_cfg)\n\n def forward(self, x):\n identity = x\n if self.downsample is not None:\n identity = self.downsample(identity)\n\n x1 = self.conv1(x)\n\n x2 = 0\n for conv2_t in self.conv2:\n x2_t = conv2_t(x1)\n x2 = x2 + self.gate(x2_t)\n\n x3 = self.conv3(x2)\n if self.dropout is not None:\n x3 = self.dropout(x3, x)\n\n out = x3 + identity\n\n return F.relu(out)\n\n\nclass OSBlockINin(nn.Module):\n \"\"\"Omni-scale feature learning block with instance normalization.\"\"\"\n\n def __init__(self, in_channels, out_channels, channel_gate, reduction=4, T=4, dropout_cfg=None, **kwargs):\n super(OSBlockINin, self).__init__()\n assert T >= 1\n assert out_channels >= reduction and out_channels % reduction == 0\n mid_channels = out_channels // reduction\n\n self.conv1 = Conv1x1(in_channels, mid_channels)\n self.conv2 = nn.ModuleList()\n for t in range(1, T + 1):\n self.conv2 += [LightConvStream(mid_channels, mid_channels, t)]\n self.gate = channel_gate(mid_channels)\n self.conv3 = Conv1x1Linear(mid_channels, out_channels, bn=False)\n\n self.downsample = None\n if in_channels != out_channels:\n self.downsample = Conv1x1Linear(in_channels, out_channels)\n\n self.IN = nn.InstanceNorm2d(out_channels, affine=True)\n\n self.dropout = None\n if dropout_cfg is not None:\n self.dropout = Dropout(**dropout_cfg)\n\n def forward(self, x):\n identity = x\n if self.downsample is not None:\n identity = self.downsample(identity)\n\n x1 = self.conv1(x)\n\n x2 = 0\n for conv2_t in self.conv2:\n x2_t = conv2_t(x1)\n x2 = x2 + self.gate(x2_t)\n\n x3 = self.conv3(x2)\n x3 = self.IN(x3) # IN inside residual\n if self.dropout is not None:\n x3 = self.dropout(x3, x)\n\n out = x3 + identity\n\n return F.relu(out)\n\n\n##########\n# Network architecture\n##########\n\nclass OSNet(nn.Module):\n \"\"\"Omni-Scale Network.\n \n Reference:\n - Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.\n - Zhou et al. Learning Generalisable Omni-Scale Representations\n for Person Re-Identification. 
arXiv preprint, 2019.\n \"\"\"\n\n def __init__(\n self,\n num_classes,\n blocks,\n channels,\n classification=False,\n contrastive=False,\n head_attention=False,\n attentions=None,\n dropout_cfg=None,\n feature_dim=256,\n loss='softmax',\n input_lcn=False,\n IN_first=False,\n IN_conv1=False,\n bn_eval=False,\n bn_frozen=False,\n attr_names=None,\n attr_num_classes=None,\n lct_gate=False,\n pooling_type='avg',\n **kwargs\n ):\n super(OSNet, self).__init__()\n\n self.bn_eval = bn_eval\n self.bn_frozen = bn_frozen\n self.classification = classification\n self.contrastive = contrastive\n self.pooling_type = pooling_type\n\n num_blocks = len(blocks)\n assert num_blocks == len(channels) - 1\n\n self.loss = loss\n self.feature_dim = feature_dim\n assert self.feature_dim is not None and self.feature_dim > 0\n\n self.use_attentions = attentions\n if self.use_attentions is None:\n self.use_attentions = [False] * (num_blocks + 2)\n assert len(self.use_attentions) == num_blocks + 2\n\n if not isinstance(num_classes, (list, tuple)):\n num_classes = [num_classes]\n self.num_classes = num_classes\n assert len(self.num_classes) > 0\n\n self.input_lcn = LocalContrastNormalization(3, 5, affine=True) if input_lcn else None\n self.input_IN = nn.InstanceNorm2d(3, affine=True) if IN_first else None\n channel_gate = LCTGate if lct_gate else ChannelGate\n\n self.conv1 = ConvLayer(3, channels[0], 7, stride=2, padding=3, IN=IN_conv1)\n self.att1 = self._construct_attention_layer(channels[0], self.use_attentions[0])\n self.pool1 = nn.MaxPool2d(3, stride=2, padding=1)\n self.conv2 = self._construct_layer(blocks[0], channels[0], channels[1], channel_gate, dropout_cfg)\n self.att2 = self._construct_attention_layer(channels[1], self.use_attentions[1])\n self.pool2 = nn.Sequential(Conv1x1(channels[1], channels[1]), nn.AvgPool2d(2, stride=2))\n self.conv3 = self._construct_layer(blocks[1], channels[1], channels[2], channel_gate, dropout_cfg)\n self.att3 = self._construct_attention_layer(channels[2], self.use_attentions[2])\n self.pool3 = nn.Sequential(Conv1x1(channels[2], channels[2]), nn.AvgPool2d(2, stride=2))\n self.conv4 = self._construct_layer(blocks[2], channels[2], channels[3], channel_gate, dropout_cfg)\n self.att4 = self._construct_attention_layer(channels[3], self.use_attentions[3])\n\n backbone_out_num_channels = channels[3]\n self.conv5 = Conv1x1(channels[3], backbone_out_num_channels)\n self.att5 = self._construct_attention_layer(backbone_out_num_channels, self.use_attentions[4])\n\n self.head_att = self._construct_head_attention(backbone_out_num_channels, enable=head_attention)\n\n classifier_block = nn.Linear if self.loss not in ['am_softmax'] else AngleSimpleLinear\n\n self.use_attr = attr_names is not None and attr_num_classes is not None\n if self.use_attr:\n assert len(attr_names) == len(attr_num_classes)\n\n in_feature_dims = [2 * self.feature_dim] * len(self.num_classes)\n out_feature_dims = [self.feature_dim] * len(self.num_classes)\n\n self.attr_names = []\n self.attr, self.attr_classifier = nn.ModuleDict(), nn.ModuleDict()\n attr_feature_dim = self.feature_dim // 4\n for attr_name, attr_size in zip(attr_names, attr_num_classes):\n if attr_size is None or attr_size <= 0:\n continue\n\n self.attr[attr_name] = self._construct_fc_layer(backbone_out_num_channels, attr_feature_dim)\n self.attr_classifier[attr_name] = classifier_block(attr_feature_dim, attr_size)\n\n self.attr_names.append(attr_name)\n\n if len(self.attr) > 0:\n mixed_hum_features = len(self.attr) * attr_feature_dim\n self.attr_att = 
nn.ModuleList()\n for trg_id in range(len(self.num_classes)):\n self.attr_att.append(AttributeAttention(\n in_feature_dims[trg_id], mixed_hum_features, out_feature_dims[trg_id]\n ))\n else:\n self.use_attr = False\n\n if not self.use_attr:\n in_feature_dims = [self.feature_dim] * len(self.num_classes)\n out_feature_dims = [self.feature_dim] * len(self.num_classes)\n\n self.out_feature_dims = out_feature_dims\n\n self.fc, self.classifier = nn.ModuleList(), nn.ModuleList()\n for trg_id, trg_num_classes in enumerate(self.num_classes):\n self.fc.append(self._construct_fc_layer(backbone_out_num_channels, in_feature_dims[trg_id]))\n if not contrastive and trg_num_classes > 0:\n self.classifier.append(classifier_block(out_feature_dims[trg_id], trg_num_classes))\n\n self._init_params()\n\n @staticmethod\n def _construct_layer(blocks, in_channels, out_channels, channel_gate, dropout_cfg=None):\n layers = []\n layers += [blocks[0](in_channels, out_channels, channel_gate, dropout_cfg=dropout_cfg)]\n for i in range(1, len(blocks)):\n layers += [blocks[i](out_channels, out_channels, channel_gate, dropout_cfg=dropout_cfg)]\n\n return nn.Sequential(*layers)\n\n @staticmethod\n def _construct_attention_layer(num_channels, enable):\n return ResidualAttention(num_channels, gumbel=False, residual=True) if enable else None\n\n @staticmethod\n def _construct_head_attention(num_channels, enable, channel_factor=8, gumbel=True, gumbel_scale=5.0):\n if not enable:\n return None\n\n internal_num_channels = int(float(num_channels) / float(channel_factor))\n\n layers = [\n Conv1x1(num_channels, internal_num_channels, out_fn=None),\n HSwish(),\n Conv3x3(internal_num_channels, internal_num_channels, groups=internal_num_channels, out_fn=None),\n HSwish(),\n Conv1x1(internal_num_channels, 1, out_fn=None),\n GumbelSigmoid(scale=gumbel_scale) if gumbel else nn.Sigmoid()\n ]\n\n return nn.Sequential(*layers)\n\n @staticmethod\n def _construct_fc_layer(input_dim, output_dim, dropout=False):\n layers = []\n\n if dropout:\n layers.append(Dropout(p=0.2, dist='gaussian'))\n\n layers.extend([\n nn.Linear(input_dim, output_dim),\n nn.BatchNorm1d(output_dim)\n ])\n\n return nn.Sequential(*layers)\n\n def _init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, (nn.InstanceNorm1d, nn.InstanceNorm2d)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, LocalContrastNormalization):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, LCTGate):\n m.init_params()\n\n def _backbone(self, x):\n att_maps = []\n\n y = self.input_lcn(x) if self.input_lcn is not None else x\n y = self.input_IN(y) if self.input_IN is not None else y\n\n y = self.conv1(y)\n if self.att1 is not None:\n y, att1 = self.att1(y, return_mask=True)\n att_maps.append(att1)\n y = self.pool1(y)\n\n y = self.conv2(y)\n if self.att2 is not None:\n y, att2 = self.att2(y, return_mask=True)\n att_maps.append(att2)\n y = self.pool2(y)\n\n y = self.conv3(y)\n if self.att3 is not None:\n y, att3 = self.att3(y, return_mask=True)\n att_maps.append(att3)\n y = self.pool3(y)\n\n y = self.conv4(y)\n if 
self.att4 is not None:\n y, att4 = self.att4(y, return_mask=True)\n att_maps.append(att4)\n\n y = self.conv5(y)\n if self.att5 is not None:\n y, att5 = self.att5(y, return_mask=True)\n att_maps.append(att5)\n\n return y, att_maps\n\n @staticmethod\n def _glob_feature_vector(x, mode='avg', head_att=None):\n att_map = None\n if mode == 'head_att':\n assert head_att is not None\n\n att_map = head_att(x)\n with torch.no_grad():\n num_values = torch.sum(att_map, dim=(2, 3), keepdim=True)\n scale = num_values.clamp_min(1.0).pow(-1)\n\n y = scale * att_map * x\n out = torch.sum(y, dim=(2, 3))\n elif mode == 'avg':\n out = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)\n elif mode == 'max':\n out = F.adaptive_max_pool2d(x, 1).view(x.size(0), -1)\n elif mode == 'avg+max':\n avg_pool = F.adaptive_avg_pool2d(x, 1)\n max_pool = F.adaptive_max_pool2d(x, 1)\n out = (avg_pool + max_pool).view(x.size(0), -1)\n else:\n raise ValueError(f'Unknown pooling mode: {mode}')\n\n return out, att_map\n\n def forward(self, x, return_featuremaps=False, get_embeddings=False, get_extra_data=False):\n feature_maps, feature_att_maps = self._backbone(x)\n if return_featuremaps:\n return feature_maps\n\n glob_features, head_att_map = self._glob_feature_vector(feature_maps, self.pooling_type, self.head_att)\n embeddings = [fc(glob_features) for fc in self.fc]\n\n if self.training and len(self.classifier) == 0:\n return embeddings\n\n attr_embeddings = {}\n if self.use_attr:\n attr_embeddings = {attr_name: attr_fc(glob_features) for attr_name, attr_fc in self.attr.items()}\n\n attr_vector = torch.cat([attr_embeddings[attr_name] for attr_name in self.attr_names], dim=1)\n embeddings = [attr_module(e, attr_vector) for e, attr_module in zip(embeddings, self.attr_att)]\n\n if not self.training and not self.classification:\n return torch.cat(embeddings, dim=1)\n\n logits = [classifier(embd) for embd, classifier in zip(embeddings, self.classifier)]\n\n if not self.training and self.classification:\n return logits\n\n if len(logits) == 1:\n logits = logits[0]\n if len(embeddings) == 1:\n embeddings = embeddings[0]\n\n if get_embeddings:\n out_data = [logits, embeddings]\n elif self.loss in ['softmax', 'adacos', 'd_softmax', 'am_softmax']:\n out_data = [logits]\n elif self.loss in ['triplet']:\n out_data = [logits, embeddings]\n else:\n raise KeyError(\"Unsupported loss: {}\".format(self.loss))\n\n if get_extra_data:\n extra_out_data = dict()\n extra_out_data['att_maps'] = [head_att_map] + feature_att_maps\n\n if self.use_attr:\n attr_logits = {attr_name: attr_classifier(attr_embeddings[attr_name])\n for attr_name, attr_classifier in self.attr_classifier.items()}\n extra_out_data['attr_logits'] = attr_logits\n\n out_data += [extra_out_data]\n\n return tuple(out_data)\n\n def train(self, train_mode=True):\n super(OSNet, self).train(train_mode)\n\n if self.bn_eval:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n if self.bn_frozen:\n for params in m.parameters():\n params.requires_grad = False\n\n return self\n\n def load_pretrained_weights(self, pretrained_dict):\n model_dict = self.state_dict()\n new_state_dict = OrderedDict()\n matched_layers, discarded_layers = [], []\n\n for k, v in pretrained_dict.items():\n if k.startswith('module.'):\n k = k[7:] # discard module.\n\n if k in model_dict and model_dict[k].size() == v.size():\n new_state_dict[k] = v\n matched_layers.append(k)\n else:\n discarded_layers.append(k)\n\n model_dict.update(new_state_dict)\n self.load_state_dict(model_dict)\n\n if 
len(matched_layers) == 0:\n warnings.warn(\n 'The pretrained weights cannot be loaded, '\n 'please check the key names manually '\n '(** ignored and continue **)'\n )\n else:\n print('Successfully loaded pretrained weights')\n if len(discarded_layers) > 0:\n print(\n '** The following layers are discarded '\n 'due to unmatched keys or layer size: {}'.\n format(discarded_layers)\n )\n\n\ndef init_pretrained_weights(model, key=''):\n \"\"\"Initializes model with pretrained weights.\n \n Layers that don't match with pretrained layers in name or size are kept unchanged.\n \"\"\"\n import os\n import errno\n import gdown\n\n def _get_torch_home():\n ENV_TORCH_HOME = 'TORCH_HOME'\n ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'\n DEFAULT_CACHE_DIR = '~/.cache'\n torch_home = os.path.expanduser(\n os.getenv(\n ENV_TORCH_HOME,\n os.path.join(\n os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'torch'\n )\n )\n )\n return torch_home\n\n torch_home = _get_torch_home()\n model_dir = os.path.join(torch_home, 'checkpoints')\n try:\n os.makedirs(model_dir)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n filename = key + '_imagenet.pth'\n cached_file = os.path.join(model_dir, filename)\n\n if not os.path.exists(cached_file):\n gdown.download(pretrained_urls[key], cached_file, quiet=False)\n\n state_dict = torch.load(cached_file)\n model.load_pretrained_weights(state_dict)\n\n\n##########\n# Instantiation\n##########\n\ndef osnet_ain_x1_0(num_classes, pretrained=False, download_weights=False,\n IN_first=False, IN_conv1=False, **kwargs):\n model = OSNet(\n num_classes,\n blocks=[\n [OSBlockINin, OSBlockINin],\n [OSBlock, OSBlockINin],\n [OSBlockINin, OSBlock]\n ],\n channels=[64, 256, 384, 512],\n IN_conv1=True,\n **kwargs\n )\n\n if pretrained and download_weights:\n init_pretrained_weights(model, key='osnet_ain_x1_0')\n\n return model\n\n\ndef osnet_ain2_x1_0(num_classes, pretrained=False, download_weights=False,\n enable_attentions=False, IN_first=False, IN_conv1=False,\n **kwargs):\n model = OSNet(\n num_classes,\n blocks=[\n [OSBlockINin, OSBlockINin],\n [OSBlock, OSBlockINin],\n [OSBlockINin, OSBlock]\n ],\n channels=[64, 256, 384, 512],\n attentions=[False, True, True, False, False] if enable_attentions else None,\n IN_first=True,\n IN_conv1=True,\n **kwargs\n )\n\n if pretrained and download_weights:\n init_pretrained_weights(model, key='osnet_ain_x1_0')\n\n return model\n"
] |
[
[
"torch.load",
"torch.cat",
"torch.sum",
"torch.no_grad",
"torch.nn.ModuleDict",
"torch.nn.Sigmoid",
"torch.nn.init.ones_",
"torch.nn.functional.adaptive_max_pool2d",
"torch.nn.functional.relu",
"torch.nn.GroupNorm",
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.nn.init.constant_",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.InstanceNorm2d",
"torch.nn.init.normal_",
"torch.nn.init.zeros_",
"torch.nn.BatchNorm2d",
"torch.nn.LayerNorm",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
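The `_glob_feature_vector` helper in `osnet_ain.py` above reduces the backbone's N x C x H x W feature map to an N x C vector. A small sketch of its 'avg', 'max', and 'avg+max' pooling branches on a dummy tensor; the shapes are illustrative only.

```python
import torch
import torch.nn.functional as F

x = torch.randn(8, 512, 16, 8)                                   # N x C x H x W feature map
avg_vec = F.adaptive_avg_pool2d(x, 1).view(x.size(0), -1)        # mode == 'avg'
max_vec = F.adaptive_max_pool2d(x, 1).view(x.size(0), -1)        # mode == 'max'
both    = (F.adaptive_avg_pool2d(x, 1)
           + F.adaptive_max_pool2d(x, 1)).view(x.size(0), -1)    # mode == 'avg+max'
```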
jhamman/MetSim
|
[
"538ebb141414355a5db0eddde6c0d4bec2e56390"
] |
[
"metsim/disaggregate.py"
] |
[
"\"\"\"\nDisaggregates daily data down to hourly data using some heuristics\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nimport metsim\nfrom metsim.defaults import PARAMS as params\nfrom metsim.defaults import CONSTS as consts \n\ntiny_rad_fract = np.zeros(366) #This is updated during the mtclim run\n\ndef disaggregate(df_daily):\n \"\"\"\n TODO\n \"\"\"\n dates_hourly = pd.date_range(metsim.start, metsim.stop, freq='H') \n df_hourly = pd.DataFrame(index=dates_hourly)\n _disagg_shortwave(df_daily, df_hourly)\n _disagg_temp( df_daily, df_hourly)\n _disagg_precip( df_daily, df_hourly)\n _disagg_thermal( df_daily, df_hourly)\n _disagg_wind( df_daily, df_hourly)\n return df_hourly\n\n\ndef _disagg_temp(df_daily, df_hourly):\n \"\"\"\n TODO\n \"\"\"\n # Calculate times of min/max temps\n set_min_max_hour(df_daily, df_hourly)\n # Fit hermite polynomial and sample daily \n \n\n\ndef _disagg_precip(df_daily, df_hourly):\n \"\"\"\n TODO\n \"\"\"\n pass\n\n\ndef _disagg_thermal(df_daily, df_hourly):\n \"\"\"\n TODO\n \"\"\"\n pass\n\n\ndef _disagg_wind(df_daily, df_hourly):\n \"\"\"\n TODO\n \"\"\" \n pass\n\n\ndef _disagg_shortwave(df_daily, df_hourly):\n \"\"\"\n TODO\n \"\"\"\n tiny_step_per_hour = int(3600 / consts['SRADDT'])\n tmp_rad = df_daily['s_swrad']\n n_days = len(tmp_rad)\n hourlyrad = np.zeros(n_days*24+1)\n for i in range(n_days):\n for j in range(24):\n for k in range(tiny_step_per_hour):\n tinystep = j*tiny_step_per_hour + k\n if tinystep < 0:\n tinystep += 24*tiny_step_per_hour\n if tinystep > 24*tiny_step_per_hour - 1:\n tinystep -= 24*tiny_step_per_hour\n hourlyrad[i*24+j] += tiny_rad_fract[df_daily['day_of_year'][i]][tinystep]\n #FIXME: This calculation is incorrect\n hourlyrad[i*24+j] *= tmp_rad[i]\n df_hourly['s_swrad'] = hourlyrad\n\n\ndef set_min_max_hour(df_daily, df_hourly):\n \"\"\"\n TODO\n \"\"\" \n hourly_rad = df_hourly['s_swrad']\n n_days = len(df_daily)\n t_max = np.zeros(n_days)\n t_min = np.zeros(n_days)\n for i in range(n_days):\n risehour = sethour = -999\n for hour in range(12):\n if (hourly_rad[i*24+hour] > 0 and \n (i*24+hour==0 or hourly_rad[i*24 + hour-1]<= 0)):\n risehour = hour\n for hour in range(12,24):\n if (hourly_rad[i*24+hour] <= 0 and hourly_rad[i*24+hour-1]>0):\n sethour = hour\n if i == n_days -1 and sethour == -999:\n sethour = 23\n if risehour >=0 and sethour>=0:\n t_max[i] - 0.67 * (sethour - risehour) + risehour\n tminhour[i] = rishour - 1\n df_daily['t_Tmin'] = tminhour\n df_daily['t_Tmax'] = tmaxhour\n\n"
] |
[
[
"numpy.zeros",
"pandas.DataFrame",
"pandas.date_range"
]
] |
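A worked example of the sunrise/sunset heuristic in `set_min_max_hour` above (reading the intended assignments): with sunrise at hour 6 and sunset at hour 18, the warmest hour lands in the early afternoon and the coolest just before sunrise.

```python
risehour, sethour = 6, 18                            # derived from hourly shortwave in the module
tmax_hour = 0.67 * (sethour - risehour) + risehour   # 14.04 -> early afternoon
tmin_hour = risehour - 1                             # 5     -> hour before sunrise
print(tmax_hour, tmin_hour)
```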
hbery/ML_Image_Compression_Ratio_Analysis
|
[
"16b21091bc4e3ced62f94f0e68ee302c1da5bf1e"
] |
[
"scripts/only_testing.py"
] |
[
"#!/usr/bin/env python3\n\n\"\"\"Script for testing model\n\n :Date: 06.2021 \n :Author: Adam Twardosz ([email protected], https://github.com/hbery)\n\"\"\"\n\nimport os, sys\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\nimport numpy as np\nfrom keras.models import load_model, Sequential\nfrom keras.layers import Softmax\n\nfrom utils import banner\n\n\ndef main():\n\t\n\t\"\"\" ~~~~ PREPARE DATA ~~~~ \"\"\"\n\tif len(sys.argv) < 3:\n\t\tprint(f\"Usage: {os.path.basename(sys.argv[0])} <folder with batches> <'model_name'> [ <destination folder> ]\")\n\t\tsys.exit(-1)\n\n\tcwd = os.getcwd()\n\tfolder = os.path.basename(sys.argv[1])\n\tbase_path = os.path.abspath(folder)\n\tdst_path = \"\"\n\tif len(sys.argv) > 3:\n\t\tdst_path = os.path.abspath(sys.argv[3])\n\telse:\n\t\tdst_path = base_path\n\n\tmodel_name = os.path.basename(sys.argv[2])\n\tmodel_path = os.path.join(cwd, \"models\", model_name)\n\tstatistics = os.path.join(cwd, 'statistics')\n\tdefault_line_length = 65\n\n\tif not os.path.isdir(statistics):\n\t\tos.mkdir(statistics)\n\n\tdir_files = os.listdir(base_path)\n\ttest_files = list(filter(lambda file: \"test\" in file, dir_files))\n\n\tprint(banner(\"MODEL\"))\n\tmodel = load_model(model_path)\n \n\tprint(\"⇊ Adding Softmax Layer to model\\n\")\n\tprob_model = Sequential([model, Softmax()])\n\t\n\tprint(prob_model.summary(line_length=default_line_length))\n\tprint()\n\n\n\t\"\"\" ~~~~ TEST MODEL'S ACCURACY ~~~~ \"\"\"\n\tprint(banner(\"TESTING\", length=default_line_length))\n\n\tnasa_predictions = []\n\tnasa_labels = []\n\tnature_predictions = []\n\tnature_labels = []\n\n\tfor test_file in test_files:\n\t# Loading from *.npz\n\t\twith np.load(os.path.join(base_path, test_file)) as test_batch:\n\t# Storing real labels\n\t\t\tnasa_labels.extend(test_batch[\"nsltest\"])\n\t\t\tnature_labels.extend(test_batch[\"ntltest\"])\n\t# Predicting labels and storing\n\t\t\tnasa_predictions.extend(prob_model.predict(test_batch[\"nsdtest\"]))\n\t\t\tnature_predictions.extend(prob_model.predict(test_batch[\"ntdtest\"]))\n\t\t\t\n\t# Save data for plotting\n\tstats_path = os.path.join(dst_path, f'{model_name}_stats.npz')\n\tnp.savez(stats_path,\n\t\tnasa_predictions=nasa_predictions,\n\t\tnasa_labels=nasa_labels,\n\t\tnature_predictions=nature_predictions,\n\t\tnature_labels=nature_labels\n\t)\n\tprint(f\"⮔ Statistics saved as: {stats_path}\".center(default_line_length))\n\n\n\"\"\"MAIN \"\"\"\nif __name__ == \"__main__\":\n\tmain()\n"
] |
[
[
"numpy.savez"
]
] |
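`only_testing.py` above ends by bundling predictions and labels into one `.npz` file via `numpy.savez`. A small sketch of that save/load round trip with dummy arrays; the file name and shapes are placeholders.

```python
import numpy as np

np.savez("model_stats.npz",
         nasa_predictions=np.zeros((4, 10)), nasa_labels=np.arange(4),
         nature_predictions=np.zeros((4, 10)), nature_labels=np.arange(4))

with np.load("model_stats.npz") as stats:   # same context-manager pattern the script uses for test batches
    print(stats["nasa_labels"])
```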
PurdueDualityLab/memoized-regex-engine
|
[
"e7edcb0033a1eba90589e7831733f6527d9c4909"
] |
[
"eval/libMemo.py"
] |
[
"\"\"\"Memoization: utils associated with memoization experiments\n\"\"\"\n\n# Import libLF\nimport os\nimport sys\nsys.path.append(os.path.join(os.environ['MEMOIZATION_PROJECT_ROOT'], 'eval', 'lib'))\nimport libLF\n\n# Other imports\nimport json\nimport re\nimport tempfile\nimport pandas as pd\n\n###\n# Constants\n###\n\nclass ProtoRegexEngine:\n \"\"\"One stop shop for interacting with the Prototype Regex Engine\n \n Don't instantiate this. Everything is static.\n \"\"\"\n CLI = os.path.join(os.environ['MEMOIZATION_PROJECT_ROOT'], \"src-simple\", \"re\")\n\n class SELECTION_SCHEME:\n SS_None = \"no memoization\"\n SS_Full = \"full memoization\"\n SS_InDeg = \"selective: indeg>1\"\n SS_Loop = \"selective: loop\"\n\n scheme2cox = {\n SS_None: \"none\",\n SS_Full: \"full\",\n SS_InDeg: \"indeg\",\n SS_Loop: \"loop\",\n }\n\n all = scheme2cox.keys()\n allMemo = [ SS_Full, SS_InDeg, SS_Loop ]\n\n class ENCODING_SCHEME:\n ES_None = \"no encoding\"\n ES_Negative = \"negative encoding\"\n ES_RLE = \"RLE\"\n ES_RLE_TUNED = \"RLE-tuned\"\n\n scheme2cox = {\n ES_None: \"none\",\n ES_Negative: \"neg\",\n ES_RLE: \"rle\",\n # ES_RLE_TUNED: \"rle-tuned\", # TODO Work out the right math here\n }\n\n all = scheme2cox.keys()\n\n @staticmethod\n def buildQueryFile(pattern, input, filePrefix=\"protoRegexEngineQueryFile-\"):\n \"\"\"Build a query file\n \n pattern: string\n input: string\n [filePrefix]: string\n \n returns: tmp fileName. Caller should unlink.\n \"\"\"\n fd, name = tempfile.mkstemp(suffix=\".json\", prefix=filePrefix)\n os.close(fd)\n with open(name, 'w') as outStream:\n json.dump({\n \"pattern\": pattern,\n \"input\": input,\n }, outStream)\n return name\n\n @staticmethod\n def query(selectionScheme, encodingScheme, queryFile, timeout=None):\n \"\"\"Query the engine\n\n selectionScheme: SELECTION_SCHEME\n encodingScheme: ENCODING_SCHEME\n queryFile: file path\n timeout: integer seconds before raising subprocess.TimeoutExpired\n\n returns: EngineMeasurements\n raises: on rc != 0, or on timeout\n \"\"\"\n rc, stdout, stderr = libLF.runcmd_OutAndErr(\n args= [ ProtoRegexEngine.CLI,\n ProtoRegexEngine.SELECTION_SCHEME.scheme2cox[selectionScheme],\n ProtoRegexEngine.ENCODING_SCHEME.scheme2cox[encodingScheme],\n '-f', queryFile ],\n timeout=timeout\n )\n if rc != 0:\n if \"syntax error\" in stderr:\n raise SyntaxError(\"Engine raised syntax error\\n rc: {}\\nstdout:\\n{}\\n\\nstderr:\\n{}\".format(rc, stdout, stderr))\n else:\n raise BaseException('Invocation failed; rc {} stdout\\n {}\\n\\nstderr\\n {}'.format(rc, stdout, stderr))\n\n res = re.search(r\"Need (\\d+) bits\", stdout)\n if res:\n libLF.log(\"Wished for {} bits\".format(res.group(1)))\n\n # libLF.log(\"stderr: <\" + stderr + \">\")\n return ProtoRegexEngine.EngineMeasurements(stderr.strip(), \"-no match-\" in stdout)\n \n class EngineMeasurements:\n \"\"\"Engine measurements\n \n This is a Python-native version of the JSON object \n emitted by the regex engine.\n It offers some assurance of type safety.\n \"\"\"\n def __init__(self, measAsJSON, misMatched):\n obj = json.loads(measAsJSON)\n self._unpackInputInfo(obj['inputInfo'])\n self._unpackMemoizationInfo(obj['memoizationInfo'])\n self._unpackSimulationInfo(obj['simulationInfo'])\n self.matched = not misMatched\n \n def _unpackInputInfo(self, dict):\n self.ii_lenW = int(dict['lenW'])\n self.ii_nStates = int(dict['nStates'])\n\n def _unpackMemoizationInfo(self, dict):\n self.mi_config_encoding = dict['config']['encoding']\n self.mi_config_vertexSelection = 
dict['config']['vertexSelection']\n\n self.mi_results_maxObservedAsymptoticCostsPerVertex = [\n int(cost) for cost in dict['results']['maxObservedAsymptoticCostsPerMemoizedVertex']\n ]\n self.mi_results_maxObservedMemoryBytesPerVertex = [\n int(cost) for cost in dict['results']['maxObservedMemoryBytesPerMemoizedVertex']\n ]\n self.mi_results_nSelectedVertices = int(dict['results']['nSelectedVertices'])\n self.mi_results_lenW = int(dict['results']['lenW'])\n\n def _unpackSimulationInfo(self, dict):\n self.si_nTotalVisits = int(dict['nTotalVisits'])\n self.si_simTimeUS = int(dict['simTimeUS'])\n self.si_visitsToMostVisitedSimPos = int(dict['visitsToMostVisitedSimPos'])\n self.si_nPossibleTotalVisitsWithMemoization = int(dict['nPossibleTotalVisitsWithMemoization'])\n self.si_visitsToMostVisitedSimPos = int(dict['visitsToMostVisitedSimPos'])\n\n###\n# Input classes\n###\n\nclass SimpleRegex:\n \"\"\"Simple regex for use with a memoized regex engine.\n Can be pattern (\"all\") or pattern+evilInput (\"SL\")\n \"\"\"\n def __init__(self):\n self.pattern = None\n self.evilInputs = []\n return\n \n def initFromNDJSON(self, line):\n obj = json.loads(line)\n self.pattern = obj['pattern']\n self.evilInputs = []\n if 'evilInputs' in obj:\n for _ei in obj['evilInputs']:\n _ei['couldParse'] = True # Hack\n ei = libLF.EvilInput()\n ei.initFromDict(_ei)\n self.evilInputs.append(ei)\n\n return self\n\n###\n# Output classes\n###\n\nclass MemoizationStaticAnalysis:\n \"\"\"Represents the result of regex pattern static analysis for memoization purposes\"\"\"\n def __init__(self):\n self.pattern = None\n self.policy2nSelectedVertices = {}\n \n def initFromRaw(self, pattern, policy2nSelectedVertices):\n self.pattern = pattern\n\n self.policy2nSelectedVertices = policy2nSelectedVertices\n # All memoization policies measured?\n s1 = set(policy2nSelectedVertices.keys())\n s2 = set(policy2nSelectedVertices.keys())\n assert s1 <= s2 <= s1\n \n return self\n \n def initFromNDJSON(self, jsonStr):\n obj = libLF.fromNDJSON(jsonStr)\n return self.initFromDict(obj)\n\n def initFromDict(self, obj):\n self.pattern = obj['pattern']\n self.policy2nSelectedVertices = obj['policy2nSelectedVertices']\n return self\n \n def toNDJSON(self):\n _dict = {\n 'pattern': self.pattern,\n 'policy2nSelectedVertices': self.policy2nSelectedVertices\n }\n return json.dumps(_dict)\n\nclass MemoizationDynamicAnalysis:\n \"\"\"Represents the result of regex pattern dynamic analysis for memoization purposes\"\"\"\n def __init__(self):\n self.pattern = None\n self.automatonSize = -1\n self.phiInDeg = -1\n self.phiQuantifier = -1\n self.inputLength = -1\n\n self.evilInput = None # If an SL regex\n self.nPumps = -1 # If an SL regex\n\n # Set these if you run a production regex analysis\n self.productionEnginePumps = -1\n self.perlBehavior = \"\"\n self.phpBehavior = \"\"\n self.csharpBehavior = \"\"\n\n self.selectionPolicy_to_enc2spaceAlgo = {} # Numeric space cost in algorithmic measure\n self.selectionPolicy_to_enc2spaceBytes = {} # Numeric space cost in bytes\n self.selectionPolicy_to_enc2time = {} # Numeric time cost\n\n for scheme in ProtoRegexEngine.SELECTION_SCHEME.scheme2cox.keys():\n if scheme != ProtoRegexEngine.SELECTION_SCHEME.SS_None:\n self.selectionPolicy_to_enc2spaceAlgo[scheme] = {}\n self.selectionPolicy_to_enc2spaceBytes[scheme] = {}\n self.selectionPolicy_to_enc2time[scheme] = {}\n \n def initFromRaw(self, pattern, automatonSize, phiInDeg, phiQuantifier, inputLength, evilInput, nPumps, selectionPolicy_to_enc2spaceAlgo, 
selectionPolicy_to_enc2spaceBytes, selectionPolicy_to_enc2time):\n self.pattern = pattern\n self.automatonSize = automatonSize\n self.phiInDeg = phiInDeg\n self.phiQuantifier = phiQuantifier\n self.inputLength = inputLength\n self.evilInput = evilInput\n self.nPumps = nPumps\n self.selectionPolicy_to_enc2time = selectionPolicy_to_enc2time\n self.selectionPolicy_to_enc2spaceAlgo = selectionPolicy_to_enc2spaceAlgo\n self.selectionPolicy_to_enc2spaceBytes = selectionPolicy_to_enc2spaceBytes\n return self\n \n def initFromNDJSON(self, jsonStr):\n obj = libLF.fromNDJSON(jsonStr)\n return self.initFromDict(obj)\n\n def initFromDict(self, obj):\n self.pattern = obj['pattern']\n self.automatonSize = obj['automatonSize']\n self.phiInDeg = obj['phiInDeg']\n self.phiQuantifier = obj['phiQuantifier']\n self.inputLength = obj['inputLength']\n\n if obj['evilInput'] is not None:\n ei = libLF.EvilInput()\n ei.initFromNDJSON(obj['evilInput'])\n self.evilInput = ei\n else:\n self.evilInput = None\n\n self.nPumps = obj['nPumps']\n\n self.productionEnginePumps = obj['productionEnginePumps']\n self.perlBehavior = obj['perlBehavior']\n self.phpBehavior = obj['phpBehavior']\n self.csharpBehavior = obj['csharpBehavior']\n\n self.selectionPolicy_to_enc2time = obj['selectionPolicy_to_enc2time']\n self.selectionPolicy_to_enc2spaceAlgo = obj['selectionPolicy_to_enc2spaceAlgo']\n self.selectionPolicy_to_enc2spaceBytes = obj['selectionPolicy_to_enc2spaceBytes']\n return self\n \n def toNDJSON(self):\n _dict = {\n 'pattern': self.pattern,\n 'automatonSize': self.automatonSize,\n 'phiInDeg': self.phiInDeg,\n 'phiQuantifier': self.phiQuantifier,\n 'inputLength': self.inputLength,\n 'evilInput': self.evilInput.toNDJSON() if self.evilInput else None,\n 'nPumps': self.nPumps,\n 'perlBehavior': self.perlBehavior,\n 'productionEnginePumps': self.productionEnginePumps,\n 'selectionPolicy_to_enc2time': self.selectionPolicy_to_enc2time,\n 'selectionPolicy_to_enc2spaceAlgo': self.selectionPolicy_to_enc2spaceAlgo,\n 'selectionPolicy_to_enc2spaceBytes': self.selectionPolicy_to_enc2spaceBytes,\n }\n return json.dumps(_dict)\n \n def validate(self):\n \"\"\"Returns True if everything looks OK, else raises an error\"\"\"\n assert self.automatonSize >= 0, \"No automaton\"\n assert self.phiInDeg >= 0, \"Negative |Phi_in-deg|?\"\n assert self.phiQuantifier >= 0, \"Negative |Phi_quantifier|?\"\n assert self.inputLength > 0, \"no input\"\n # Full space cost (algorithmic) for Phi=Q should be |Q| * |w|\n fullSpaceCostAlgo = self.selectionPolicy_to_enc2spaceAlgo[\n ProtoRegexEngine.SELECTION_SCHEME.SS_Full\n ][\n ProtoRegexEngine.ENCODING_SCHEME.ES_None\n ]\n # Should be \"bigger\" -- the difference can arise due to pump strings being > 1 character long\n assert fullSpaceCostAlgo <= self.automatonSize * (self.inputLength+1), \\\n \"fullSpaceCost {} is not >= {} * {}\".format(fullSpaceCostAlgo, self.automatonSize, self.inputLength)\n\n # Full table should have the most space complexity\n for selectionScheme, enc2space in self.selectionPolicy_to_enc2spaceAlgo.items():\n for encodingScheme, spaceCost in enc2space.items():\n assert spaceCost <= fullSpaceCostAlgo, \\\n \"General fullSpaceCost < cost for {}-{}\".format(selectionScheme, encodingScheme)\n assert spaceCost <= enc2space[ProtoRegexEngine.ENCODING_SCHEME.ES_None], \\\n \"Phi-specific fullSpaceCost < cost for {}-{}\".format(selectionScheme, encodingScheme)\n\n return True\n \n def toDataFrame(self):\n \"\"\"Return a pandas DataFrame\n \n This expands the selection-encoding dictionaries\n 
\"\"\"\n rows = []\n for selectionPolicy, d in self.selectionPolicy_to_enc2time.items():\n for encodingPolicy, _ in d.items():\n rows.append( {\n \"pattern\": self.pattern,\n \"|Q|\": self.automatonSize,\n \"|Phi_{in-deg > 1}|\": self.phiInDeg,\n \"|Phi_{quantifier}|\": self.phiQuantifier,\n \"|w|\": self.inputLength + 1, # Count the null byte\n \"SL\": True,\n \"nPumps\": self.nPumps,\n \"perlBehavior\": self.perlBehavior,\n \"phpBehavior\": self.phpBehavior,\n \"csharpBehavior\": self.csharpBehavior,\n \"productionEnginePumps\": self.productionEnginePumps,\n \"selectionPolicy\": selectionPolicy,\n \"encodingPolicy\": encodingPolicy,\n \"timeCost\": self.selectionPolicy_to_enc2time[selectionPolicy][encodingPolicy],\n \"spaceCostAlgo\": self.selectionPolicy_to_enc2spaceAlgo[selectionPolicy][encodingPolicy],\n \"spaceCostBytes\": self.selectionPolicy_to_enc2spaceBytes[selectionPolicy][encodingPolicy],\n })\n return pd.DataFrame(data=rows)\n"
] |
[
[
"pandas.DataFrame"
]
] |
kiototeko/PRIMAL2
|
[
"331ca7ba11d48483694594a9f2029d76238668bb"
] |
[
"Ray_ACNet.py"
] |
[
"import tensorflow as tf\nimport tensorflow.contrib.layers as layers\nimport numpy as np\n\n# parameters for training\nGRAD_CLIP = 10.0\nKEEP_PROB1 = 1 # was 0.5\nKEEP_PROB2 = 1 # was 0.7\nRNN_SIZE = 512\nGOAL_REPR_SIZE = 12\n\n\n# Used to initialize weights for policy and value output layers (Do we need to use that? Maybe not now)\ndef normalized_columns_initializer(std=1.0):\n def _initializer(shape, dtype=None, partition_info=None):\n out = np.random.randn(*shape).astype(np.float32)\n out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))\n return tf.constant(out)\n\n return _initializer\n\n\nclass ACNet:\n def __init__(self, scope, a_size, trainer, TRAINING, NUM_CHANNEL, OBS_SIZE, GLOBAL_NET_SCOPE, GLOBAL_NETWORK=False, RELATIONAL_LEARNING=False):\n with tf.variable_scope(str(scope) + '/qvalues'):\n self.trainer = trainer\n # The input size may require more work to fit the interface.\n self.inputs = tf.placeholder(shape=[None, NUM_CHANNEL, OBS_SIZE, OBS_SIZE], dtype=tf.float32)\n self.goal_pos = tf.placeholder(shape=[None, 3], dtype=tf.float32)\n self.myinput = tf.transpose(self.inputs, perm=[0, 2, 3, 1])\n self.policy, self.value, self.state_out, self.state_in, self.state_init, self.valids = self._build_net(\n self.myinput, self.goal_pos, RNN_SIZE, TRAINING, a_size, RELATIONAL_LEARNING)\n if TRAINING:\n self.actions = tf.placeholder(shape=[None], dtype=tf.int32)\n self.actions_onehot = tf.one_hot(self.actions, a_size, dtype=tf.float32)\n self.train_valid = tf.placeholder(shape=[None, a_size], dtype=tf.float32)\n self.target_v = tf.placeholder(tf.float32, [None], 'Vtarget')\n self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)\n\n self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, [1])\n self.train_value = tf.placeholder(tf.float32, [None])\n \n self.train_policy = tf.placeholder(tf.float32, [None])\n \n self.train_imitation = tf.placeholder(tf.float32, [None]) # NEED THIS\n\n self.optimal_actions = tf.placeholder(tf.int32, [None]) # NEED THIS\n\n self.optimal_actions_onehot = tf.one_hot(self.optimal_actions, a_size, dtype=tf.float32) # NEED THIS\n \n self.train_valids= tf.placeholder(tf.float32, [None,1])\n\n # Loss Functions\n self.value_loss = 0.1 * tf.reduce_mean(\n self.train_value * tf.square(self.target_v - tf.reshape(self.value, shape=[-1])))\n \n self.entropy = - tf.reduce_mean(self.policy * tf.log(tf.clip_by_value(self.policy, 1e-10, 1.0)))\n \n self.policy_loss = - 0.5 * tf.reduce_mean(self.train_policy*\n tf.log(tf.clip_by_value(self.responsible_outputs, 1e-15, 1.0)) * self.advantages)\n\n \n self.valid_loss = - 16 * tf.reduce_mean(self.train_valids * tf.log(tf.clip_by_value(self.valids, 1e-10, 1.0)) * \\\n self.train_valid + tf.log(\n tf.clip_by_value(1 - self.valids, 1e-10, 1.0)) * (1 - self.train_valid))\n \n\n self.loss = self.value_loss + self.policy_loss + self.valid_loss - self.entropy * 0.01\n\n\n # IMPORTANT: 0 * self.value_loss is important so we can\n # fetch the gradients properly\n self.imitation_loss = 0 * self.value_loss + tf.reduce_mean(self.train_imitation*\n tf.keras.backend.categorical_crossentropy(self.optimal_actions_onehot, self.policy))\n \n \n # Get gradients from local network using local losses and\n # normalize the gradients using clipping\n \n local_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope + '/qvalues')\n self.gradients = tf.gradients(self.loss, local_vars)\n self.var_norms = tf.global_norm(local_vars)\n self.grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 
GRAD_CLIP)\n\n # Apply local gradients to global network\n global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GLOBAL_NET_SCOPE + '/qvalues')\n if self.trainer:\n self.apply_grads = self.trainer.apply_gradients(zip(self.grads, global_vars))\n\n\n self.local_vars = local_vars\n \n # now the gradients for imitation loss\n self.i_gradients = tf.gradients(self.imitation_loss, local_vars)\n self.i_var_norms = tf.global_norm(local_vars)\n self.i_grads, self.i_grad_norms = tf.clip_by_global_norm(self.i_gradients, GRAD_CLIP)\n\n # Apply local gradients to global network\n if self.trainer:\n self.apply_imitation_grads = self.trainer.apply_gradients(zip(self.i_grads, global_vars))\n\n \n if GLOBAL_NETWORK:\n print(\"\\n\\n\\n\\n is a global network\\n\\n\\n\\n\")\n weightVars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n self.tempGradients = [tf.placeholder(shape=w.get_shape(), dtype=tf.float32) for w in weightVars]\n self.apply_grads = self.trainer.apply_gradients(zip(self.tempGradients, weightVars))\n #self.clippedGrads, norms = tf.clip_by_global_norm(self.tempGradients, GRAD_CLIP)\n #self.apply_grads = self.trainer.apply_gradients(zip(self.clippedGrads, weightVars))\n \n print(\"Hello World... From \" + str(scope)) # :)\n\n def _build_net(self, inputs, goal_pos, RNN_SIZE, TRAINING, a_size, RELATIONAL_LEARNING):\n def conv_mlp(inputs, kernal_size, output_size):\n inputs = tf.reshape(inputs, [-1, 1, kernal_size, 1])\n conv = layers.conv2d(inputs=inputs, padding=\"VALID\", num_outputs=output_size,\n kernel_size=[1, kernal_size], stride=1,\n data_format=\"NHWC\", weights_initializer=w_init, activation_fn=tf.nn.relu)\n\n return conv\n\n def VGG_Block(inputs):\n def conv_2d(inputs, kernal_size, output_size):\n conv = layers.conv2d(inputs=inputs, padding=\"SAME\", num_outputs=output_size,\n kernel_size=[kernal_size[0], kernal_size[1]], stride=1,\n data_format=\"NHWC\", weights_initializer=w_init, activation_fn=tf.nn.relu)\n\n return conv\n\n conv1 = conv_2d(inputs, [3, 3], RNN_SIZE // 4)\n conv1a = conv_2d(conv1, [3, 3], RNN_SIZE // 4)\n conv1b = conv_2d(conv1a, [3, 3], RNN_SIZE // 4)\n pool1 = layers.max_pool2d(inputs=conv1b, kernel_size=[2, 2])\n return pool1\n \n #From here on, these are functions used for the relational module which were obtained from https://github.com/RLOpensource/Relational_Deep_Reinforcement_Learning/blob/5945fab3fe6c2f344ab7ac78c95c8d1aee7f6e3b/core.py\n #Except the mlp function\n \n def flatten(nnk, shape):\n flatten = tf.reshape(nnk, [-1, shape[1]*shape[2]*shape[3]])\n return flatten\n \n def mlp(x): #this function was added as it was missing in the code I used\n for i in range(2):\n x = tf.layers.dense(inputs=x, units=x.get_shape()[2], activation=tf.nn.relu)\n return x\n\n def query_key_value(nnk, shape):\n flatten = tf.reshape(nnk, [-1, shape[1]*shape[2], shape[3]])\n after_layer = [tf.layers.dense(inputs=flatten, units=shape[3], activation=tf.nn.relu) for i in range(3)]\n\n return after_layer[0], after_layer[1], after_layer[2], flatten\n\n def self_attention(query, key, value):\n key_dim_size = float(key.get_shape().as_list()[-1])\n key = tf.transpose(key, perm=[0, 2, 1])\n S = tf.matmul(query, key) / tf.sqrt(key_dim_size)\n attention_weight = tf.nn.softmax(S)\n A = tf.matmul(attention_weight, value)\n shape = A.get_shape()\n return A, attention_weight, [s.value for s in shape]\n \n def layer_normalization(x):\n feature_shape = x.get_shape()[-1:]\n mean, variance = tf.nn.moments(x, [2], keep_dims=True)\n beta = tf.Variable(tf.zeros(feature_shape), 
trainable=False)\n gamma = tf.Variable(tf.ones(feature_shape), trainable=False)\n return gamma * (x - mean) / tf.sqrt(variance + 1e-8) + beta\n\n def residual(x, inp, residual_time):\n x = x + inp\n x = layer_normalization(x)\n return x\n\n def feature_wise_max(x):\n return tf.reduce_max(x, axis=2)\n\n def relational_module(x):\n shape = x.get_shape()\n query, key, value, E = query_key_value(x, shape)\n normalized_query = layer_normalization(query)\n normalized_key = layer_normalization(key)\n normalized_value = layer_normalization(value)\n A, attention_weight, shape = self_attention(normalized_query, normalized_key, normalized_value)\n A_mlp = mlp(A)\n E_hat = residual(A_mlp, E, 2)\n max_E_hat = feature_wise_max(E_hat)\n return max_E_hat\n\n w_init = layers.variance_scaling_initializer()\n vgg1 = VGG_Block(inputs)\n vgg2 = VGG_Block(vgg1)\n \n if RELATIONAL_LEARNING:\n vgg2 = relational_module(vgg2) #We add relational module in here\n\n #An error occurs here because of the size\n conv3 = layers.conv2d(inputs=vgg2, padding=\"VALID\", num_outputs=RNN_SIZE - GOAL_REPR_SIZE, kernel_size=[2, 2],\n stride=1, data_format=\"NHWC\", weights_initializer=w_init, activation_fn=None) \n\n flat = tf.nn.relu(layers.flatten(conv3))\n goal_layer = layers.fully_connected(inputs=goal_pos, num_outputs=GOAL_REPR_SIZE)\n hidden_input = tf.concat([flat, goal_layer], 1)\n h1 = layers.fully_connected(inputs=hidden_input, num_outputs=RNN_SIZE)\n d1 = layers.dropout(h1, keep_prob=KEEP_PROB1, is_training=TRAINING)\n h2 = layers.fully_connected(inputs=d1, num_outputs=RNN_SIZE, activation_fn=None)\n d2 = layers.dropout(h2, keep_prob=KEEP_PROB2, is_training=TRAINING)\n self.h3 = tf.nn.relu(d2 + hidden_input)\n # Recurrent network for temporal dependencies\n lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_SIZE, state_is_tuple=True)\n c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)\n h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)\n state_init = [c_init, h_init]\n c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])\n h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])\n state_in = (c_in, h_in)\n rnn_in = tf.expand_dims(self.h3, [0])\n step_size = tf.shape(inputs)[:1]\n state_in = tf.nn.rnn_cell.LSTMStateTuple(c_in, h_in)\n lstm_outputs, lstm_state = tf.nn.dynamic_rnn(\n lstm_cell, rnn_in, initial_state=state_in, sequence_length=step_size,\n time_major=False)\n lstm_c, lstm_h = lstm_state\n state_out = (lstm_c[:1, :], lstm_h[:1, :])\n self.rnn_out = tf.reshape(lstm_outputs, [-1, RNN_SIZE])\n\n policy_layer = layers.fully_connected(inputs=self.rnn_out, num_outputs=a_size,\n weights_initializer=normalized_columns_initializer(1. / float(a_size)),\n biases_initializer=None, activation_fn=None)\n policy = tf.nn.softmax(policy_layer)\n policy_sig = tf.sigmoid(policy_layer)\n value = layers.fully_connected(inputs=self.rnn_out, num_outputs=1,\n weights_initializer=normalized_columns_initializer(1.0), biases_initializer=None,\n activation_fn=None)\n\n return policy, value, state_out, state_in, state_init, policy_sig\n"
] |
[
[
"tensorflow.nn.dynamic_rnn",
"tensorflow.concat",
"tensorflow.nn.rnn_cell.LSTMStateTuple",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.contrib.layers.flatten",
"numpy.random.randn",
"numpy.square",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.get_collection",
"tensorflow.nn.moments",
"tensorflow.gradients",
"tensorflow.layers.dense",
"numpy.zeros",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.contrib.layers.dropout",
"tensorflow.contrib.layers.conv2d",
"tensorflow.one_hot",
"tensorflow.global_norm",
"tensorflow.clip_by_value",
"tensorflow.nn.relu",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.contrib.layers.max_pool2d",
"tensorflow.keras.backend.categorical_crossentropy",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.expand_dims",
"tensorflow.ones",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.clip_by_global_norm",
"tensorflow.sqrt"
]
] |
alcinos/auto_yolo
|
[
"78727596f937b38d4de47dd9f0a7cc8c6104323f"
] |
[
"experiments/comparison/baseline_search.py"
] |
[
"from auto_yolo import envs\nimport argparse\nimport numpy as np\n\nreadme = \"Searching for baseline threshold.\"\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n-digits\", type=int, default=1)\nparser.add_argument(\"--transfer\", action=\"store_true\")\nparser.add_argument(\"--sc\", choices=\"AP count_error count_1norm\".split())\nargs, _ = parser.parse_known_args()\n\n# dist_dict = {\n# 3: np.linspace(0, .1, 101),\n# 5: np.linspace(0, .1, 101),\n# 7: np.linspace(.6599-0.05, .6599+0.05, 101),\n# 9: np.linspace(.599-0.05, .599+0.05, 101),\n# }\n\ndistributions = [dict(cc_threshold=t) for t in np.linspace(0.01, 3.0, 100)]\n\ndurations = dict(\n oak=dict(\n max_hosts=1, ppn=4, cpp=1, gpu_set=\"0\", wall_time=\"1year\",\n cleanup_time=\"1mins\", slack_time=\"1mins\", n_repeats=1, kind=\"parallel\", host_pool=\":\"),\n)\n\n\ndef build_net(scope):\n from dps.utils.tf import MLP\n return MLP(n_units=[10, 10], scope=scope)\n\n\nconfig = dict(\n curriculum=[dict()],\n render_hook=None,\n cc_threshold=0.000001,\n do_train=False,\n build_object_encoder=build_net,\n build_object_decoder=build_net\n)\n\nif args.sc == \"AP\":\n config.update(stopping_criteria=\"AP,max\", threshold=1.0)\nelif args.sc == \"count_error\":\n config.update(stopping_criteria=\"count_error,min\", threshold=0.0)\nelif args.sc == \"count_1norm\":\n config.update(stopping_criteria=\"count_1norm,min\", threshold=0.0)\nelse:\n raise Exception()\n\nif args.transfer:\n config[\"min_chars\"] = args.n_digits\n config[\"max_chars\"] = args.n_digits\n config[\"n_train\"] = 25000\n task = \"scatter\"\nelse:\n config[\"min_digits\"] = args.n_digits\n config[\"max_digits\"] = args.n_digits\n config[\"n_train\"] = 64000\n task = \"arithmetic\"\n\n\nenvs.run_experiment(\n \"baseline_search_sc={}_n_digits={}\".format(args.sc, args.n_digits), config, readme,\n distributions=distributions, alg=\"baseline\", durations=durations, task=task\n)\n"
] |
[
[
"numpy.linspace"
]
] |
XiaoyongNI/hybrid-inference
|
[
"c268e1ada019e08f62e3f02fc6d5059130ec5358"
] |
[
"datasets/nclt.py"
] |
[
"from __future__ import print_function\nimport sys, os\nsys.path.append('../')\nimport torch.utils.data as data\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport torch\nimport pickle\nimport settings\nimport time\ndates = [];\ndates.append('2012-01-08')\ndates.append('2012-01-15')\ndates.append('2012-01-22')\ndates.append('2012-02-02')\ndates.append('2012-02-04')\ndates.append('2012-02-05')\ndates.append('2012-02-12')\ndates.append('2012-02-18')\ndates.append('2012-02-19')\ndates.append('2012-03-17')\ndates.append('2012-03-25')\ndates.append('2012-03-31')\ndates.append('2012-04-29')\ndates.append('2012-05-11')\ndates.append('2012-05-26')\ndates.append('2012-06-15')\ndates.append('2012-08-04')\ndates.append('2012-08-20')\ndates.append('2012-09-28')\ndates.append('2012-10-28')\ndates.append('2012-11-04')\ndates.append('2012-11-16')\ndates.append('2012-11-17')\ndates.append('2012-12-01')\ndates.append('2013-01-10')\ndates.append('2013-02-23')\ndates.append('2013-04-05')\ndates = ['2012-01-22']\npath_gps = \"data/nclt/sensor_data/%s/gps.csv\"\npath_gps_rtk = \"data/nclt/sensor_data/%s/gps_rtk.csv\"\npath_gps_rtk_err = \"data/nclt/sensor_data/%s/gps_rtk_err.csv\"\npath_gt = \"data/nclt/ground_truth/groundtruth_%s.csv\"\ncompact_path = \"temp/nclt_%s.pickle\"\n\nclass NCLT(data.Dataset):\n def __init__(self, date, partition='train', ratio=1.0):\n self.partition = partition\n self.ratio = ratio\n if not os.path.exists(compact_path % date):\n print(\"Loading NCLT dataset ...\")\n self.gps, self.gps_rtk, self.gps_rtk_err, self.gt = self.__load_data(date)\n self.__process_data()\n self.dump(compact_path % date, [self.gps, self.gps_rtk, self.gps_rtk_err, self.gt])\n\n else:\n [self.gps, self.gps_rtk, self.gps_rtk_err, self.gt] = self.load(compact_path % date)\n\n if self.partition == 'train':\n indexes = [1, 3]\n elif self.partition == 'val':\n indexes = [0, 2]\n elif self.partition == 'test':\n indexes = [4, 5, 6]\n else:\n raise Exception('Wrong partition')\n\n\n self.gps = [self.gps[i].astype(np.float32) for i in indexes]\n self.gps_rtk = [self.gps_rtk[i].astype(np.float32) for i in indexes]\n self.gt = [self.gt[i].astype(np.float32) for i in indexes]\n\n self.cut_data()\n\n\n print(\"NCLT %s loaded: %d samples \" % (partition, sum([x.shape[0] for x in self.gps_rtk])))\n\n self.operators_b = [self.__buildoperators_sparse(self.gps[i].shape[0]) for i in range(len(self.gps))]\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (state, meas) where target is index of the target class.\n \"\"\"\n x0, P0 = self.__pos2x0(self.gps_rtk[index][0, 1:].astype(np.float32))\n return self.gt[index][:, 0], self.gt[index][:, 1:], self.gps_rtk[index][:, 1:], x0, P0, self.operators_b[index]\n\n def cut_data(self):\n self.gps = [cut_array(e, self.ratio) for e in self.gps]\n self.gps_rtk = [cut_array(e, self.ratio) for e in self.gps_rtk]\n self.gt = [cut_array(e, self.ratio) for e in self.gt]\n\n def __pos2x0(self, pos):\n if settings.x0_v.shape[0] == 4:\n x0 = np.zeros(4).astype(np.float32)\n x0[0] = pos[0]\n x0[2] = pos[1]\n P0 = np.eye(4)*1\n else:\n x0 = np.zeros(6).astype(np.float32)\n x0[0] = pos[0]\n x0[3] = pos[1]\n P0 = np.eye(6)*1\n return x0, P0\n\n def dump(self, path, object):\n if not os.path.exists('temp'):\n os.makedirs('temp')\n with open(path, 'wb') as f:\n # Pickle the 'data' dictionary using the highest protocol available.\n pickle.dump(object, f, pickle.HIGHEST_PROTOCOL)\n\n def load(self, path):\n with open(path, 'rb') as 
f:\n # The protocol version used is detected automatically, so we do not\n # have to specify it.\n return pickle.load(f)\n\n def __len__(self):\n return len(self.gt)\n\n def total_len(self):\n total = 0\n for arr in self.gt:\n total += arr.shape[0]\n return total\n\n def _generate_sample(self, seed):\n np.random.seed(seed)\n\n if self.acceleration:\n return simulate_system(create_model_parameters_a, K=self.K, x0=self.x0)\n else:\n return simulate_system(create_model_parameters_v, K=self.K, x0=self.x0)\n\n def __buildoperators_sparse_old(self, nn=20):\n # Identity\n i = torch.LongTensor([[i, i] for i in range(nn)])\n v = torch.FloatTensor([1 for i in range(nn)])\n I = torch.sparse.FloatTensor(i.t(), v)\n\n #Message right\n i = torch.LongTensor([[i, i+1] for i in range(nn-1)] + [[nn-1, nn-1]])\n v = torch.FloatTensor([1 for i in range(nn-1)] + [0])\n mr = torch.sparse.FloatTensor(i.t(), v)\n\n #Message left\n i = torch.LongTensor([[0, nn-1]] + [[i+1, i] for i in range(nn-1)])\n v = torch.FloatTensor([0] + [1 for i in range(nn-1)])\n ml = torch.sparse.FloatTensor(i.t(), v)\n\n return [I, mr, ml]\n\n def __buildoperators_sparse(self, nn=20):\n # Message right to left\n m_left_r = []\n m_left_c = []\n\n m_right_r = []\n m_right_c = []\n\n m_up_r = []\n m_up_c = []\n\n for i in range(nn - 1):\n m_left_r.append(i)\n m_left_c.append((i + 1))\n\n m_right_r.append(i + 1)\n m_right_c.append((i))\n\n for i in range(nn):\n m_up_r.append(i)\n m_up_c.append(i + nn)\n\n m_left = [torch.LongTensor(m_left_r), torch.LongTensor(m_left_c)]\n m_right = [torch.LongTensor(m_right_r), torch.LongTensor(m_right_c)]\n m_up = [torch.LongTensor(m_up_r), torch.LongTensor(m_up_c)]\n\n return {\"m_left\": m_left, \"m_right\": m_right, \"m_up\": m_up}\n\n def __load_gps(self, path, date):\n df = pd.read_csv(path % date)\n df = df.iloc[:, [0, 3, 4]]\n return df.values\n\n def __load_gps_err(self, date):\n df = pd.read_csv(path_gps % date)\n df = df.iloc[:, 6]\n return df.values\n\n def __load_gt(self, date):\n df = pd.read_csv(path_gt % date)\n gt = df.iloc[:, [0, 2, 1]].values\n gt_err = df.iloc[:, [5, 4]].values\n return gt, gt_err\n\n def __load_gps_rtk_err(self, date):\n df = pd.read_csv(path_gps_rtk_err % date)\n return df.values\n\n def __compute_gps_err(self, gps, gt):\n return np.mean(np.square(gps - gt), axis=1)\n\n def __load_data(self, date):\n \"We use the timestamp of gps_rtk which has the lowest frequency 1 Hz\"\n gps = self.__load_gps(path_gps, date)\n gps_rtk = self.__load_gps(path_gps_rtk, date)\n gps_rtk_err = self.__load_gps_rtk_err(date)\n gt, _ = self.__load_gt(date)\n\n self.lat0 = gps_rtk[0, 1]\n self.lng0 = gps_rtk[0, 2]\n self.bias = [gt[0, 1], gt[0, 2]]\n\n gps_rtk_dec = self.__decompose(gps_rtk, date)\n gps_rtk_err_dec = self.__decompose(gps_rtk_err, date)\n\n gps_ar = []\n gt_ar = []\n gps_rtk_ar, gps_rtk_err_ar = [], []\n\n for gps_rtk_i, gps_rtk_err_i in zip(gps_rtk_dec, gps_rtk_err_dec):\n idxs = self.__filer_freq(gps_rtk_i[:, 0], f=1.)\n gps_rtk_ar.append(gps_rtk_i[idxs, :])\n gps_rtk_err_ar.append(gps_rtk_err_i[idxs, :])\n\n\n #Matching with GT\n idxs_gt = self.__match_tt(gps_rtk_ar[-1][:, 0], gt[:, 0])\n gt_ar.append(gt[idxs_gt, :])\n\n #Matching with gps\n idxs = self.__match_tt(gps_rtk_ar[-1][:, 0], gps[:, 0])\n gps_ar.append(gps[idxs, :])\n\n return gps_ar, gps_rtk_ar, gps_rtk_err_ar, gt_ar\n\n def __decompose(self, data, date):\n if date == '2012-01-22':\n return [data[100:2054], data[2054:4009], data[4147:6400], data[6400:8890], data[9103:10856], data[11113:12608],\n 
data[12733:13525]]#, [0, 4147, 9103, 11113, 12733]\n else:\n return data\n\n def concatenate(self, arrays):\n return np.concatenate(arrays, axis=0)\n\n def __process_data(self):\n '''\n lat0 = self.gps_rtk[0][0, 1]\n lng0 = self.gps_rtk[0][0, 2]\n bias = [self.gt[0][0, 1], self.gt[0][0, 2]]\n '''\n\n for i in range(len(self.gps_rtk)):\n self.gps_rtk[i][:, 1:] = polar2cartesian(self.gps_rtk[i][:, 1], self.gps_rtk[i][:, 2], self.lat0,\n self.lng0)\n self.gps[i][:, 1:] = polar2cartesian(self.gps[i][:, 1], self.gps[i][:, 2], self.lat0,\n self.lng0)\n\n self.gt[i][:, 1:] = remove_bias(self.gt[i][:, 1:], self.bias)\n\n def __match_tt(self, tt1, tt2):\n print(\"\\tMatching gps and gt timestamps\")\n arr_idx = []\n for i, ti in enumerate(tt1):\n diff = np.abs(tt2 - ti)\n min_idx = np.argmin(diff)\n arr_idx.append(min_idx)\n return arr_idx\n\n def _match_gt_step1(self, gps, gps_err, gt, margin=5):\n gt_aux = gt.copy()\n min_err = 1e10\n min_x, min_y = 0, 0\n for x in np.linspace(-margin, margin, 200):\n for y in np.linspace(-margin, margin, 200):\n gt_aux[:, 0] = gt[:, 0] + x\n gt_aux[:, 1] = gt[:, 1] + y\n err = mse(gps, gps_err, gt_aux)\n if err < min_err:\n min_err = err\n min_x = x\n min_y = y\n #print(\"x: %.4f \\t y:%.4f \\t err:%.4f\" % (min_x, min_y, err))\n\n print(err)\n print(\"Fixing GT bias x: %.4f \\t y:%.4f \\t error:%.4f\" % (min_x, min_y, min_err))\n return (min_x, min_y)\n\n def _match_gt_step2(self, gt, err):\n (min_x, min_y) = err\n gt[:, 0] = gt[:, 0] + min_x\n gt[:, 1] = gt[:, 1] + min_y\n return gt\n\n def __filer_freq(self, ts, f=1., window=5):\n arr_idx = []\n last_id = 0\n arr_idx.append(last_id)\n check = False\n while last_id < len(ts) - window:\n rel_j = []\n for j in range(1, window):\n rel_j.append(np.abs(f - (ts[last_id+j] - ts[last_id])/1000000))\n last_id = last_id + 1 + np.argmin(rel_j)\n\n min_val = np.min(rel_j)\n if min_val > 0.05:\n check = True\n arr_idx.append(last_id)\n if check:\n print(\"\\tWarning: Not all frequencies are %.3fHz\" % f)\n print(\"\\tFiltering finished!\")\n return arr_idx\n\n\ndef mse(gps, gps_err, gt, th=2):\n error = np.mean(np.square(gps - gt), axis=1)\n mapping = (gps_err < th).astype(np.float32)\n return np.mean(error*mapping)\n\ndef polar2cartesian(lat, lng, lat0, lng0):\n dLat = lat - lat0\n dLng = lng - lng0\n\n r = 6400000 # approx. radius of earth (m)\n x = r * np.cos(lat0) * np.sin(dLng)\n y = r * np.sin(dLat)\n return np.concatenate((np.expand_dims(x, 1), np.expand_dims(y, 1)), 1)\n\n\ndef remove_bias(vector, bias):\n for i in range(vector.shape[1]):\n vector[:, i] = vector[:, i] - bias[i]\n return vector\n\nif __name__ == '__main__':\n for date in dates:\n dataset = NCLT('2012-01-22', partition='train')\n dataset = NCLT('2012-01-22', partition='val')\n dataset = NCLT('2012-01-22', partition='test')\n\n\ndef cut_array(array, ratio):\n length = len(array)\n return array[0:int(round(ratio*length))]"
] |
[
[
"numpy.square",
"torch.LongTensor",
"pandas.read_csv",
"numpy.expand_dims",
"numpy.abs",
"numpy.random.seed",
"numpy.linspace",
"numpy.min",
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.mean",
"numpy.argmin",
"numpy.zeros"
]
] |
Next-Generation-Neural-Interfaces/Hardware-efficient-MUA-compression
|
[
"853c6e0f3d085812e88fd0572ac7c64a172255d7"
] |
[
"Behavioral decoding/HPC code/Flint_HPC_BDP_S_test.py"
] |
[
"\"\"\"\r\nEvaluating spike-based BMI decoding using Wiener filter\r\nLoad Flint data from matlab. HPC version.\r\n\"\"\"\r\n\r\n# import packages\r\nimport numpy as np\r\nfrom HPC_working_dir.functions.preprocess import input_shaping, split_index\r\nfrom HPC_working_dir.functions.decoders import WienerCascadeDecoder\r\nfrom HPC_working_dir.functions.metrics import compute_rmse, compute_pearson\r\nimport time as timer\r\nimport pickle\r\n\r\nfrom scipy import io\r\nimport copy\r\n\r\nimport os\r\nfrom os.path import exists \r\n\r\ndef moving_average(a, n=3) :\r\n a = np.hstack((np.zeros(n-1),a))\r\n ret = np.cumsum(a, dtype=float)\r\n ret[n:] = ret[n:] - ret[:-n]\r\n return ret[n - 1:] / n\r\n\r\n# Create formatted data folder on HPC directory \"working_directory\", upload \r\n# formatted data (only the .mat files, all_binned_data.pkl not required) to it,\r\n# and add the path below.\r\n# Create a results_test_Flint folder, and add the path below.\r\n# Upload filenames_Flint_test.txt to the HPC \"working_directory\".\r\n\r\ndef BDP_for_S_and_BP():\r\n \r\n\r\n # Path to HPC working directory (where neural data and results folders are loctaed)\r\n working_directory = ''\r\n \r\n if working_directory == '':\r\n print('Fill in path to working directory')\r\n return 0\r\n \r\n \r\n file_names = working_directory + 'filenames_Flint_test.txt'\r\n \r\n # Directories\r\n mat_folder = working_directory + 'neural_data/' # spike features folder\r\n result_folder = working_directory + 'results_test_Flint/' # results folder\r\n \r\n \r\n delta_time_vec = [0.001, 0.005, 0.01, 0.02, 0.05, 0.1]\r\n time_steps_vec = [5, 10, 15]\r\n lag_values_vec = [0, 5, 10]\r\n window_len_vec = [0, 0.05, 0.1, 0.2]\r\n alpha_vec = [0, 1e-4, 1e-2]\r\n degree_vec = [2,3,4]\r\n regular = 'l2' # regularisation type\r\n num_fold = 5 # number of cross-validation folds\r\n\r\n \r\n PBS_ARRAY_INDEX = int(os.environ['PBS_ARRAY_INDEX'])\r\n print('PBS_ARRAY_INDEX: ' + str(PBS_ARRAY_INDEX))\r\n \r\n print (\"Starting simulation\")\r\n run_start = timer.time()\r\n \r\n # Look at all stored file names, we have it this way so the indexing is \r\n # consistent, always relative to the .txt file \r\n with open(file_names) as f:\r\n lines = f.readlines()\r\n \r\n # Rec indexing\r\n PBS_ARRAY_INDEX_rec = PBS_ARRAY_INDEX % len(lines)\r\n count_1 = int(np.floor(PBS_ARRAY_INDEX/(len(lines))))\r\n \r\n # BP indexing\r\n PBS_ARRAY_INDEX_BP = count_1 % len(delta_time_vec)\r\n count_2 = int(np.floor(count_1/(len(delta_time_vec))))\r\n \r\n # Wdw indexing\r\n PBS_ARRAY_INDEX_wdw = count_2 % len(window_len_vec)\r\n \r\n delta_time = delta_time_vec[PBS_ARRAY_INDEX_BP]\r\n file_name = lines[PBS_ARRAY_INDEX_rec].replace('\\n','')\r\n wdw_time = window_len_vec[PBS_ARRAY_INDEX_wdw]\r\n \r\n print('BP: ' + str(delta_time*1000) + '; Rec-sub: ' + file_name + ' - wdw: ' + str(wdw_time)) \r\n\r\n # Load neural data\r\n mat_filename = mat_folder+file_name + '_BP_'+ str(int(delta_time*1000))+'_ms.mat' \r\n print (\"Loading input features from file: \"+mat_filename)\r\n f = io.loadmat(mat_filename)\r\n \r\n model = WienerCascadeDecoder() # instantiate model\r\n \r\n # Moving average window\r\n wdw_samples = int(np.round(wdw_time / delta_time))\r\n\r\n input_feature_1 = f['binned_MUA'][:]\r\n cursor_vel_1 = f['collated_hand_vel'][:] # in mm/s\r\n \r\n print('input shape: ' + str(np.shape(input_feature_1)))\r\n print('output shape: ' + str(np.shape(cursor_vel_1)))\r\n\r\n for timesteps in time_steps_vec:\r\n \r\n input_feature = copy.deepcopy(input_feature_1)\r\n 
cursor_vel = copy.deepcopy(cursor_vel_1[:,:2]) # ignore z-axis\r\n \r\n input_dim = input_feature.shape[1] # input dimension\r\n output_dim = cursor_vel.shape[1] # output dimension NOTE CHANGE FOR ORIG\r\n\r\n # Initialise performance scores (RMSE and CC) with nan values\r\n rmse_valid = np.full((num_fold,output_dim),np.nan)\r\n rmse_test = np.copy(rmse_valid)\r\n cc_valid = np.copy(rmse_valid)\r\n cc_test = np.copy(rmse_valid)\r\n time_train = np.full((num_fold),np.nan)\r\n time_test = np.copy(time_train) \r\n \r\n print (\"Formatting input feature data\")\r\n stride = 1 # number of samples to be skipped\r\n X_in = input_shaping(input_feature,timesteps,stride)\r\n X_in = X_in.reshape(X_in.shape[0],(X_in.shape[1]*X_in.shape[2]),order='F')\r\n \r\n print (\"Formatting output (kinematic) data\")\r\n diff_samp = cursor_vel.shape[0]-X_in.shape[0]\r\n Y_out = cursor_vel[diff_samp:,:] # in mm/s (remove it for new corrected velocity)\r\n\r\n print (\"Splitting input dataset into training, validation, and testing subdataset\")\r\n all_train_idx,all_valid_idx,all_test_idx = split_index(X_in,num_fold)\r\n \r\n S_vector = np.arange(2,40)\r\n copy_X_in = copy.deepcopy(X_in)\r\n for S in S_vector:\r\n X_in = copy.deepcopy(copy_X_in)\r\n \r\n # Clip dynamic range\r\n X_in[X_in>S] = S\r\n print('S: ' + str(S))\r\n \r\n # Moving average window\r\n if wdw_samples != 0:\r\n for channel in np.arange(len(X_in[0,:])):\r\n X_in[:,channel] = moving_average(X_in[:,channel],wdw_samples)\r\n \r\n\r\n for lag_value in lag_values_vec:\r\n lag = int(-0.004 / delta_time * lag_value) # lag between kinematic and ´neural data (minus indicates neural input occurs before kinematic)\r\n \r\n for alpha in alpha_vec:\r\n for degree in degree_vec:\r\n params = {'timesteps':timesteps, 'regular': regular, 'alpha':alpha, 'degree':degree}\r\n \r\n # Storing evaluation results into pkl file\r\n result_filename = result_folder+file_name+\\\r\n '_delta_'+str(int(delta_time*1e3))+'ms_S_'+str(int(S))+\\\r\n '_wdw_' + str(int(wdw_time*1000)) + '_lag_'+str(lag_value)\\\r\n + '_timestep_'+str(timesteps) +\\\r\n '_alpha_' + str(alpha) + '_deg_' \\\r\n + str(degree) + '.pkl'\r\n \r\n if exists(result_filename):\r\n print('Results exist \\n')\r\n continue\r\n \r\n \r\n for i in range(num_fold): \r\n train_idx = all_train_idx[i]\r\n valid_idx = all_valid_idx[i]\r\n test_idx = all_test_idx[i]\r\n \r\n # specify training dataset\r\n X_train = X_in[train_idx,:] \r\n Y_train = Y_out[train_idx,:]\r\n \r\n # specify validation dataset\r\n X_valid = X_in[valid_idx,:]\r\n Y_valid = Y_out[valid_idx,:]\r\n \r\n # specify validation dataset\r\n X_test = X_in[test_idx,:]\r\n Y_test = Y_out[test_idx,:]\r\n \r\n # Standardise (z-score) input dataset\r\n X_train_mean = np.nanmean(X_train,axis=0)\r\n X_train_std = np.nanstd(X_train,axis=0) \r\n X_train = (X_train - X_train_mean)/X_train_std \r\n X_valid = (X_valid - X_train_mean)/X_train_std \r\n X_test = (X_test - X_train_mean)/X_train_std \r\n \r\n # Remove nan columns\r\n remove = np.isnan(X_train[0,:])\r\n X_train = np.delete(X_train,remove,1)\r\n X_valid = np.delete(X_valid,remove,1)\r\n X_test = np.delete(X_test,remove,1)\r\n \r\n # Zero mean (centering) output dataset\r\n Y_train_mean = np.nanmean(Y_train,axis=0) \r\n Y_train = Y_train - Y_train_mean \r\n Y_valid = Y_valid - Y_train_mean\r\n Y_test = Y_test - Y_train_mean\r\n \r\n \r\n #Re-align data to take lag into account\r\n if lag < 0:\r\n X_train = X_train[:lag,:] # remove lag first from end (X lag behind Y)\r\n Y_train = Y_train[-lag:,:] # 
reomve lag first from beginning\r\n X_valid = X_valid[:lag,:]\r\n Y_valid = Y_valid[-lag:,:]\r\n X_test = X_test[:lag,:]\r\n Y_test = Y_test[-lag:,:]\r\n if lag > 0:\r\n X_train = X_train[lag:,:] # remove lag first from beginning\r\n Y_train = Y_train[:-lag,:] # remove lag first from end (X lead in front of Y)\r\n X_valid = X_valid[lag:,:]\r\n Y_valid = Y_valid[:-lag,:] \r\n X_test = X_test[lag:,:]\r\n Y_test = Y_test[:-lag,:] \r\n \r\n print(\"Instantiating and training model...\") \r\n model = WienerCascadeDecoder() # instantiate model\r\n start = timer.time()\r\n \r\n model.fit(X_train,Y_train,**params) # train model\r\n end = timer.time()\r\n print(\"Model training took {:.2f} seconds\".format(end - start)) \r\n time_train[i] = end - start\r\n print(\"Evaluating model...\")\r\n Y_valid_predict = model.predict(X_valid)\r\n start = timer.time()\r\n Y_test_predict = model.predict(X_test)\r\n end = timer.time()\r\n print(\"Model testing took {:.2f} seconds\".format(end - start)) \r\n time_test[i] = end - start\r\n \r\n # Compute performance metrics \r\n rmse_vld = compute_rmse(Y_valid,Y_valid_predict)\r\n rmse_tst = compute_rmse(Y_test,Y_test_predict)\r\n cc_vld = compute_pearson(Y_valid,Y_valid_predict)\r\n cc_tst = compute_pearson(Y_test,Y_test_predict)\r\n rmse_valid[i,:] = rmse_vld\r\n rmse_test[i,:] = rmse_tst\r\n cc_valid[i,:] = cc_vld\r\n cc_test[i,:] = cc_tst\r\n \r\n print(\"Fold-{} | Validation RMSE: {:.2f}\".format(i,np.mean(rmse_vld)))\r\n print(\"Fold-{} | Validation CC: {:.2f}\".format(i,np.mean(cc_vld)))\r\n print(\"Fold-{} | Testing RMSE: {:.2f}\".format(i,np.mean(rmse_tst)))\r\n print(\"Fold-{} | Testing CC: {:.2f}\".format(i,np.mean(cc_tst)))\r\n \r\n run_end = timer.time()\r\n mean_rmse_valid = np.nanmean(rmse_valid,axis=0)\r\n mean_rmse_test = np.nanmean(rmse_test,axis=0)\r\n mean_cc_valid = np.nanmean(cc_valid,axis=0)\r\n mean_cc_test = np.nanmean(cc_test,axis=0)\r\n mean_time = np.nanmean(time_train,axis=0)\r\n print(\"----------------------------------------------------------------------\")\r\n print(\"Validation Mean RMSE: %.3f \" %(np.mean(mean_rmse_valid)))\r\n print(\"Validation Mean CC: %.3f \" %(np.mean(mean_cc_valid)))\r\n print(\"Testing Mean RMSE: %.3f \" %(np.mean(mean_rmse_test)))\r\n print(\"Testing Mean CC: %.3f \" %(np.mean(mean_cc_test)))\r\n print(\"----------------------------------------------------------------------\") \r\n print (\"Storing results into file: \"+result_filename)\r\n \r\n \r\n # Store results\r\n with open(result_filename, 'wb') as file:\r\n results = {'rmse_valid': rmse_valid,\r\n 'rmse_test': rmse_test,\r\n 'cc_valid': cc_valid,\r\n 'cc_test': cc_test} # Shows how much of the validation data is used for assignment vs CR\r\n #'Y_true': Y_test, # Shows how much of the validation data is used for assignment vs CR\r\n #'Y_predict': Y_test_predict}\r\n # A new file will be created\r\n pickle.dump(results, file)\r\n \r\n run_time = run_end - run_start\r\n print (\"Finished whole processes within %.2f seconds\" % run_time)\r\n print(\"All done\")\r\n"
] |
[
[
"numpy.isnan",
"numpy.arange",
"scipy.io.loadmat",
"numpy.cumsum",
"numpy.full",
"numpy.round",
"numpy.copy",
"numpy.delete",
"numpy.shape",
"numpy.nanmean",
"numpy.mean",
"numpy.nanstd",
"numpy.zeros"
]
] |
twild-fb/pytext
|
[
"07cadc0d130dac30d71d9da70380f124b3f5ac59"
] |
[
"pytext/utils/distributed.py"
] |
[
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport torch\nimport torch.distributed as dist_c10d\n\n\ndef dist_init(\n distributed_rank: int,\n world_size: int,\n init_method: str,\n device_id: int,\n backend: str = \"nccl\",\n):\n \"\"\"\n 1. After spawn process per GPU, we want all workers to call init_process_group\n around the same time or times out.\n 2. After dist_init, we want all workers to start calling all_reduce/barrier\n around the same time or NCCL timeouts.\n \"\"\"\n if init_method and world_size > 1 and torch.cuda.is_available():\n dist_c10d.init_process_group(\n backend=backend,\n init_method=init_method,\n world_size=world_size,\n rank=distributed_rank,\n )\n # calling all_reduce for synchronzing all workers\n dist_tensor = torch.tensor(\n [1], dtype=torch.float32, device=\"cuda:{}\".format(device_id)\n )\n dist_c10d.all_reduce(dist_tensor)\n\n if distributed_rank != 0:\n suppress_output()\n\n\ndef suppress_output():\n import builtins as __builtin__\n\n builtin_print = __builtin__.print\n\n def print(*args, **kwargs):\n # force print the result when kwargs contains force and value is True\n if kwargs.pop(\"force\", False):\n builtin_print(*args, **kwargs)\n\n __builtin__.print = print\n\n\ndef get_shard_range(dataset_size: int, rank: int, world_size: int):\n \"\"\"\n In case dataset_size is not evenly divided by world_size, we need to pad\n one extra example in each shard\n shard_len = dataset_size // world_size + 1\n\n Case 1 rank < remainder: each shard start position is rank * shard_len\n\n Case 2 rank >= remainder: without padding, each shard start position is\n rank * (shard_len - 1) + remainder = rank * shard_len - (rank - remainder)\n But to make sure all shard have same size, we need to pad one extra example\n when rank >= remainder, so start_position = start_position - 1\n\n For example, dataset_size = 21, world_size = 8\n rank 0 to 4: [0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14]\n rank 5 to 7: [14, 15, 16], [16, 17, 18], [18, 19, 20]\n \"\"\"\n remainder = dataset_size % world_size\n shard_len = dataset_size // world_size\n\n if remainder == 0:\n shard_offset = rank * shard_len\n else:\n # take one extra when dataset_size is not evenly divided by world_size\n shard_len += 1\n shard_offset = rank * shard_len - max(0, rank + 1 - remainder)\n shard_end = shard_offset + shard_len - 1\n\n return (shard_offset, shard_end)\n"
] |
[
[
"torch.distributed.all_reduce",
"torch.distributed.init_process_group",
"torch.cuda.is_available"
]
] |
ml-jku/mc-lstm
|
[
"8bbaece3ecb4187a76c6318d4c6e40c1dcc71303"
] |
[
"modelzoo/hnn.py"
] |
[
"import torch\nfrom torch import nn\n\n\nclass HNN(nn.Module):\n '''Learn arbitrary vector fields that are sums of conservative and solenoidal fields'''\n\n def __init__(self, input_dim, differentiable_model, field_type='solenoidal',\n baseline=False, assume_canonical_coords=True):\n super(HNN, self).__init__()\n self.baseline = baseline\n self.differentiable_model = differentiable_model\n self.assume_canonical_coords = assume_canonical_coords\n self.M = self.permutation_tensor(input_dim) # Levi-Civita permutation tensor\n self.field_type = field_type\n\n def forward(self, x):\n # traditional forward pass\n if self.baseline:\n return self.differentiable_model(x)\n\n y = self.differentiable_model(x)\n assert y.dim() == 2 and y.shape[1] == 2, \"Output tensor should have shape [batch_size, 2]\"\n return y.split(1, 1)\n\n def time_derivative(self, x, t=None, separate_fields=False):\n '''NEURAL ODE-STLE VECTOR FIELD'''\n if self.baseline:\n return self.differentiable_model(x)\n\n '''NEURAL HAMILTONIAN-STLE VECTOR FIELD'''\n F1, F2 = self.forward(x) # traditional forward pass\n\n conservative_field = torch.zeros_like(x) # start out with both components set to 0\n solenoidal_field = torch.zeros_like(x)\n\n if self.field_type != 'solenoidal':\n dF1 = torch.autograd.grad(F1.sum(), x, create_graph=True)[0] # gradients for conservative field\n conservative_field = dF1 @ torch.eye(*self.M.shape)\n\n if self.field_type != 'conservative':\n dF2 = torch.autograd.grad(F2.sum(), x, create_graph=True)[0] # gradients for solenoidal field\n solenoidal_field = dF2 @ self.M.t()\n\n if separate_fields:\n return [conservative_field, solenoidal_field]\n\n return conservative_field + solenoidal_field\n\n def permutation_tensor(self, n):\n M = None\n if self.assume_canonical_coords:\n M = torch.eye(n)\n M = torch.cat([M[n // 2:], -M[:n // 2]])\n else:\n '''Constructs the Levi-Civita permutation tensor'''\n M = torch.ones(n, n) # matrix of ones\n M *= 1 - torch.eye(n) # clear diagonals\n M[::2] *= -1 # pattern of signs\n M[:, ::2] *= -1\n\n for i in range(n): # make asymmetric\n for j in range(i + 1, n):\n M[i, j] *= -1\n return M\n\n\nclass MLP(nn.Module):\n '''Just a salt-of-the-earth MLP'''\n\n def __init__(self, input_dim, hidden_dim, output_dim, nonlinearity='tanh'):\n super(MLP, self).__init__()\n self.linear1 = torch.nn.Linear(input_dim, hidden_dim)\n self.linear2 = torch.nn.Linear(hidden_dim, hidden_dim)\n self.linear3 = torch.nn.Linear(hidden_dim, output_dim, bias=None)\n\n for l in [self.linear1, self.linear2, self.linear3]:\n torch.nn.init.orthogonal_(l.weight) # use a principled initialization\n\n self.nonlinearity = choose_nonlinearity(nonlinearity)\n\n def forward(self, x, separate_fields=False):\n h = self.nonlinearity(self.linear1(x))\n h = self.nonlinearity(self.linear2(h))\n return self.linear3(h)\n\n\ndef choose_nonlinearity(name):\n nl = None\n if name == 'tanh':\n nl = torch.tanh\n elif name == 'relu':\n nl = torch.relu\n elif name == 'sigmoid':\n nl = torch.sigmoid\n elif name == 'softplus':\n nl = torch.nn.functional.softplus\n elif name == 'selu':\n nl = torch.nn.functional.selu\n elif name == 'elu':\n nl = torch.nn.functional.elu\n elif name == 'swish':\n nl = lambda x: x * torch.sigmoid(x)\n else:\n raise ValueError(\"nonlinearity not recognized\")\n return nl\n"
] |
[
[
"torch.sigmoid",
"torch.ones",
"torch.cat",
"torch.eye",
"torch.zeros_like",
"torch.nn.Linear",
"torch.nn.init.orthogonal_"
]
] |
ODU-Internship/BedSore
|
[
"c9927ce181eb48fc93a3d2adf2330cc0ec412182"
] |
[
"app.py"
] |
[
"import os\r\nimport plotly.express as px\r\nimport plotly.figure_factory as ff\r\nimport dash\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\nimport numpy as np\r\nimport time\r\n\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n#pressure_data = np.random.randint(1000, size=(10, 10))\r\n\r\n#a = np.random.randint(1000, size=(5, 5))\r\n\r\nt = np.zeros((5, 5))\r\n\r\nserver = app.server\r\n\r\napp.layout = html.Div([\r\n html.H2('Bed Pressure Status'),\r\n html.Div([\r\n dcc.Graph(id='bed_pressure'),\r\n html.Div(id='sen_alerts'),\r\n dcc.Interval(id='graph-update', interval=10000, n_intervals=0)\r\n ], style={'width': '100%', 'display': 'inline-block', 'padding': '0 20'}),\r\n])\r\n\r\n\r\[email protected](\r\n [dash.dependencies.Output('bed_pressure', 'figure'),\r\n dash.dependencies.Output('sen_alerts', 'children')],\r\n [dash.dependencies.Input('graph-update', 'n_intervals')])\r\ndef update_graph(n):\r\n #pressure_data = np.random.randint(1000, size=(15, 30))\r\n a = np.random.randint(1000, size=(5, 5))\r\n alert_msg = detect(a, t)\r\n sens = html.Ul([html.Li(x) for x in alert_msg])\r\n #fig = px.imshow(a,color_continuous_scale='Hot_r')\r\n #colorscale = [[0, 'white'],[400,'red'] [1000, 'black']]\r\n font_colors = ['black', 'white']\r\n fig = ff.create_annotated_heatmap(\r\n a, colorscale='Hot_r', font_colors=font_colors, showscale=True)\r\n #fig = ff.create_annotated_heatmap(a,color_continuous_scale='Hot_r')\r\n # fig.update_layout(width=int(1000))\r\n\r\n #fig = px.imshow(pressure_data)\r\n return fig, sens\r\n\r\n\r\ndef detect(arr, t):\r\n sen_list = []\r\n for (x, y), element in np.ndenumerate(np.array(arr)):\r\n if(element > 400 and t[x][y] == 0.0):\r\n t[x][y] = time.time()\r\n elif(element > 400 and t[x][y] != 0):\r\n if(time.time() - t[x][y] > 9):\r\n sen_list.append(\"Alert of Sensor placed at \" +\r\n str(x) + \",\" + str(y))\r\n elif(element <= 400):\r\n t[x][y] = 0.0\r\n return sen_list\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
Hongbo-Miao/Cirq
|
[
"d6c6f9b1ea282e79db4475e5327d0380e6558ba6"
] |
[
"cirq/ops/three_qubit_gates.py"
] |
[
"# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Common quantum gates that target three qubits.\"\"\"\n\nfrom typing import AbstractSet, Any, List, Optional, Tuple, TYPE_CHECKING\n\nimport numpy as np\nimport sympy\n\nfrom cirq import linalg, protocols, value\nfrom cirq._compat import proper_repr\nfrom cirq._doc import document\nfrom cirq.ops import (\n common_gates,\n controlled_gate,\n eigen_gate,\n gate_features,\n pauli_gates,\n swap_gates,\n)\n\nif TYPE_CHECKING:\n # pylint: disable=unused-import\n import cirq\n\n\nclass CCZPowGate(\n eigen_gate.EigenGate, gate_features.ThreeQubitGate, gate_features.InterchangeableQubitsGate\n):\n \"\"\"A doubly-controlled-Z that can be raised to a power.\n\n The matrix of `CCZ**t` is `diag(1, 1, 1, 1, 1, 1, 1, exp(i pi t))`.\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, np.diag([1, 1, 1, 1, 1, 1, 1, 0])),\n (1, np.diag([0, 0, 0, 0, 0, 0, 0, 1])),\n ]\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if self._is_parameterized_():\n return None\n return abs(np.sin(self._exponent * 0.5 * np.pi))\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if protocols.is_parameterized(self):\n return NotImplemented\n global_phase = 1j ** (2 * self._exponent * self._global_shift)\n z_phase = 1j ** self._exponent\n c = -1j * z_phase * np.sin(np.pi * self._exponent / 2) / 4\n return value.LinearDict(\n {\n 'III': global_phase * (1 - c),\n 'IIZ': global_phase * c,\n 'IZI': global_phase * c,\n 'ZII': global_phase * c,\n 'ZZI': global_phase * -c,\n 'ZIZ': global_phase * -c,\n 'IZZ': global_phase * -c,\n 'ZZZ': global_phase * c,\n }\n )\n\n def _decompose_(self, qubits):\n \"\"\"An adjacency-respecting decomposition.\n\n 0: ───p───@──────────────@───────@──────────@──────────\n │ │ │ │\n 1: ───p───X───@───p^-1───X───@───X──────@───X──────@───\n │ │ │ │\n 2: ───p───────X───p──────────X───p^-1───X───p^-1───X───\n\n where p = T**self._exponent\n \"\"\"\n if protocols.is_parameterized(self):\n return NotImplemented\n\n a, b, c = qubits\n\n # Hacky magic: avoid the non-adjacent edge.\n if hasattr(b, 'is_adjacent'):\n if not b.is_adjacent(a):\n b, c = c, b\n elif not b.is_adjacent(c):\n a, b = b, a\n\n p = common_gates.T ** self._exponent\n sweep_abc = [common_gates.CNOT(a, b), common_gates.CNOT(b, c)]\n\n return [\n p(a),\n p(b),\n p(c),\n sweep_abc,\n p(b) ** -1,\n p(c),\n sweep_abc,\n p(c) ** -1,\n sweep_abc,\n p(c) ** -1,\n sweep_abc,\n ]\n\n def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:\n if protocols.is_parameterized(self):\n return NotImplemented\n ooo = args.subspace_index(0b111)\n args.target_tensor[ooo] *= np.exp(1j * self.exponent * np.pi)\n p = 1j ** (2 * self._exponent * self._global_shift)\n if p != 1:\n args.target_tensor *= p\n return args.target_tensor\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n return protocols.CircuitDiagramInfo(('@', '@', '@'), 
exponent=self._diagram_exponent(args))\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n if self._exponent != 1:\n return None\n\n args.validate_version('2.0')\n lines = [\n args.format('h {0};\\n', qubits[2]),\n args.format('ccx {0},{1},{2};\\n', qubits[0], qubits[1], qubits[2]),\n args.format('h {0};\\n', qubits[2]),\n ]\n return ''.join(lines)\n\n def _quil_(\n self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'\n ) -> Optional[str]:\n if self._exponent != 1:\n return None\n lines = [\n formatter.format('H {0}\\n', qubits[2]),\n formatter.format('CCNOT {0} {1} {2}\\n', qubits[0], qubits[1], qubits[2]),\n formatter.format('H {0}\\n', qubits[2]),\n ]\n return ''.join(lines)\n\n def __repr__(self) -> str:\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.CCZ'\n return '(cirq.CCZ**{})'.format(proper_repr(self._exponent))\n return 'cirq.CCZPowGate(exponent={}, global_shift={!r})'.format(\n proper_repr(self._exponent), self._global_shift\n )\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'CCZ'\n return 'CCZ**{}'.format(self._exponent)\n\n\[email protected]_equality()\nclass ThreeQubitDiagonalGate(gate_features.ThreeQubitGate):\n \"\"\"A gate given by a diagonal 8x8 matrix.\"\"\"\n\n def __init__(self, diag_angles_radians: List[value.TParamVal]) -> None:\n r\"\"\"A three qubit gate with only diagonal elements.\n\n This gate's off-diagonal elements are zero and it's on diagonal\n elements are all phases.\n\n Args:\n diag_angles_radians: The list of angles on the diagonal in radians.\n If these values are $(x_0, x_1, \\ldots , x_7)$ then the unitary\n has diagonal values $(e^{i x_0}, e^{i x_1}, \\ldots, e^{i x_7})$.\n \"\"\"\n self._diag_angles_radians: List[value.TParamVal] = diag_angles_radians\n\n def _is_parameterized_(self) -> bool:\n return any(protocols.is_parameterized(angle) for angle in self._diag_angles_radians)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return {\n name for angle in self._diag_angles_radians for name in protocols.parameter_names(angle)\n }\n\n def _resolve_parameters_(\n self, resolver: 'cirq.ParamResolverOrSimilarType'\n ) -> 'ThreeQubitDiagonalGate':\n return self.__class__(\n [protocols.resolve_parameters(angle, resolver) for angle in self._diag_angles_radians]\n )\n\n def _has_unitary_(self) -> bool:\n return not self._is_parameterized_()\n\n def _unitary_(self) -> np.ndarray:\n if self._is_parameterized_():\n return NotImplemented\n return np.diag([np.exp(1j * angle) for angle in self._diag_angles_radians])\n\n def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:\n if self._is_parameterized_():\n return NotImplemented\n for index, angle in enumerate(self._diag_angles_radians):\n little_endian_index = 4 * (index & 1) + 2 * ((index >> 1) & 1) + ((index >> 2) & 1)\n subspace_index = args.subspace_index(little_endian_index)\n args.target_tensor[subspace_index] *= np.exp(1j * angle)\n return args.target_tensor\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n rounded_angles = np.array(self._diag_angles_radians)\n if args.precision is not None:\n rounded_angles = rounded_angles.round(args.precision)\n diag_str = 'diag({})'.format(', '.join(proper_repr(angle) for angle in rounded_angles))\n return protocols.CircuitDiagramInfo((diag_str, '#2', '#3'))\n\n def __pow__(self, exponent: Any) -> 'ThreeQubitDiagonalGate':\n if not isinstance(exponent, (int, float, sympy.Basic)):\n 
return NotImplemented\n return ThreeQubitDiagonalGate(\n [protocols.mul(angle, exponent, NotImplemented) for angle in self._diag_angles_radians]\n )\n\n def _decompose_(self, qubits):\n \"\"\"An adjacency-respecting decomposition.\n\n 0: ───p_0───@──────────────@───────@──────────@──────────\n │ │ │ │\n 1: ───p_1───X───@───p_3────X───@───X──────@───X──────@───\n │ │ │ │\n 2: ───p_2───────X───p_4────────X───p_5────X───p_6────X───\n\n where p_i = T**(4*x_i) and x_i solve the system of equations\n [0, 0, 1, 0, 1, 1, 1][x_0] [r_1]\n [0, 1, 0, 1, 1, 0, 1][x_1] [r_2]\n [0, 1, 1, 1, 0, 1, 0][x_2] [r_3]\n [1, 0, 0, 1, 1, 1, 0][x_3] = [r_4]\n [1, 0, 1, 1, 0, 0, 1][x_4] [r_5]\n [1, 1, 0, 0, 0, 1, 1][x_5] [r_6]\n [1, 1, 1, 0, 1, 0, 0][x_6] [r_7]\n where r_i is self._diag_angles_radians[i].\n\n The above system was created by equating the composition of the gates\n in the circuit diagram to np.diag(self._diag_angles) (shifted by a\n global phase of np.exp(-1j * self._diag_angles[0])).\n \"\"\"\n\n a, b, c = qubits\n if hasattr(b, 'is_adjacent'):\n if not b.is_adjacent(a):\n b, c = c, b\n elif not b.is_adjacent(c):\n a, b = b, a\n sweep_abc = [common_gates.CNOT(a, b), common_gates.CNOT(b, c)]\n phase_matrix_inverse = 0.25 * np.array(\n [\n [-1, -1, -1, 1, 1, 1, 1],\n [-1, 1, 1, -1, -1, 1, 1],\n [1, -1, 1, -1, 1, -1, 1],\n [-1, 1, 1, 1, 1, -1, -1],\n [1, 1, -1, 1, -1, -1, 1],\n [1, -1, 1, 1, -1, 1, -1],\n [1, 1, -1, -1, 1, 1, -1],\n ]\n )\n shifted_angles_tail = [\n angle - self._diag_angles_radians[0] for angle in self._diag_angles_radians[1:]\n ]\n phase_solutions = phase_matrix_inverse.dot(shifted_angles_tail)\n p_gates = [pauli_gates.Z ** (solution / np.pi) for solution in phase_solutions]\n\n return [\n p_gates[0](a),\n p_gates[1](b),\n p_gates[2](c),\n sweep_abc,\n p_gates[3](b),\n p_gates[4](c),\n sweep_abc,\n p_gates[5](c),\n sweep_abc,\n p_gates[6](c),\n sweep_abc,\n ]\n\n def _value_equality_values_(self):\n return tuple(self._diag_angles_radians)\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if protocols.is_parameterized(self):\n return NotImplemented\n x = [np.exp(1j * angle) for angle in self._diag_angles_radians]\n return value.LinearDict(\n {\n 'III': (x[0] + x[1] + x[2] + x[3] + x[4] + x[5] + x[6] + x[7]) / 8,\n 'IIZ': (x[0] - x[1] + x[2] - x[3] + x[4] - x[5] + x[6] - x[7]) / 8,\n 'IZI': (x[0] + x[1] - x[2] - x[3] + x[4] + x[5] - x[6] - x[7]) / 8,\n 'IZZ': (x[0] - x[1] - x[2] + x[3] + x[4] - x[5] - x[6] + x[7]) / 8,\n 'ZII': (x[0] + x[1] + x[2] + x[3] - x[4] - x[5] - x[6] - x[7]) / 8,\n 'ZIZ': (x[0] - x[1] + x[2] - x[3] - x[4] + x[5] - x[6] + x[7]) / 8,\n 'ZZI': (x[0] + x[1] - x[2] - x[3] - x[4] - x[5] + x[6] + x[7]) / 8,\n 'ZZZ': (x[0] - x[1] - x[2] + x[3] - x[4] + x[5] + x[6] - x[7]) / 8,\n }\n )\n\n def __repr__(self) -> str:\n return 'cirq.ThreeQubitDiagonalGate([{}])'.format(\n ','.join(proper_repr(angle) for angle in self._diag_angles_radians)\n )\n\n\nclass CCXPowGate(\n eigen_gate.EigenGate, gate_features.ThreeQubitGate, gate_features.InterchangeableQubitsGate\n):\n \"\"\"A Toffoli (doubly-controlled-NOT) that can be raised to a power.\n\n The matrix of `CCX**t` is an 8x8 identity except the bottom right 2x2 area\n is the matrix of `X**t`.\n \"\"\"\n\n def _eigen_components(self):\n return [\n (0, linalg.block_diag(np.diag([1, 1, 1, 1, 1, 1]), np.array([[0.5, 0.5], [0.5, 0.5]]))),\n (\n 1,\n linalg.block_diag(\n np.diag([0, 0, 0, 0, 0, 0]), np.array([[0.5, -0.5], [-0.5, 0.5]])\n ),\n ),\n ]\n\n def _trace_distance_bound_(self) -> Optional[float]:\n if 
self._is_parameterized_():\n return None\n return abs(np.sin(self._exponent * 0.5 * np.pi))\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n if protocols.is_parameterized(self):\n return NotImplemented\n global_phase = 1j ** (2 * self._exponent * self._global_shift)\n z_phase = 1j ** self._exponent\n c = -1j * z_phase * np.sin(np.pi * self._exponent / 2) / 4\n return value.LinearDict(\n {\n 'III': global_phase * (1 - c),\n 'IIX': global_phase * c,\n 'IZI': global_phase * c,\n 'ZII': global_phase * c,\n 'ZZI': global_phase * -c,\n 'ZIX': global_phase * -c,\n 'IZX': global_phase * -c,\n 'ZZX': global_phase * c,\n }\n )\n\n def qubit_index_to_equivalence_group_key(self, index):\n return index < 2\n\n def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:\n if protocols.is_parameterized(self):\n return NotImplemented\n p = 1j ** (2 * self._exponent * self._global_shift)\n if p != 1:\n args.target_tensor *= p\n return protocols.apply_unitary(\n controlled_gate.ControlledGate(\n controlled_gate.ControlledGate(pauli_gates.X ** self.exponent)\n ),\n protocols.ApplyUnitaryArgs(args.target_tensor, args.available_buffer, args.axes),\n default=NotImplemented,\n )\n\n def _decompose_(self, qubits):\n c1, c2, t = qubits\n yield common_gates.H(t)\n yield CCZ(c1, c2, t) ** self._exponent\n yield common_gates.H(t)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n return protocols.CircuitDiagramInfo(('@', '@', 'X'), exponent=self._diagram_exponent(args))\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n if self._exponent != 1:\n return None\n\n args.validate_version('2.0')\n return args.format('ccx {0},{1},{2};\\n', qubits[0], qubits[1], qubits[2])\n\n def _quil_(\n self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'\n ) -> Optional[str]:\n if self._exponent != 1:\n return None\n return formatter.format('CCNOT {0} {1} {2}\\n', qubits[0], qubits[1], qubits[2])\n\n def __repr__(self) -> str:\n if self._global_shift == 0:\n if self._exponent == 1:\n return 'cirq.TOFFOLI'\n return '(cirq.TOFFOLI**{})'.format(proper_repr(self._exponent))\n return 'cirq.CCXPowGate(exponent={}, global_shift={!r})'.format(\n proper_repr(self._exponent), self._global_shift\n )\n\n def __str__(self) -> str:\n if self._exponent == 1:\n return 'TOFFOLI'\n return 'TOFFOLI**{}'.format(self._exponent)\n\n\[email protected]_equality()\nclass CSwapGate(gate_features.ThreeQubitGate, gate_features.InterchangeableQubitsGate):\n \"\"\"A controlled swap gate. 
The Fredkin gate.\"\"\"\n\n def qubit_index_to_equivalence_group_key(self, index):\n return 0 if index == 0 else 1\n\n def _pauli_expansion_(self) -> value.LinearDict[str]:\n return value.LinearDict(\n {\n 'III': 3 / 4,\n 'IXX': 1 / 4,\n 'IYY': 1 / 4,\n 'IZZ': 1 / 4,\n 'ZII': 1 / 4,\n 'ZXX': -1 / 4,\n 'ZYY': -1 / 4,\n 'ZZZ': -1 / 4,\n }\n )\n\n def _trace_distance_bound_(self) -> float:\n return 1.0\n\n def _decompose_(self, qubits):\n c, t1, t2 = qubits\n\n # Hacky magic: special case based on adjacency.\n if hasattr(t1, 'is_adjacent'):\n if not t1.is_adjacent(t2):\n # Targets separated by control.\n return self._decompose_inside_control(t1, c, t2)\n if not t1.is_adjacent(c):\n # Control separated from t1 by t2.\n return self._decompose_outside_control(c, t2, t1)\n\n return self._decompose_outside_control(c, t1, t2)\n\n def _decompose_inside_control(\n self, target1: 'cirq.Qid', control: 'cirq.Qid', target2: 'cirq.Qid'\n ) -> 'cirq.OP_TREE':\n \"\"\"A decomposition assuming the control separates the targets.\n\n target1: ─@─X───────T──────@────────@─────────X───@─────X^-0.5─\n │ │ │ │ │ │\n control: ─X─@─X─────@─T^-1─X─@─T────X─@─X^0.5─@─@─X─@──────────\n │ │ │ │ │ │\n target2: ─────@─H─T─X─T──────X─T^-1───X─T^-1────X───X─H─S^-1───\n \"\"\"\n a, b, c = target1, control, target2\n yield common_gates.CNOT(a, b)\n yield common_gates.CNOT(b, a)\n yield common_gates.CNOT(c, b)\n yield common_gates.H(c)\n yield common_gates.T(c)\n yield common_gates.CNOT(b, c)\n yield common_gates.T(a)\n yield common_gates.T(b) ** -1\n yield common_gates.T(c)\n yield common_gates.CNOT(a, b)\n yield common_gates.CNOT(b, c)\n yield common_gates.T(b)\n yield common_gates.T(c) ** -1\n yield common_gates.CNOT(a, b)\n yield common_gates.CNOT(b, c)\n yield pauli_gates.X(b) ** 0.5\n yield common_gates.T(c) ** -1\n yield common_gates.CNOT(b, a)\n yield common_gates.CNOT(b, c)\n yield common_gates.CNOT(a, b)\n yield common_gates.CNOT(b, c)\n yield common_gates.H(c)\n yield common_gates.S(c) ** -1\n yield pauli_gates.X(a) ** -0.5\n\n def _apply_unitary_(self, args: 'protocols.ApplyUnitaryArgs') -> np.ndarray:\n return protocols.apply_unitary(\n controlled_gate.ControlledGate(swap_gates.SWAP),\n protocols.ApplyUnitaryArgs(args.target_tensor, args.available_buffer, args.axes),\n default=NotImplemented,\n )\n\n def _decompose_outside_control(\n self, control: 'cirq.Qid', near_target: 'cirq.Qid', far_target: 'cirq.Qid'\n ) -> 'cirq.OP_TREE':\n \"\"\"A decomposition assuming one of the targets is in the middle.\n\n control: ───T──────@────────@───@────────────@────────────────\n │ │ │ │\n near: ─X─T──────X─@─T^-1─X─@─X────@─X^0.5─X─@─X^0.5────────\n │ │ │ │ │\n far: ─@─Y^-0.5─T─X─T──────X─T^-1─X─T^-1────X─S─────X^-0.5─\n \"\"\"\n a, b, c = control, near_target, far_target\n\n t = common_gates.T\n sweep_abc = [common_gates.CNOT(a, b), common_gates.CNOT(b, c)]\n\n yield common_gates.CNOT(c, b)\n yield pauli_gates.Y(c) ** -0.5\n yield t(a), t(b), t(c)\n yield sweep_abc\n yield t(b) ** -1, t(c)\n yield sweep_abc\n yield t(c) ** -1\n yield sweep_abc\n yield t(c) ** -1\n yield pauli_gates.X(b) ** 0.5\n yield sweep_abc\n yield common_gates.S(c)\n yield pauli_gates.X(b) ** 0.5\n yield pauli_gates.X(c) ** -0.5\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _unitary_(self) -> np.ndarray:\n return linalg.block_diag(np.diag([1, 1, 1, 1, 1]), np.array([[0, 1], [1, 0]]), np.diag([1]))\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n if not 
args.use_unicode_characters:\n return protocols.CircuitDiagramInfo(('@', 'swap', 'swap'))\n return protocols.CircuitDiagramInfo(('@', '×', '×'))\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n args.validate_version('2.0')\n return args.format('cswap {0},{1},{2};\\n', qubits[0], qubits[1], qubits[2])\n\n def _quil_(\n self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'\n ) -> Optional[str]:\n return formatter.format('CSWAP {0} {1} {2}\\n', qubits[0], qubits[1], qubits[2])\n\n def _value_equality_values_(self):\n return ()\n\n def __str__(self) -> str:\n return 'FREDKIN'\n\n def __repr__(self) -> str:\n return 'cirq.FREDKIN'\n\n\nCCZ = CCZPowGate()\ndocument(\n CCZ,\n \"\"\"The Controlled-Controlled-Z gate.\n\n The `exponent=1` instance of `cirq.CCZPowGate`.\n\n Matrix:\n\n ```\n [[1 . . . . . . .],\n [. 1 . . . . . .],\n [. . 1 . . . . .],\n [. . . 1 . . . .],\n [. . . . 1 . . .],\n [. . . . . 1 . .],\n [. . . . . . 1 .],\n [. . . . . . . -1]]\n ```\n \"\"\",\n)\n\nCCNotPowGate = CCXPowGate\nCCX = TOFFOLI = CCNOT = CCXPowGate()\ndocument(\n CCX,\n \"\"\"The TOFFOLI gate.\n\n The `exponent=1` instance of `cirq.CCXPowGate`.\n\n Matrix:\n ```\n [[1 . . . . . . .],\n [. 1 . . . . . .],\n [. . 1 . . . . .],\n [. . . 1 . . . .],\n [. . . . 1 . . .],\n [. . . . . 1 . .],\n [. . . . . . . 1],\n [. . . . . . 1 .]]\n ```\n \"\"\",\n)\n\nCSWAP = FREDKIN = CSwapGate()\ndocument(\n CSWAP,\n \"\"\"The Controlled Swap gate.\n\n An instance of `cirq.CSwapGate`.\n\n Matrix:\n ```\n [[1 . . . . . . .],\n [. 1 . . . . . .],\n [. . 1 . . . . .],\n [. . . 1 . . . .],\n [. . . . 1 . . .],\n [. . . . . . 1 .],\n [. . . . . 1 . .],\n [. . . . . . . 1]]\n ```\n \"\"\",\n)\n"
] |
[
[
"numpy.diag",
"numpy.array",
"numpy.exp",
"numpy.sin"
]
] |
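The `_pauli_expansion_` method in the cirq snippet above projects a three-qubit diagonal unitary onto the {I, Z}⊗3 Pauli strings. A minimal standalone sketch of that projection, using made-up angles rather than anything from the row above:

```python
import itertools
import numpy as np

# Illustrative diagonal angles r_0..r_7 (hypothetical, not from the row above).
angles = np.linspace(0.0, 1.4, 8)
x = np.exp(1j * angles)                       # diagonal entries of the unitary

# For each {I, Z}^3 string, build the +/-1 sign pattern over the 8 basis states
# (a Z on qubit q flips the sign whenever that qubit's bit is 1, with qubit 0
# as the leading bit), then average the signed diagonal entries.
signs = {}
for pauli in itertools.product('IZ', repeat=3):
    s = np.ones(8)
    for qubit, p in enumerate(pauli):
        if p == 'Z':
            s *= (-1.0) ** ((np.arange(8) >> (2 - qubit)) & 1)
    signs[''.join(pauli)] = s

expansion = {name: np.sum(s * x) / 8 for name, s in signs.items()}

# Sanity check: 'IIZ' alternates +,-,+,-,... as in _pauli_expansion_, and
# re-summing coeff * signs recovers the original diagonal.
assert np.allclose(signs['IIZ'], [1, -1, 1, -1, 1, -1, 1, -1])
reconstructed = sum(expansion[name] * s for name, s in signs.items())
assert np.allclose(reconstructed, x)
```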
sourcepirate/ML-From-Scratch
|
[
"c6839bf47c360d6fa48861302fd90ccd4a8c38db"
] |
[
"mlfromscratch/supervised_learning/regression.py"
] |
[
"from __future__ import print_function, division\nimport numpy as np\nimport math\nfrom mlfromscratch.utils import normalize, polynomial_features\n\n\nclass Regression(object):\n \"\"\" Base regression model. Models the relationship between a scalar dependent variable y and the independent \n variables X. \n Parameters:\n -----------\n reg_factor: float\n The factor that will determine the amount of regularization and feature\n shrinkage. \n n_iterations: float\n The number of training iterations the algorithm will tune the weights for.\n learning_rate: float\n The step length that will be used when updating the weights.\n gradient_descent: boolean\n True or false depending if gradient descent should be used when training. If \n false then we use batch optimization by least squares.\n \"\"\"\n def __init__(self, reg_factor, n_iterations, learning_rate, gradient_descent):\n self.w = None\n self.n_iterations = n_iterations\n self.learning_rate = learning_rate\n self.gradient_descent = gradient_descent\n self.reg_factor = reg_factor\n\n def initialize_weights(self, n_features):\n \"\"\" Initialize weights randomly [-1/N, 1/N] \"\"\"\n limit = 1 / math.sqrt(n_features)\n self.w = np.random.uniform(-limit, limit, (n_features, ))\n\n def regularization(self):\n # No regularization by default\n return 0\n\n def regularization_gradient(self):\n # No regularization by default\n return 0\n\n def fit(self, X, y):\n # Insert constant ones as first column (for bias weights)\n X = np.insert(X, 0, 1, axis=1)\n n_features = np.shape(X)[1]\n # Get weights by gradient descent opt.\n if self.gradient_descent:\n self.training_errors = []\n self.initialize_weights(n_features)\n # Do gradient descent for n_iterations\n for _ in range(self.n_iterations):\n y_pred = X.dot(self.w)\n # Calculate mean squared error\n mse = np.mean(0.5 * (y - y_pred)**2 + self.regularization())\n self.training_errors.append(mse)\n # Gradient of l2 loss w.r.t w\n grad_w = - (y - y_pred).dot(X) + self.regularization_gradient()\n # Update the weights\n self.w -= self.learning_rate * grad_w\n # Get weights by least squares (using Moore-Penrose pseudoinverse)\n else:\n U, S, V = np.linalg.svd(X.T.dot(X) + self.reg_factor * np.identity(n_features))\n S = np.diag(S)\n X_sq_reg_inv = V.dot(np.linalg.pinv(S)).dot(U.T)\n self.w = X_sq_reg_inv.dot(X.T).dot(y)\n\n def predict(self, X):\n # Insert constant ones for bias weights\n X = np.insert(X, 0, 1, axis=1)\n y_pred = X.dot(self.w)\n return y_pred\n\nclass LinearRegression(Regression):\n \"\"\"Linear model.\n Parameters:\n -----------\n n_iterations: float\n The number of training iterations the algorithm will tune the weights for.\n learning_rate: float\n The step length that will be used when updating the weights.\n gradient_descent: boolean\n True or false depending if gradient descent should be used when training. 
If \n false then we use batch optimization by least squares.\n \"\"\"\n def __init__(self, n_iterations=100, learning_rate=0.001, gradient_descent=True):\n super(LinearRegression, self).__init__(reg_factor=0, n_iterations=n_iterations, \\\n learning_rate=learning_rate, gradient_descent=gradient_descent)\n\nclass PolynomialRegression(Regression):\n \"\"\"Performs a non-linear transformation of the data before fitting the model\n and doing predictions which allows for doing non-linear regression.\n Parameters:\n -----------\n degree: int\n The power of the polynomial that the independent variable X will be transformed to.\n n_iterations: float\n The number of training iterations the algorithm will tune the weights for.\n learning_rate: float\n The step length that will be used when updating the weights.\n gradient_descent: boolean\n True or false depending if gradient descent should be used when training. If \n false then we use batch optimization by least squares.\n \"\"\"\n def __init__(self, degree, n_iterations=3000, learning_rate=0.001, gradient_descent=True):\n self.degree = degree\n super(PolynomialRegression, self).__init__(reg_factor=0, n_iterations=n_iterations, \\\n learning_rate=learning_rate, gradient_descent=gradient_descent)\n\n def fit(self, X, y):\n X_transformed = polynomial_features(X, degree=self.degree)\n super(PolynomialRegression, self).fit(X_transformed, y)\n\n def predict(self, X):\n X_transformed = polynomial_features(X, degree=self.degree)\n return super(PolynomialRegression, self).predict(X_transformed)\n\nclass RidgeRegression(Regression):\n \"\"\"Also referred to as Tikhonov regularization. Linear regression model with a regularization factor.\n Model that tries to balance the fit of the model with respect to the training data and the complexity\n of the model. A large regularization factor with decreases the variance of the model.\n Parameters:\n -----------\n reg_factor: float\n The factor that will determine the amount of regularization and feature\n shrinkage. \n n_iterations: float\n The number of training iterations the algorithm will tune the weights for.\n learning_rate: float\n The step length that will be used when updating the weights.\n gradient_descent: boolean\n True or false depending if gradient descent should be used when training. If \n false then we use batch optimization by least squares.\n \"\"\"\n def __init__(self, reg_factor, n_iterations=1000, learning_rate=0.001, gradient_descent=True):\n super(RidgeRegression, self).__init__(reg_factor, n_iterations, learning_rate, gradient_descent)\n\n def regularization(self):\n return self.reg_factor * self.w.T.dot(self.w)\n\n def regularization_gradient(self):\n return self.reg_factor * self.w\n\nclass LassoRegression(Regression):\n \"\"\"Linear regression model with a regularization factor which does both variable selection \n and regularization. Model that tries to balance the fit of the model with respect to the training \n data and the complexity of the model. A large regularization factor with decreases the variance of \n the model and do para.\n Parameters:\n -----------\n degree: int\n The power of the polynomial that the independent variable X will be transformed to.\n reg_factor: float\n The factor that will determine the amount of regularization and feature\n shrinkage. 
\n n_iterations: float\n The number of training iterations the algorithm will tune the weights for.\n learning_rate: float\n The step length that will be used when updating the weights.\n gradient_descent: boolean\n True or false depending if gradient descent should be used when training. If \n false then we use batch optimization by least squares.\n \"\"\"\n def __init__(self, degree, reg_factor, n_iterations=3000, learning_rate=0.01, gradient_descent=True):\n self.degree = degree\n super(LassoRegression, self).__init__(reg_factor, n_iterations, learning_rate, gradient_descent)\n\n def fit(self, X, y):\n X_transformed = normalize(polynomial_features(X, degree=self.degree))\n super(LassoRegression, self).fit(X_transformed, y)\n\n def predict(self, X):\n X_transformed = normalize(polynomial_features(X, degree=self.degree))\n return super(LassoRegression, self).predict(X_transformed)\n\n def regularization(self):\n return self.reg_factor * len(self.w)\n\n def regularization_gradient(self):\n return self.reg_factor * np.sign(self.w)\n\nclass PolynomialRidgeRegression(Regression):\n \"\"\"Similar to regular ridge regression except that the data is transformed to allow\n for polynomial regression.\n Parameters:\n -----------\n degree: int\n The power of the polynomial that the independent variable X will be transformed to.\n reg_factor: float\n The factor that will determine the amount of regularization and feature\n shrinkage. \n n_iterations: float\n The number of training iterations the algorithm will tune the weights for.\n learning_rate: float\n The step length that will be used when updating the weights.\n gradient_descent: boolean\n True or false depending if gradient descent should be used when training. If \n false then we use batch optimization by least squares.\n \"\"\"\n def __init__(self, degree, reg_factor, n_iterations=3000, learning_rate=0.01, gradient_descent=True):\n self.degree = degree\n super(PolynomialRidgeRegression, self).__init__(reg_factor, n_iterations, learning_rate, gradient_descent)\n\n def fit(self, X, y):\n X_transformed = normalize(polynomial_features(X, degree=self.degree))\n super(PolynomialRidgeRegression, self).fit(X_transformed, y)\n\n def predict(self, X):\n X_transformed = normalize(polynomial_features(X, degree=self.degree))\n return super(PolynomialRidgeRegression, self).predict(X_transformed)\n\n def regularization(self):\n return self.reg_factor * self.w.T.dot(self.w)\n\n def regularization_gradient(self):\n return self.reg_factor * self.w\n\n"
] |
[
[
"numpy.diag",
"numpy.sign",
"numpy.linalg.pinv",
"numpy.shape",
"numpy.insert",
"numpy.identity",
"numpy.random.uniform"
]
] |
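The batch (non-gradient-descent) branch of `Regression.fit` above solves regularized least squares in closed form through an SVD. A rough sketch of that solution on a synthetic toy problem (the data and variable names are illustrative); note that `np.linalg.svd` returns the transposed right singular vectors, hence `Vt.T` below:

```python
import numpy as np

rng = np.random.default_rng(0)

# Toy data: 50 samples, 3 features, plus a bias column of ones (as in fit()).
X = np.insert(rng.normal(size=(50, 3)), 0, 1, axis=1)
true_w = np.array([0.5, 2.0, -1.0, 0.3])
y = X @ true_w + 0.01 * rng.normal(size=50)

# Closed-form ridge solution w = (X^T X + reg * I)^-1 X^T y via an SVD.
reg_factor = 0.1
n_features = np.shape(X)[1]
U, S, Vt = np.linalg.svd(X.T @ X + reg_factor * np.identity(n_features))
X_sq_reg_inv = Vt.T @ np.linalg.pinv(np.diag(S)) @ U.T
w = X_sq_reg_inv @ X.T @ y

# With a small regularizer the recovered weights sit close to true_w.
print(np.round(w, 2))
```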
jornix/Stavanger-school-learning-results
|
[
"9974cac4ebb91ea51b0437f8b7750feac3049804"
] |
[
"script.py"
] |
[
"import pandas as pd\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\n# Read csv and store in dataframe df\ndf = pd.read_csv(\"results.csv\")\ndf.drop([\"index\"], axis=1).reset_index(drop=True)\n\n\n# Separate fifth grade tests\nfemte_trinn = df[\n (df[\"statistikk\"] == \"Nasjonale prøver 5. trinn\") & (pd.isna(df[\"verdi\"]) == False)\n].reset_index(drop=True)\n\n# Separate the different tests for fifth grade (engelsk, lesing, regning)\nfemte_trinn_engelsk = femte_trinn[\n (femte_trinn[\"indikator_delskar\"] == \"Engelsk\")\n & (femte_trinn[\"kjonn\"] == \"Begge kjønn\")\n].reset_index(drop=True)\nfemte_trinn_lesing = femte_trinn[\n (femte_trinn[\"indikator_delskar\"] == \"Lesing\")\n & (femte_trinn[\"kjonn\"] == \"Begge kjønn\")\n].reset_index(drop=True)\nfemte_trinn_regning = femte_trinn[\n (femte_trinn[\"indikator_delskar\"] == \"Regning\")\n & (femte_trinn[\"kjonn\"] == \"Begge kjønn\")\n].reset_index(drop=True)\n\n# Set some seaborn estethic variables\nsns.set_theme(style=\"ticks\", color_codes=True)\nsns.set_style(\"darkgrid\")\nsns.set_context(\"paper\")\nsns.set_palette(\"PiYG\")\n\n# calculate and print boxplots to files\nfig, axes = plt.subplots(figsize=(10, 15))\nfig.suptitle(\"Nasjonale prøver 5. trinn, spredning i resultater\")\nsns.boxplot(data=femte_trinn_engelsk, x=\"verdi\", y=\"enhetsnavn\", palette=\"RdYlBu\")\nsns.stripplot(\n data=femte_trinn_engelsk,\n x=\"verdi\",\n y=\"enhetsnavn\",\n palette=\"PRGn\",\n hue=\"periode\",\n)\naxes.set_title(\"Engelsk\")\nplt.savefig(\"plots/boxplot_femte_trinn_engelsk.png\")\n\nfig, axes = plt.subplots(figsize=(10, 15))\nfig.suptitle(\"Nasjonale prøver 5. trinn, spredning i resultater\")\nsns.boxplot(data=femte_trinn_lesing, x=\"verdi\", y=\"enhetsnavn\", palette=\"RdYlBu\")\nsns.stripplot(\n data=femte_trinn_lesing,\n x=\"verdi\",\n y=\"enhetsnavn\",\n palette=\"PRGn\",\n hue=\"periode\",\n)\naxes.set_title(\"Lesing\")\nplt.savefig(\"plots/boxplot_femte_trinn_lesing.png\")\n\nfig, axes = plt.subplots(figsize=(10, 15))\nfig.suptitle(\"Nasjonale prøver 5. trinn, spredning i resultater\")\nsns.boxplot(data=femte_trinn_regning, x=\"verdi\", y=\"enhetsnavn\", palette=\"RdYlBu\")\nsns.stripplot(\n data=femte_trinn_regning,\n x=\"verdi\",\n y=\"enhetsnavn\",\n palette=\"PRGn\",\n hue=\"periode\",\n)\naxes.set_title(\"Regning\")\nplt.savefig(\"plots/boxplot_femte_trinn_regning.png\")\n# sns.despine(offset=10, trim=True)\n\n\nplt.show()\n"
] |
[
[
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"pandas.isna",
"matplotlib.pyplot.show"
]
] |
misads/cv_template
|
[
"9976ee0ada449a494d26f896c598610f233edc10"
] |
[
"dataloader/dataloaders.py"
] |
[
"# encoding=utf-8\nfrom dataloader.image_list import ListTrainValDataset, ListTestDataset\nfrom dataloader.transforms import get_transform\nfrom torch.utils.data import DataLoader\nfrom options import opt\nimport pdb\nimport os\n###################\n\nTEST_DATASET_HAS_OPEN = False # 有没有开放测试集\n\n###################\n\ntrain_list = os.path.join('datasets', opt.dataset, 'train.txt')\nval_list = os.path.join('datasets', opt.dataset, 'val.txt')\n\nmax_size = 128 if opt.debug else None\n\n# transforms\ntransform = get_transform(opt.transform)\ntrain_transform = transform.train_transform\nval_transform = transform.val_transform\n\n# datasets和dataloaders\ntrain_dataset = ListTrainValDataset(train_list, transforms=train_transform, max_size=max_size)\ntrain_dataloader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True, num_workers=opt.workers, drop_last=True)\n\nval_dataset = ListTrainValDataset(val_list, transforms=val_transform, max_size=max_size)\nval_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=opt.workers//2)\n\nif TEST_DATASET_HAS_OPEN:\n test_list = os.path.join('datasets', opt.dataset, 'test.txt') # 还没有\n\n test_dataset = ListTestDataset(test_list, scale=opt.scale, max_size=max_size, norm=opt.norm_input)\n test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=1)\n\nelse:\n test_dataloader = None\n"
] |
[
[
"torch.utils.data.DataLoader"
]
] |
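The dataloader module above mainly wires datasets into `torch.utils.data.DataLoader` with a few knobs (batch size, shuffling, worker count, `drop_last`). A tiny self-contained illustration of those options using a toy `TensorDataset` in place of the repo's list-file datasets:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-in for ListTrainValDataset: 10 "images" with integer labels.
images = torch.randn(10, 3, 32, 32)
labels = torch.arange(10)
train_dataset = TensorDataset(images, labels)

# Same knobs as the train_dataloader above; num_workers=0 keeps it runnable anywhere.
train_dataloader = DataLoader(train_dataset, batch_size=4, shuffle=True,
                              num_workers=0, drop_last=True)

for batch_images, batch_labels in train_dataloader:
    # With drop_last=True the final partial batch of 2 samples is skipped,
    # so every batch here has exactly 4 samples.
    assert batch_images.shape[0] == 4
```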
wfbradley/snpko
|
[
"abc77349d702915519518eacdf919f06579413d0"
] |
[
"make_knockoffs.py"
] |
[
"#!/usr/bin/env python\n\n\nimport pandas as pd\nimport os\nimport numpy as np\nimport SNPknock.fastphase as fp\nfrom SNPknock import knockoffHMM\nfrom joblib import Parallel, delayed\nimport utils_snpko as utils\n\nlogger = utils.logger\n\n\ndef make_knockoff(chromosome=None, grouped_by_chromosome=None, df_SNP=None,\n df_geno_experiment=None, df_geno_ensembl=None,\n SNP_to_wild_type=None, cache_dir=None, path_to_fp=None,\n em_iterations=25, random_seed=123):\n # assert chromosome!=None and grouped_by_chromosome!=None and df_SNP!=None\n assert chromosome is not None\n assert grouped_by_chromosome is not None\n assert df_SNP is not None\n\n logger.debug(\"################\")\n logger.debug(\"Chromosome %2d #\" % chromosome)\n logger.debug(\"################\")\n\n num_experiment_people = len(df_geno_experiment)\n num_ensembl_people = len(df_geno_ensembl)\n\n indices = grouped_by_chromosome.groups[chromosome]\n df_SNP_chromo = df_SNP.iloc[indices].sort_values('chromosome_position')\n SNPs_on_chromosome = df_SNP_chromo['SNP'].values\n\n X_experiment = np.empty((num_experiment_people, len(SNPs_on_chromosome)))\n X_ensembl = np.empty((num_ensembl_people, len(SNPs_on_chromosome)))\n for X, df in [\n (X_experiment, df_geno_experiment),\n (X_ensembl, df_geno_ensembl)]:\n\n for j, SNP in enumerate(SNPs_on_chromosome):\n X[:, j] = utils.genotype_to_nonwild_type_count(\n df[SNP].values, SNP_to_wild_type[SNP])\n\n out_path = '%s/chrom_%d' % (cache_dir, chromosome)\n\n # If all relevant files are found in cache, skip EM recomputation; otherwise,\n # redo the whole thing.\n target_file_suffix_list = [\n 'alphahat.txt', 'finallikelihoods', 'origchars', 'rhat.txt', 'thetahat.txt']\n already_in_cache = True\n for suffix in target_file_suffix_list:\n target_path = os.path.join(\n cache_dir, 'chrom_%d_%s' % (chromosome, suffix))\n if not os.path.exists(target_path):\n already_in_cache = False\n break\n if already_in_cache:\n logger.debug(\"Found chrom %d HMM in cache\" % chromosome)\n else:\n # Write array to file\n Xfp_file = '%s/X_%d.inp' % (cache_dir, chromosome)\n fp.writeX(X_ensembl, Xfp_file)\n\n # Run fastPhase on data (which runs EM)\n fp.runFastPhase(path_to_fp, Xfp_file, out_path,\n K=12, numit=em_iterations)\n\n # Read in fastPhase results (i.e., HMM parameters) from file:\n r_file = out_path + \"_rhat.txt\"\n alpha_file = out_path + \"_alphahat.txt\"\n theta_file = out_path + \"_thetahat.txt\"\n # Why is X_ensembl[0, :] in the function arguments below?\n hmm = fp.loadFit(r_file, theta_file, alpha_file, X_ensembl[0, :])\n\n # Actually produce the knockoffs\n knockoffs = knockoffHMM(hmm[\"pInit\"], hmm[\"Q\"], hmm[\n \"pEmit\"], seed=random_seed)\n X_knockoffs = knockoffs.sample(X_experiment)\n\n return(X_knockoffs, X_experiment, SNPs_on_chromosome)\n\n\ndef make_all_knockoffs(args):\n '''\n For each chromosome, independently:\n Sort SNPs according to position on genome.\n Train HMM parameters with EM on ENSEMBL data.\n Generate knockoffs of experimentals SNP data.\n\n For now, we ignore sex of persons, although that is\n available in ENSEMBL\n '''\n\n logger.info(\"####################################\")\n logger.info(\"Fitting HMM and generating knockoffs\")\n\n path_to_fp = os.path.join(args.fastPHASE_path, 'fastPHASE')\n if not(os.path.exists(path_to_fp)):\n logger.info(\"Cannot find fastPHASE at %s\" % path_to_fp)\n raise Exception\n\n cache_dir = os.path.join(args.working_dir, 'fastphase_cache')\n utils.safe_mkdir(cache_dir)\n\n df_geno_ensembl = pd.read_csv(os.path.join(\n 
(args.working_dir), 'pruned_ensembl.csv'))\n\n # SNP,wild_type,chromosome,chromosome_position\n df_SNP = pd.read_csv(os.path.join(\n (args.working_dir), 'pruned_SNP_facts.csv'))\n df_wild = pd.read_csv(os.path.join(args.working_dir, 'wild_types.csv'))\n SNP_to_wild_type = dict(\n zip(df_wild['SNP'].values, df_wild['wild_type'].values))\n\n chromosome_list = np.sort(np.unique(df_SNP['chromosome']))\n for chromosome in chromosome_list:\n assert chromosome in np.arange(1, 24)\n\n df_geno_experiment = pd.read_csv(os.path.join(\n (args.working_dir), 'pruned_experiment.csv'))\n\n # Make sure we have the same SNPs everywhere.\n assert (set([c for c in df_geno_ensembl.columns if c.startswith('rs')]) ==\n set([c for c in df_geno_experiment.columns if c.startswith('rs')]))\n for SNP in df_SNP.SNP.values:\n assert SNP in df_geno_ensembl.columns\n\n grouped_by_chromosome = df_SNP.groupby('chromosome')\n num_experiment_people = len(df_geno_experiment)\n\n knockoff_SNP_list = []\n\n utils.safe_mkdir(os.path.join(args.working_dir, 'knockoffs'))\n\n em_iterations = 500\n logger.info('Number of EM iterations: %d' % em_iterations)\n\n for knockoff_trial_count in xrange(args.num_knockoff_trials):\n random_seed = knockoff_trial_count + args.random_seed\n if ((args.num_knockoff_trials <= 20) or\n knockoff_trial_count % ((args.num_knockoff_trials) // 20) == 0):\n logger.info(\"Knockoff sampling %d of %d\" % (\n knockoff_trial_count, args.num_knockoff_trials))\n\n if False:\n # Serial version; code preserved for debugging purposes\n for chromosome in chromosome_list:\n knockoff_SNP_list.append(\n make_knockoff(\n chromosome=chromosome,\n grouped_by_chromosome=grouped_by_chromosome, df_SNP=df_SNP,\n df_geno_experiment=df_geno_experiment, df_geno_ensembl=df_geno_ensembl,\n SNP_to_wild_type=SNP_to_wild_type, cache_dir=cache_dir,\n path_to_fp=path_to_fp, em_iterations=em_iterations, random_seed=random_seed))\n else:\n knockoff_SNP_list = Parallel(n_jobs=args.num_workers)(\n delayed(make_knockoff)(\n chromosome=i,\n grouped_by_chromosome=grouped_by_chromosome, df_SNP=df_SNP,\n df_geno_experiment=df_geno_experiment, df_geno_ensembl=df_geno_ensembl,\n SNP_to_wild_type=SNP_to_wild_type, cache_dir=cache_dir, path_to_fp=path_to_fp,\n em_iterations=em_iterations, random_seed=random_seed)\n for i in chromosome_list)\n\n # Stitch results for each chromosome back together into a single dataframe\n # Knockoff results\n SNP_columns = [\n x for x in df_geno_ensembl.columns if x.startswith('rs')]\n df_knockoffs = pd.DataFrame(\n columns=SNP_columns, index=np.arange(num_experiment_people))\n\n # Matched experimental observations + knockoffs in one dataframe\n matched_columns = []\n data_labels = []\n for field in df_geno_experiment.columns:\n if field.startswith('rs'):\n matched_columns.append(field)\n matched_columns.append(field + '_knockoff')\n elif field.startswith(args.data_prefix):\n data_labels.append(field)\n else:\n continue\n df_matched = pd.DataFrame(columns=matched_columns + data_labels,\n index=np.arange(num_experiment_people))\n\n for (X_knockoffs, X_experiment, SNPs_on_chromosome) in knockoff_SNP_list:\n for i in xrange(num_experiment_people):\n for j, SNP in enumerate(SNPs_on_chromosome):\n df_knockoffs[SNP].values[i] = X_knockoffs[i, j]\n df_matched[SNP].values[i] = int(X_experiment[i, j])\n df_matched[\n SNP + '_knockoff'].values[i] = int(X_knockoffs[i, j])\n for data_label in data_labels:\n df_matched[data_label] = df_geno_experiment[data_label]\n\n # Sanity check that all fields are filled in.\n for field in 
df_knockoffs:\n for i in xrange(num_experiment_people):\n assert pd.notnull(df_knockoffs[field].values[i])\n\n df_matched.to_csv(os.path.join((args.working_dir), 'knockoffs',\n 'knockoffs_%03d.csv' % knockoff_trial_count),\n index=False)\n\n logger.info(\"Done making knockoffs!!!\")\n\n\nif __name__ == '__main__':\n args = utils.parse_arguments()\n utils.initialize_logger(args)\n make_all_knockoffs(args)\n"
] |
[
[
"numpy.arange",
"pandas.notnull",
"numpy.unique"
]
] |
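The per-chromosome fan-out in `make_all_knockoffs` above uses `joblib.Parallel` with `delayed` and keyword arguments. A small sketch of that dispatch pattern with a stand-in worker (all names here are hypothetical, not taken from the repo):

```python
from joblib import Parallel, delayed


def process_chromosome(chromosome=None, em_iterations=25, random_seed=123):
    # Stand-in for make_knockoff: just report what it would have worked on.
    return (chromosome, em_iterations, random_seed)


chromosome_list = [1, 2, 3, 21, 22]
results = Parallel(n_jobs=2)(
    delayed(process_chromosome)(chromosome=c, em_iterations=500, random_seed=42)
    for c in chromosome_list
)
# Results come back in the same order as chromosome_list, one tuple per chromosome.
print(results)
```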
pfnet-research/hierarchical-molecular-learning
|
[
"2c88a4737c9268e691e97d92bf2e9e2c7e2c1790"
] |
[
"unsupNFP/train.py"
] |
[
"import argparse\n\nfrom chainer import optimizers\nfrom chainer import serializers\nimport numpy as np\n\nimport model\nimport load_mutag\nimport load_nci1\nimport classification\n\n\nn_epoch = 200\nn_parts = 5\n\nparser = argparse.ArgumentParser()\nparser.add_argument('dataset', type=str, choices=('mutag', 'ptc'))\nargs = parser.parse_args()\n\nif args.dataset == 'mutag':\n mutag_file_name = \"MUTAG.mat\"\n graphs = load_mutag.load_whole_data('MUTAG.mat')\n MAX_EDGE_TYPE = load_mutag.MAX_EDGE_TYPE\n MAX_NUMBER_ATOM = load_mutag.MAX_NUMBER_ATOM\nelif args.dataset == 'ptc':\n smile_filename = 'corrected_smiles.txt'\n result_filename = 'corrected_results.txt'\n graphs = load_nci1.load_ptc(smile_filename, result_filename)\n MAX_EDGE_TYPE = load_nci1.MAX_EDGE_TYPE\n MAX_NUMBER_ATOM = load_nci1.MAX_NUMBER_ATOM\nelse:\n raise ValueError('Invalid dataset type: {}'.format(args.dataset))\n\nmodel.MAX_EDGE_TYPE = MAX_EDGE_TYPE\nmodel.MAX_NUMBER_ATOM = MAX_NUMBER_ATOM\n\nindexs_test = np.random.permutation(len(graphs))\nn_graphs = len(graphs)\nprint(\"num of graphs:\", n_graphs)\n\n\nrep_dim = 101\nmax_degree = 5\nnum_levels = 6\nneg_size = 10\nbatchsize = 100\n\nhid_dim = 100\nout_dim = 2\n\nsoftmax = model.SoftmaxCrossEntropy(rep_dim, MAX_NUMBER_ATOM)\nprint(\"[CONFIG: representation dim =\", rep_dim, \"]\")\natom2vec = model.Atom2vec(MAX_NUMBER_ATOM, rep_dim, max_degree, softmax)\nmodel = model.Mol2Vec(len(graphs), rep_dim, max_degree,\n num_levels, neg_size, atom2vec)\n\noptimizer = optimizers.Adam()\noptimizer.setup(model)\nprint(\"start training\")\nfor epoch in range(1, n_epoch + 1):\n print(\"epoch:\", epoch)\n indexes = np.random.permutation(len(graphs))\n sum_loss = 0\n\n for i in range(0, n_graphs, batchsize):\n maxid = min(i + batchsize, n_graphs)\n ids = indexes[i:maxid]\n\n graphids = []\n adjs = []\n atom_arrays = []\n for id in indexes[i:maxid]:\n graphids.append(graphs[id][0])\n # index 1 and 2 need to be changed for MUTAG or NCI1 datasets\n atom_arrays.append(graphs[id][1])\n adjs.append(graphs[id][2])\n\n graphids = np.asarray(graphids)\n adjs = np.asarray(adjs, dtype=np.float32)\n atom_arrays = np.asarray(atom_arrays, dtype=np.int32)\n optimizer.update(model, graphids, adjs, atom_arrays)\n\n sum_loss += float(model.loss.data) * len(graphids)\n print(\"-----\", float(model.loss.data) * len(graphids))\n print(\"loss: \", sum_loss / n_graphs)\n serializers.save_npz(str(rep_dim) + \"_model_ptc.npz\", model)\n\n # after each epcoh, check result\n if epoch % 10 == 0:\n classification.MLPClassifier(model, graphs, indexs_test,\n rep_dim, batchsize)\n"
] |
[
[
"numpy.asarray"
]
] |
cirmuw/functional-twin-analysis
|
[
"b6730f09f2143d5372f1a90d5fac47e3385e54fb",
"b6730f09f2143d5372f1a90d5fac47e3385e54fb"
] |
[
"Code/PrepareTables/SelectedROICorrs_positionVar.py",
"Code/PrepareTables/SelectedROICorrs_vertex.py"
] |
[
"#script to create tabels containig x, y and z coordinates of functionally corresponding vertices (position variability) for each twin, one table per vertex\n#input:id of functionally corresponding vetices of each twin to reference\n#output: tables with vertex position in each subject, one table per vetex\nimport numpy as np\nimport nibabel as nib\nimport pandas as pd\nfrom glob import glob\nimport os, sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\nimport settings as s\nimport pickle\n\n\n\n\n#paths to subject data,id of vertices without signal, surface file, parcelation, chosen rois\ninfile =s.HCP_information_sheet_path #\\\nsubjectpath1=s.HCProot+'HCP_3T_RESTA_fmri/'# used obtain subject ids\nsubjectpath2=s.HCProot+'HCP_3T_RESTB_fmri/'#/\nsource_dir=s.projectfolder+'7NETS_vertex/5_7nets_corresponding/' # path containing id of functionally corresponding vetices of each twin to reference\ntarget_dir=s.projectfolder+'/7NETS_vertex/10_PositionVar_cosine/'# output tables with vertex position in each subject\nif not os.path.exists(target_dir):\n os.mkdir(target_dir)\n\nzerovertexlh=np.load('../../Deliveries/0verticeslh.npy')#ids of vertices without signal\nzerovertexrh=np.load('../../Deliveries/0verticesrh.npy')\n\nsurfacedirlh='../../Deliveries/fsaverage4/lh.inflated' # surface on which vertex coordinates are based\nsurfacedirrh='../../Deliveries/fsaverage4/rh.inflated'\nlhsurf=nib.freesurfer.io.read_geometry(surfacedirlh)\nrhsurf=nib.freesurfer.io.read_geometry(surfacedirrh)\nlhsurf=lhsurf[0]\nlhsurf=np.delete(lhsurf,zerovertexlh,0) \nrhsurf=rhsurf[0]\nrhsurf=np.delete(rhsurf,zerovertexrh,0) \nsurf=np.concatenate([lhsurf,rhsurf],axis=0) \n\nlhparpath='../../Deliveries/lh.Schaefer2018_600Parcels_7Networks_order.annot'\nrhparpath='../../Deliveries/rh.Schaefer2018_600Parcels_7Networks_order.annot'\nlhannot=nib.freesurfer.io.read_annot(lhparpath)\nlhlabels=lhannot[0]\nrhannot=nib.freesurfer.io.read_annot(rhparpath)\nrhlabels=rhannot[0]\nlabelslh=np.delete(lhlabels,zerovertexlh,0)\nlabelsrh=np.delete(rhlabels,zerovertexrh,0)\n\nlhrois=list(np.load('../../Deliveries/chosenroislh.npy'))#save id of chosen rois\nrhrois=list(np.load('../../Deliveries/chosenroisrh.npy'))\nlhrois=lhrois[1:]\nrhrois=rhrois[1:]\nnameslhrois=['l_'+str(s) for s in lhrois]\nnamesrhrois=['r_'+str(s) for s in rhrois]\n\n\n#get assigenment of parcels to yeo nets based on color table\nlhnetwork=np.zeros((9))\nrhnetwork=np.zeros((9))\nlhnetwork[8]=301\nrhnetwork[8]=301\nc1=1\nc2=1\n\nfor i in range(1,301):\n if abs(lhannot[1][i][0]-lhannot[1][i-1][0])>5:\n lhnetwork[c1]=int(i)\n c1=c1+1\n \n if abs(rhannot[1][i][0]-rhannot[1][i-1][0])>5:\n rhnetwork[c2]=int(i)\n c2=c2+1\n\n \n\n#Get paths to mgh-files of available subjects\nxl=pd.ExcelFile(infile)\ndataframe1=xl.parse('Sheet1')\nisNotTwin=dataframe1['Twin_Stat']=='NotTwin'\nisNotTwin=np.where(isNotTwin)[0]\ndataframe2=dataframe1.drop(isNotTwin,0)\nSubjects=dataframe2['Subject'].values\n\npath1=[]\npath2=[]\nfor i in range(Subjects.shape[0]):\n path1.append(subjectpath1+str(Subjects[i]))\n path2.append(subjectpath2+str(Subjects[i]))\n\n\ntruesubjects=[]\n\nfor i in range(Subjects.shape[0]):\n if os.path.isdir(path1[i])==True:\n truesubjects.append(Subjects[i])\n if os.path.isdir(path2[i])==True:\n truesubjects.append(Subjects[i])\n\n\n\nname=['Subject','Zygosity','Mother_ID']\nnonvertexdat=np.zeros((len(truesubjects),3),dtype=object)\n\nfor j in range(len(labelslh)):\n if labelslh[j]!=0:\n 
positionvar=[] \n for i in range(len(truesubjects)):\n functional=pickle.load(open(source_dir+'lh_'+str(j+1)+'correspondingvertices.p','rb'))\n index=np.where(functional[1]==-1)[0]\n index=functional[0][i][index]\n index=index[0]\n coords=surf[index]\n positionframe=pd.DataFrame(coords) \n positionframe.columns=['x','y','z']\n positionvar.append(positionframe)\n \n\n\n if j==0:\n index=dataframe2[dataframe2['Subject']==truesubjects[i]].index.tolist()\n tmp1=np.array([str(truesubjects[i]),dataframe2['Zygosity'][index].values[0], str(dataframe2['Mother_ID'][index].values[0])])\n nonvertexdat[i,:]=tmp1\n \n nonvertextable=pd.DataFrame(data=nonvertexdat)\n nonvertextable.columns=name\n positionframe=pd.concat(positionvar,axis=0,ignore_index=True)\n table=pd.concat([nonvertextable,positionframe],axis=1)\n table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])\n table.reset_index(inplace=True)\n table=table.drop('index',axis=1) \n writefile=target_dir+'lh_'+str(j+1)+'_mean_position.csv.gz'\n table.to_csv(writefile, compression='gzip')\nfor j in range(len(labelsrh)):\n if labelsrh[j]!=0:\n positionvar=[] \n for i in range(len(truesubjects)):\n functional=pickle.load(open(source_dir+'rh_'+str(j+1)+'correspondingvertices.p','rb'))\n index=np.where(functional[1]==-1)[0]\n index=functional[0][i][index]\n index=index[0]\n coords=surf[index]\n positionframe=pd.DataFrame(coords) \n positionframe.columns=['x','y','z']\n positionvar.append(positionframe)\n \n\n \n nonvertextable=pd.DataFrame(data=nonvertexdat)\n nonvertextable.columns=name\n positionframe=pd.concat(positionvar,axis=0,ignore_index=True)\n table=pd.concat([nonvertextable,positionframe],axis=1)\n table=table.sort_values(['Zygosity', 'Mother_ID'], axis=0, ascending=[True,True])\n table.reset_index(inplace=True)\n table=table.drop('index',axis=1) \n writefile=target_dir+'rh_'+str(j+1)+'_mean_position.csv.gz'\n table.to_csv(writefile, compression='gzip')\nprint('Finished')\n",
"#script to calculate for each vertex the correlation of the vertex time series with time series of selected parcels\n#input:fmri series of twin subjects\n#output:tabels containig correlation values for each twin, one table per vertex\n #correlation matrices for each subjects as intermediate output\nimport numpy as np\nimport nibabel as nib\nimport pandas as pd\nfrom glob import glob\nimport os, sys\ncurrentdir = os.path.dirname(os.path.realpath(__file__))\nparentdir = os.path.dirname(currentdir)\nsys.path.append(parentdir)\nimport settings as s\nimport pickle\nfrom myfuns import subject_into_csv_vertex\n\n\nWhichModelType='Original' #'Original' # setting for antomical aligned data ('Original') or functional aligned data ('AfterEmbedding')\ncorr_mats_calculated=False #set if correlatin matrix of time series have aleady been calculated\n\n\n#paths to fmri data, parcellation, id of vetices without signal,chosen rois\ninfile =s.HCP_information_sheet_path # \\\nsubjectpath1=s.HCProot+'HCP_3T_RESTA_fmri/'# used to get paths to fmri runs of twin subjects\nsubjectpath2=s.HCProot+'HCP_3T_RESTB_fmri/'# /\n\nzerovertexlh=np.load('../../Deliveries/0verticeslh.npy')# id of vertices without signal\nzerovertexrh=np.load('../../Deliveries/0verticesrh.npy')\n\nlhparpath='../../Deliveries/lh.Schaefer2018_600Parcels_7Networks_order.annot'# path to parcelation\nrhparpath='../../Deliveries/rh.Schaefer2018_600Parcels_7Networks_order.annot'\n\nlhrois=list(np.load('../../Deliveries/chosenroislh.npy'))\nrhrois=list(np.load('../../Deliveries/chosenroisrh.npy'))\nlhrois=lhrois[1:]\nrhrois=rhrois[1:]\nnameslhrois=['l_'+str(s) for s in lhrois]\nnamesrhrois=['r_'+str(s) for s in rhrois]\n\n\n\n# set additional input and output paths\nif WhichModelType=='AfterEmbedding': \n source_dir=s.projectfolder+'7NETS_vertex/5_7nets_corresponding/' # path containig id of functionally corresponding vertices of each twin to reference\n corr_mat_dir=s.projectfolder+'7NETS_vertex/6_7nets_corr_matrices_cosine/'# correlation values for each subject, intermediate output\n targetdir=s.projectfolder+'7NETS_vertex/7_7nets_ROIs_cosine/'# tables for each vertex\n if not os.path.exists(corr_mat_dir):\n os.mkdir(corr_mat_dir)\n if not os.path.exists(targetdir):\n os.mkdir(targetdir)\n\n \nif WhichModelType=='Original':\n corr_mat_dir=s.projectfolder+'7NETS_vertex/1_7net_corr_matrices/'# correlation values for each subject, intermediate output\n targetdir=s.projectfolder+'7NETS_vertex/2_7nets_ROIs/'# tables for each vertex\n if not os.path.exists(corr_mat_dir):\n os.mkdir(corr_mat_dir)\n if not os.path.exists(targetdir):\n os.mkdir(targetdir)\n\n \nlhannot=nib.freesurfer.io.read_annot(lhparpath)\nlhlabels=lhannot[0]\nrhannot=nib.freesurfer.io.read_annot(rhparpath)\nrhlabels=rhannot[0]\nlabelslh=np.delete(lhlabels,zerovertexlh,0)\nlabelsrh=np.delete(rhlabels,zerovertexrh,0)\n\n\n#get assigenment of parcels to yeo nets based on color table\nlhnetwork=np.zeros((9))\nrhnetwork=np.zeros((9))\nlhnetwork[8]=301\nrhnetwork[8]=301\nc1=1\nc2=1\n\nfor i in range(1,301):\n if abs(lhannot[1][i][0]-lhannot[1][i-1][0])>5:\n lhnetwork[c1]=int(i)\n c1=c1+1\n \n if abs(rhannot[1][i][0]-rhannot[1][i-1][0])>5:\n rhnetwork[c2]=int(i)\n c2=c2+1\n\n \n\n \n\n#Get paths to mgh-files(fmri runs) of available twin subjects\nxl=pd.ExcelFile(infile)\ndataframe1=xl.parse('Sheet1')\nisNotTwin=dataframe1['Twin_Stat']=='NotTwin'\nisNotTwin=np.where(isNotTwin)[0]\ndataframe2=dataframe1.drop(isNotTwin,0)\nSubjects=dataframe2['Subject'].values\n\npath1=[]\npath2=[]\nfor i in 
range(Subjects.shape[0]):\n path1.append(subjectpath1+str(Subjects[i]))\n path2.append(subjectpath2+str(Subjects[i]))\n\nfmri_LH_LR_R1=[]\nfmri_RH_LR_R1=[]\nfmri_LH_RL_R1=[]\nfmri_RH_RL_R1=[]\nfmri_LH_LR_R2=[]\nfmri_RH_LR_R2=[]\nfmri_LH_RL_R2=[]\nfmri_RH_RL_R2=[]\ntruesubjects=[]\n\nfor i in range(Subjects.shape[0]):\n if os.path.isdir(path1[i])==True:\n fmri_LH_LR_R1.append(path1[i]+'/lh.rfMRI_REST1_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n fmri_RH_LR_R1.append(path1[i]+'/rh.rfMRI_REST1_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n #fmri_LH_RL_R1.append(path1[i]+'/lh.rfMRI_REST1_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n #fmri_RH_RL_R1.append(path1[i]+'/rh.rfMRI_REST1_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n fmri_LH_LR_R2.append(path1[i]+'/lh.rfMRI_REST2_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n fmri_RH_LR_R2.append(path1[i]+'/rh.rfMRI_REST2_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n #fmri_LH_RL_R2.append(path1[i]+'/lh.rfMRI_REST2_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n #fmri_RH_RL_R2.append(path1[i]+'/rh.rfMRI_REST2_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n truesubjects.append(Subjects[i])\n if os.path.isdir(path2[i])==True:\n fmri_LH_LR_R1.append(path2[i]+'/lh.rfMRI_REST1_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n fmri_RH_LR_R1.append(path2[i]+'/rh.rfMRI_REST1_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n #fmri_LH_RL_R1.append(path2[i]+'/lh.rfMRI_REST1_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n #fmri_RH_RL_R1.append(path2[i]+'/rh.rfMRI_REST1_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n fmri_LH_LR_R2.append(path2[i]+'/lh.rfMRI_REST2_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n fmri_RH_LR_R2.append(path2[i]+'/rh.rfMRI_REST2_LR_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n #fmri_LH_RL_R2.append(path2[i]+'/lh.rfMRI_REST2_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n #fmri_RH_RL_R2.append(path2[i]+'/rh.rfMRI_REST2_RL_Atlas_hp2000_clean_bpss_gsr_fs4.mgh')\n truesubjects.append(Subjects[i])\n\n\n\n\n\n\n\n\n#calculate correlation matrices for subjects\nif corr_mats_calculated==False:\n \n for i in range(len(truesubjects)):\n \n lh1=nib.load(fmri_LH_LR_R1[i]).get_data() #choose LR or RL runs\n lh2=nib.load(fmri_LH_LR_R2[i]).get_data()\n rh1=nib.load(fmri_RH_LR_R1[i]).get_data()\n rh2=nib.load(fmri_RH_LR_R2[i]).get_data()\n\n lh1.resize(lh1.shape[1],lh1.shape[3])\n lh2.resize(lh2.shape[1],lh2.shape[3])\n rh1.resize(rh1.shape[1],rh1.shape[3])\n rh2.resize(rh2.shape[1],rh2.shape[3])\n\n lh=np.concatenate((lh1,lh2),axis=1)\n rh=np.concatenate((rh1,rh2),axis=1)\n lh=np.delete(lh,zerovertexlh,axis=0)\n rh=np.delete(rh,zerovertexrh,axis=0)\n \n if WhichModelType=='Original': \n lhframe=pd.DataFrame(np.transpose(lh))\n lhframe=lhframe.groupby(labelslh,axis=1).mean()\n rhframe=pd.DataFrame(np.transpose(rh))\n rhframe=rhframe.groupby(labelsrh,axis=1).mean()\n\n \n lhframe=lhframe.iloc[:,lhrois]\n lhframe.columns=nameslhrois\n rhframe=rhframe.iloc[:,rhrois]\n rhframe.columns=namesrhrois\n frame=pd.concat([lhframe,rhframe],axis=1)\n for j in range(len(labelslh)):\n if labelslh[j]!=0:\n if not os.path.exists(corr_mat_dir+'lh_vertex_'+str(j+1)):\n os.makedirs(corr_mat_dir+'lh_vertex_'+str(j+1))\n vertex=pd.DataFrame(np.transpose(lh[j,:]))\n vertex.columns=[str(j+1)]\n assign=lhnetwork>labelslh[j]\n assign=np.expand_dims(assign,0)\n assign=np.where(np.any(assign,axis=0))[0][0]\n assign=assign-2\n frame=frame.drop(nameslhrois[assign],1)\n frame=pd.concat([vertex,frame],1)\n framecor=frame.corr()\n outfileLR=corr_mat_dir +'lh_vertex_'+str(j+1)+'/'+ str(i) + '.csv.gz'\n framecor.to_csv(outfileLR, compression='gzip')\n 
frame=pd.concat([lhframe,rhframe],axis=1)\n\n \n for j in range(len(labelsrh)):\n if labelsrh[j]!=0:\n if not os.path.exists(corr_mat_dir+'rh_vertex_'+str(j+1)):\n os.makedirs(corr_mat_dir+'rh_vertex_'+str(j+1))\n vertex=pd.DataFrame(np.transpose(rh[j,:]))\n vertex.columns=[str(j+1)]\n assign=rhnetwork>labelsrh[j]\n assign=np.expand_dims(assign,0)\n assign=np.where(np.any(assign,axis=0))[0][0]\n assign=assign-2\n frame=frame.drop(namesrhrois[assign],1)\n frame=pd.concat([vertex,frame],1)\n framecor=frame.corr()\n outfileLR=corr_mat_dir +'rh_vertex_'+str(j+1)+'/'+ str(i) + '.csv.gz'\n framecor.to_csv(outfileLR, compression='gzip')\n frame=pd.concat([lhframe,rhframe],axis=1)\n \n\n if WhichModelType=='AfterEmbedding': \n for j in range(len(labelslh)): \n if labelslh[j]!=0:\n if not os.path.exists(corr_mat_dir+'lh_vertex_'+str(j+1)):\n os.makedirs(corr_mat_dir+'lh_vertex_'+str(j+1))\n functional=pickle.load(open(source_dir+'lh_'+str(j+1)+'correspondingvertices.p','rb')) \n brain=np.concatenate([lh,rh],axis=0)\n brain=brain[np.squeeze(functional[0][i]),:] \n brainframe=pd.DataFrame(np.transpose(brain))\n brainframe=brainframe.groupby(functional[1],axis=1).mean()\n\n assign=lhnetwork>labelslh[j]\n assign=np.expand_dims(assign,0)\n assign=np.where(np.any(assign,axis=0))[0][0]\n assign=assign-2\n brainframe.columns=[str(j+1)]+nameslhrois[0:assign]+nameslhrois[assign+1:7]+namesrhrois\n framecor=brainframe.corr()\n\n outfileLR=corr_mat_dir +'lh_vertex_'+str(j+1)+'/'+ str(i) + '.csv.gz'\n framecor.to_csv(outfileLR, compression='gzip')\n\n for j in range(len(labelsrh)): \n if labelsrh[j]!=0:\n if not os.path.exists(corr_mat_dir+'rh_vertex_'+str(j+1)):\n os.makedirs(corr_mat_dir+'rh_vertex_'+str(j+1))\n functional=pickle.load(open(source_dir+'rh_'+str(j+1)+'correspondingvertices.p','rb')) \n brain=np.concatenate([lh,rh],axis=0)\n brain=brain[np.squeeze(functional[0][i]),:] \n brainframe=pd.DataFrame(np.transpose(brain))\n brainframe=brainframe.groupby(functional[1],axis=1).mean()\n\n assign=rhnetwork>labelsrh[j]\n assign=np.expand_dims(assign,0)\n assign=np.where(np.any(assign,axis=0))[0][0]\n assign=assign-2\n brainframe.columns=[str(j+1)]+nameslhrois+namesrhrois[0:assign]+namesrhrois[assign+1:7]\n framecor=brainframe.corr()\n\n outfileLR=corr_mat_dir +'rh_vertex_'+str(j+1)+'/'+ str(i) + '.csv.gz'\n framecor.to_csv(outfileLR, compression='gzip')\n print('correlation matrices: done') \n\n\n\n\n\n\n\n\n\n# create tables for each vertex based on correlation matrices\nif WhichModelType=='Original': \n#lh\n subject_into_csv_vertex(len(labelslh),'lh',truesubjects,WhichModelType,labelslh,lhnetwork,corr_mat_dir,targetdir,nameslhrois,namesrhrois,dataframe2,source_dir=None)\n#rh\n subject_into_csv_vertex(len(labelsrh),'rh',truesubjects,WhichModelType,labelsrh,rhnetwork,corr_mat_dir,targetdir,nameslhrois,namesrhrois,dataframe2,source_dir=None)\n\n\n\nif WhichModelType=='AfterEmbedding':\n#lh\n subject_into_csv_vertex(len(labelslh),'lh',truesubjects,WhichModelType,labelslh,lhnetwork,corr_mat_dir,targetdir,nameslhrois,namesrhrois,dataframe2,source_dir=source_dir)\n#rh\n subject_into_csv_vertex(len(labelsrh),'rh',truesubjects,WhichModelType,labelsrh,rhnetwork,corr_mat_dir,targetdir,nameslhrois,namesrhrois,dataframe2,source_dir=source_dir)\n\n\n\nprint('Finished') \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
[
[
"pandas.concat",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.delete",
"pandas.ExcelFile",
"numpy.load",
"numpy.zeros",
"numpy.where"
],
[
"pandas.concat",
"numpy.expand_dims",
"numpy.squeeze",
"numpy.concatenate",
"numpy.delete",
"numpy.any",
"pandas.ExcelFile",
"numpy.transpose",
"numpy.load",
"numpy.zeros",
"numpy.where"
]
] |
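Both scripts above repeatedly average vertex time series into parcels with a label-wise `groupby` over columns. A toy sketch of that reduction (synthetic data; the transpose form is used because column-wise `groupby(axis=1)` is deprecated in recent pandas):

```python
import numpy as np
import pandas as pd

# Toy stand-in: 6 "vertices" (columns) over 4 time points, with a parcel label
# per vertex (0 = unlabeled, as in the annot files above).
timeseries = pd.DataFrame(np.arange(24, dtype=float).reshape(4, 6))
labels = np.array([0, 1, 1, 2, 2, 2])

# Average the vertex columns that share a parcel label, analogous to
# lhframe.groupby(labelslh, axis=1).mean() above.
parcel_means = timeseries.T.groupby(labels).mean().T
print(parcel_means)
```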
fernandezc/pint
|
[
"37a61ede6fbd628c7dc160eb36278cf41c96484c"
] |
[
"pint/testing.py"
] |
[
"from __future__ import annotations\n\nimport math\nimport warnings\nfrom numbers import Number\n\nfrom . import Quantity\nfrom .compat import ndarray\n\ntry:\n import numpy as np\nexcept ImportError:\n np = None\n\n\ndef _get_comparable_magnitudes(first, second, msg):\n if isinstance(first, Quantity) and isinstance(second, Quantity):\n ctx = first._REGISTRY._active_ctx.contexts\n if first.is_compatible_with(second, *ctx):\n second = second.to(first)\n assert first.units == second.units, msg + \" Units are not equal.\"\n m1, m2 = first.magnitude, second.magnitude\n elif isinstance(first, Quantity):\n assert first.dimensionless, msg + \" The first is not dimensionless.\"\n first = first.to(\"\")\n m1, m2 = first.magnitude, second\n elif isinstance(second, Quantity):\n assert second.dimensionless, msg + \" The second is not dimensionless.\"\n second = second.to(\"\")\n m1, m2 = first, second.magnitude\n else:\n m1, m2 = first, second\n\n return m1, m2\n\n\ndef assert_equal(first, second, msg=None):\n if msg is None:\n msg = \"Comparing %r and %r. \" % (first, second)\n\n m1, m2 = _get_comparable_magnitudes(first, second, msg)\n msg += \" (Converted to %r and %r): Magnitudes are not equal\" % (m1, m2)\n\n if isinstance(m1, ndarray) or isinstance(m2, ndarray):\n np.testing.assert_array_equal(m1, m2, err_msg=msg)\n elif not isinstance(m1, Number):\n warnings.warn(RuntimeWarning)\n return\n elif not isinstance(m2, Number):\n warnings.warn(RuntimeWarning)\n return\n elif math.isnan(m1):\n assert math.isnan(m2), msg\n elif math.isnan(m2):\n assert math.isnan(m1), msg\n else:\n assert m1 == m2, msg\n\n\ndef assert_allclose(first, second, rtol=1e-07, atol=0, msg=None):\n if msg is None:\n try:\n msg = \"Comparing %r and %r. \" % (first, second)\n except TypeError:\n try:\n msg = \"Comparing %s and %s. \" % (first, second)\n except Exception:\n msg = \"Comparing\"\n\n m1, m2 = _get_comparable_magnitudes(first, second, msg)\n msg += \" (Converted to %r and %r)\" % (m1, m2)\n\n if isinstance(m1, ndarray) or isinstance(m2, ndarray):\n np.testing.assert_allclose(m1, m2, rtol=rtol, atol=atol, err_msg=msg)\n elif not isinstance(m1, Number):\n warnings.warn(RuntimeWarning)\n return\n elif not isinstance(m2, Number):\n warnings.warn(RuntimeWarning)\n return\n elif math.isnan(m1):\n assert math.isnan(m2), msg\n elif math.isnan(m2):\n assert math.isnan(m1), msg\n elif math.isinf(m1):\n assert math.isinf(m2), msg\n elif math.isinf(m2):\n assert math.isinf(m1), msg\n else:\n # Numpy version (don't like because is not symmetric)\n # assert abs(m1 - m2) <= atol + rtol * abs(m2), msg\n assert abs(m1 - m2) <= max(rtol * max(abs(m1), abs(m2)), atol), msg\n"
] |
[
[
"numpy.testing.assert_array_equal",
"numpy.testing.assert_allclose"
]
] |
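The closing comparison in pint's `assert_allclose` above deliberately uses a symmetric tolerance instead of NumPy's `|a - b| <= atol + rtol * |b|`, as its inline comment notes. A short illustration of where the two criteria disagree (the numbers are chosen only to expose the asymmetry):

```python
rtol, atol = 0.1, 0.0
a, b = 100.0, 111.0


def numpy_style(first, second):
    # NumPy's criterion: |first - second| <= atol + rtol * |second|
    return abs(first - second) <= atol + rtol * abs(second)


def symmetric(first, second):
    # The criterion used above: bounded by the larger of the two magnitudes.
    return abs(first - second) <= max(rtol * max(abs(first), abs(second)), atol)


print(numpy_style(a, b), numpy_style(b, a))   # True False -> order-dependent
print(symmetric(a, b), symmetric(b, a))       # True True  -> symmetric
```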
rafcy/FastMOT
|
[
"9aee101b1ac83a5fea8cece1f8cfda8030adb743"
] |
[
"fastmot/utils/visualization.py"
] |
[
"import colorsys\nimport numpy as np\nimport cv2\n\n\nGOLDEN_RATIO = 0.618033988749895\n\n\ndef draw_tracks(frame, tracks, show_flow=False, show_cov=False):\n for track in tracks:\n draw_bbox(frame, track.tlbr, get_color(track.trk_id), 2, str(track.trk_id))\n if show_flow:\n draw_feature_match(frame, track.prev_keypoints, track.keypoints, (0, 255, 255))\n if show_cov:\n draw_covariance(frame, track.tlbr, track.state[1])\n\n\ndef draw_detections(frame, detections, color=(255, 255, 255), show_conf=False):\n for det in detections:\n text = f'{det.label}: {det.conf:.2f}' if show_conf else None\n draw_bbox(frame, det.tlbr, color, 1, text)\n\n\ndef draw_klt_bboxes(frame, klt_bboxes, color=(0, 0, 0)):\n for tlbr in klt_bboxes:\n draw_bbox(frame, tlbr, color, 1)\n\n\ndef draw_tiles(frame, tiles, scale_factor, color=(0, 0, 0)):\n for tile in tiles:\n tlbr = np.rint(tile * np.tile(scale_factor, 2))\n draw_bbox(frame, tlbr, color, 1)\n\n\ndef draw_background_flow(frame, prev_bg_keypoints, bg_keypoints, color=(0, 0, 255)):\n draw_feature_match(frame, prev_bg_keypoints, bg_keypoints, color)\n\n\ndef get_color(idx, s=0.8, vmin=0.7):\n h = np.fmod(idx * GOLDEN_RATIO, 1.)\n v = 1. - np.fmod(idx * GOLDEN_RATIO, 1. - vmin)\n r, g, b = colorsys.hsv_to_rgb(h, s, v)\n return int(255 * b), int(255 * g), int(255 * r)\n\n\ndef draw_bbox(frame, tlbr, color, thickness, text=None):\n tlbr = tlbr.astype(int)\n tl, br = tuple(tlbr[:2]), tuple(tlbr[2:])\n cv2.rectangle(frame, tl, br, color, thickness)\n if text is not None:\n (text_width, text_height), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_DUPLEX, 0.5, 1)\n cv2.rectangle(frame, tl, (tl[0] + text_width - 1, tl[1] + text_height - 1),\n color, cv2.FILLED)\n cv2.putText(frame, text, (tl[0], tl[1] + text_height - 1), cv2.FONT_HERSHEY_DUPLEX,\n 0.5, 0, 1, cv2.LINE_AA)\n\n\ndef draw_feature_match(frame, prev_pts, cur_pts, color):\n if len(cur_pts) > 0:\n cur_pts = np.rint(cur_pts).astype(np.int32)\n for pt in cur_pts:\n cv2.circle(frame, tuple(pt), 1, color, cv2.FILLED)\n if len(prev_pts) > 0:\n prev_pts = np.rint(prev_pts).astype(np.int32)\n for pt1, pt2 in zip(prev_pts, cur_pts):\n cv2.line(frame, tuple(pt1), tuple(pt2), color, 1, cv2.LINE_AA)\n\n\ndef draw_covariance(frame, tlbr, covariance):\n tlbr = tlbr.astype(int)\n tl, br = tuple(tlbr[:2]), tuple(tlbr[2:])\n\n def ellipse(cov):\n vals, vecs = np.linalg.eigh(cov)\n order = vals.argsort()[::-1]\n # 95% confidence ellipse\n vals, vecs = np.sqrt(vals[order] * 5.9915), vecs[:, order]\n axes = int(vals[0] + 0.5), int(vals[1] + 0.5)\n angle = np.degrees(np.arctan2(vecs[1, 0], vecs[0, 0]))\n return axes, angle\n\n axes, angle = ellipse(covariance[:2, :2])\n cv2.ellipse(frame, tl, axes, angle, 0, 360, (255, 255, 255), 1, cv2.LINE_AA)\n axes, angle = ellipse(covariance[2:4, 2:4])\n cv2.ellipse(frame, br, axes, angle, 0, 360, (255, 255, 255), 1, cv2.LINE_AA)\n\n\nclass Visualizer:\n def __init__(self,\n draw_detections=False,\n draw_confidence=False,\n draw_covariance=False,\n draw_klt=False,\n draw_obj_flow=False,\n draw_bg_flow=False):\n \"\"\"Class for visualization.\n\n Parameters\n ----------\n draw_detections : bool, optional\n Enable drawing detections.\n draw_confidence : bool, optional\n Enable drawing detection confidence, ignored if `draw_detections` is disabled.\n draw_covariance : bool, optional\n Enable drawing Kalman filter position covariance.\n draw_klt : bool, optional\n Enable drawing KLT bounding boxes.\n draw_obj_flow : bool, optional\n Enable drawing object flow matches.\n draw_bg_flow : bool, 
optional\n Enable drawing background flow matches.\n \"\"\"\n self.draw_detections = draw_detections\n self.draw_confidence = draw_confidence\n self.draw_covariance = draw_covariance\n self.draw_klt = draw_klt\n self.draw_obj_flow = draw_obj_flow\n self.draw_bg_flow = draw_bg_flow\n\n def render(self, frame, tracks, detections, klt_bboxes, prev_bg_keypoints, bg_keypoints):\n \"\"\"Render visualizations onto the frame.\"\"\"\n draw_tracks(frame, tracks, show_flow=self.draw_obj_flow, show_cov=self.draw_covariance)\n if self.draw_detections:\n draw_detections(frame, detections, show_conf=self.draw_confidence)\n if self.draw_klt:\n draw_klt_bboxes(frame, klt_bboxes)\n if self.draw_bg_flow:\n draw_background_flow(frame, prev_bg_keypoints, bg_keypoints)\n"
] |
[
[
"numpy.fmod",
"numpy.sqrt",
"numpy.rint",
"numpy.tile",
"numpy.arctan2",
"numpy.linalg.eigh"
]
] |
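The `ellipse` helper inside `draw_covariance` above turns a 2x2 position covariance into OpenCV ellipse axes and an angle for a 95% confidence region. The same math in isolation, without the drawing calls (the covariance values are made up):

```python
import numpy as np

# Hypothetical 2x2 position covariance (pixels^2).
cov = np.array([[25.0, 10.0],
                [10.0, 16.0]])

vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]              # largest eigenvalue first
# Chi-square value 5.9915 corresponds to the 95% region for 2 degrees of
# freedom, so the semi-axes are sqrt(eigenvalue * 5.9915).
vals, vecs = np.sqrt(vals[order] * 5.9915), vecs[:, order]
axes = int(vals[0] + 0.5), int(vals[1] + 0.5)
angle = np.degrees(np.arctan2(vecs[1, 0], vecs[0, 0]))

print(axes, round(angle, 1))              # semi-axes in pixels, major-axis angle in degrees
```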
RachithP/rpg_public_dronet
|
[
"244b44c6d321e77cfe326071f8413ea1f7e438cb"
] |
[
"utils.py"
] |
[
"import re\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport json\nimport time\nfrom keras import backend as K\nfrom keras.preprocessing.image import Iterator\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.utils.generic_utils import Progbar\nfrom keras.models import model_from_json\n\nimport img_utils\n\n\nclass DroneDataGenerator(ImageDataGenerator):\n \"\"\"\n Generate minibatches of images and labels with real-time augmentation.\n\n The only function that changes w.r.t. parent class is the flow that\n generates data. This function needed in fact adaptation for different\n directory structure and labels. All the remaining functions remain\n unchanged.\n\n For an example usage, see the evaluate.py script\n \"\"\"\n def flow_from_directory(self, directory, target_size=(224,224),\n crop_size=(250,250), color_mode='grayscale', batch_size=32,\n shuffle=True, seed=None, follow_links=False):\n return DroneDirectoryIterator(\n directory, self,\n target_size=target_size, crop_size=crop_size, color_mode=color_mode,\n batch_size=batch_size, shuffle=shuffle, seed=seed,\n follow_links=follow_links)\n\n\nclass DroneDirectoryIterator(Iterator):\n \"\"\"\n Class for managing data loading.of images and labels\n We assume that the folder structure is:\n root_folder/\n folder_1/\n images/\n sync_steering.txt or labels.txt\n folder_2/\n images/\n sync_steering.txt or labels.txt\n .\n .\n folder_n/\n images/\n sync_steering.txt or labels.txt\n\n # Arguments\n directory: Path to the root directory to read data from.\n image_data_generator: Image Generator.\n target_size: tuple of integers, dimensions to resize input images to.\n crop_size: tuple of integers, dimensions to crop input images.\n color_mode: One of `\"rgb\"`, `\"grayscale\"`. 
Color mode to read images.\n batch_size: The desired batch size\n shuffle: Whether to shuffle data or not\n seed : numpy seed to shuffle data\n follow_links: Bool, whether to follow symbolic links or not\n\n # TODO: Add functionality to save images to have a look at the augmentation\n \"\"\"\n def __init__(self, directory, image_data_generator,\n target_size=(224,224), crop_size = (250,250), color_mode='grayscale',\n batch_size=32, shuffle=True, seed=None, follow_links=False):\n self.directory = directory\n self.image_data_generator = image_data_generator\n self.target_size = tuple(target_size)\n self.crop_size = tuple(crop_size)\n self.follow_links = follow_links\n if color_mode not in {'rgb', 'grayscale'}:\n raise ValueError('Invalid color mode:', color_mode,\n '; expected \"rgb\" or \"grayscale\".')\n self.color_mode = color_mode\n if self.color_mode == 'rgb':\n self.image_shape = self.crop_size + (3,)\n else:\n self.image_shape = self.crop_size + (1,)\n\n # First count how many experiments are out there\n self.samples = 0\n\n experiments = []\n for subdir in sorted(os.listdir(directory)):\n if os.path.isdir(os.path.join(directory, subdir)):\n experiments.append(subdir)\n self.num_experiments = len(experiments)\n self.formats = {'png', 'jpg'}\n\n # Idea = associate each filename with a corresponding steering or label\n self.filenames = []\n self.ground_truth = []\n\n # Determine the type of experiment (steering or collision) to compute\n # the loss\n self.exp_type = []\n\n for subdir in experiments:\n subpath = os.path.join(directory, subdir)\n self._decode_experiment_dir(subpath)\n\n # Conversion of list into array\n self.ground_truth = np.array(self.ground_truth, dtype = K.floatx())\n\n assert self.samples > 0, \"Did not find any data\"\n\n print('Found {} images belonging to {} experiments.'.format(\n self.samples, self.num_experiments))\n super(DroneDirectoryIterator, self).__init__(self.samples,\n batch_size, shuffle, seed)\n\n def _recursive_list(self, subpath):\n return sorted(os.walk(subpath, followlinks=self.follow_links),\n key=lambda tpl: tpl[0])\n\n def _decode_experiment_dir(self, dir_subpath):\n # Load steerings or labels in the experiment dir\n steerings_filename = os.path.join(dir_subpath, \"sync_steering.txt\")\n labels_filename = os.path.join(dir_subpath, \"labels.txt\")\n\n # Try to load steerings first. Make sure that the steering angle or the\n # label file is in the first column. Note also that the first line are\n # comments so it should be skipped.\n try:\n ground_truth = np.loadtxt(steerings_filename, usecols=0,\n delimiter=',', skiprows=1)\n exp_type = 1\n except OSError as e:\n # Try load collision labels if there are no steerings\n try:\n ground_truth = np.loadtxt(labels_filename, usecols=0)\n exp_type = 0\n except OSError as e:\n print(\"Neither steerings nor labels found in dir {}\".format(\n dir_subpath))\n raise IOError\n\n\n # Now fetch all images in the image subdir\n image_dir_path = os.path.join(dir_subpath, \"images\")\n for root, _, files in self._recursive_list(image_dir_path):\n sorted_files = sorted(files,\n key = lambda fname: int(re.search(r'\\d+',fname).group()))\n for frame_number, fname in enumerate(sorted_files):\n is_valid = False\n for extension in self.formats:\n if fname.lower().endswith('.' 
+ extension):\n is_valid = True\n break\n if is_valid:\n absolute_path = os.path.join(root, fname)\n self.filenames.append(os.path.relpath(absolute_path,\n self.directory))\n self.ground_truth.append(ground_truth[frame_number])\n self.exp_type.append(exp_type)\n self.samples += 1\n\n\n def next(self):\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)\n\n def _get_batches_of_transformed_samples(self, index_array) :\n \"\"\"\n Public function to fetch next batch.\n\n # Returns\n The next batch of images and labels.\n \"\"\"\n current_batch_size = index_array.shape[0]\n # Image transformation is not under thread lock, so it can be done in\n # parallel\n batch_x = np.zeros((current_batch_size,) + self.image_shape,\n dtype=K.floatx())\n batch_steer = np.zeros((current_batch_size, 2,),\n dtype=K.floatx())\n batch_coll = np.zeros((current_batch_size, 2,),\n dtype=K.floatx())\n\n grayscale = self.color_mode == 'grayscale'\n\n # Build batch of image data\n for i, j in enumerate(index_array):\n fname = self.filenames[j]\n x = img_utils.load_img(os.path.join(self.directory, fname),\n grayscale=grayscale,\n crop_size=self.crop_size,\n target_size=self.target_size)\n\n x = self.image_data_generator.random_transform(x)\n x = self.image_data_generator.standardize(x)\n batch_x[i] = x\n\n # Build batch of steering and collision data\n if self.exp_type[index_array[i]] == 1:\n # Steering experiment (t=1)\n batch_steer[i,0] =1.0\n batch_steer[i,1] = self.ground_truth[index_array[i]]\n batch_coll[i] = np.array([1.0, 0.0])\n else:\n # Collision experiment (t=0)\n batch_steer[i] = np.array([0.0, 0.0])\n batch_coll[i,0] = 0.0\n batch_coll[i,1] = self.ground_truth[index_array[i]]\n\n batch_y = [batch_steer, batch_coll]\n return batch_x, batch_y\n\n\ndef compute_predictions_and_gt(model, generator, steps,\n max_q_size=10,\n pickle_safe=False, verbose=0):\n \"\"\"\n Generate predictions and associated ground truth\n for the input samples from a data generator.\n The generator should return the same kind of data as accepted by\n `predict_on_batch`.\n Function adapted from keras `predict_generator`.\n\n # Arguments\n generator: Generator yielding batches of input samples.\n steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n max_q_size: Maximum size for the generator queue.\n pickle_safe: If `True`, use process based threading.\n Note that because\n this implementation relies on multiprocessing,\n you should not pass\n non picklable arguments to the generator\n as they can't be passed\n easily to children processes.\n verbose: verbosity mode, 0 or 1.\n\n # Returns\n Numpy array(s) of predictions and associated ground truth.\n\n # Raises\n ValueError: In case the generator yields\n data in an invalid format.\n \"\"\"\n steps_done = 0\n all_outs = []\n all_labels = []\n all_ts = []\n\n if verbose == 1:\n progbar = Progbar(target=steps)\n\n while steps_done < steps:\n generator_output = next(generator)\n\n if isinstance(generator_output, tuple):\n if len(generator_output) == 2:\n x, gt_lab = generator_output\n elif len(generator_output) == 3:\n x, gt_lab, _ = generator_output\n else:\n raise ValueError('output of generator should be '\n 'a tuple `(x, y, sample_weight)` '\n 'or `(x, y)`. 
Found: ' +\n str(generator_output))\n else:\n raise ValueError('Output not valid for current evaluation')\n start_time = time.time()\n outs = model.predict_on_batch(x)\n time_diff = time.time() - start_time\n print(\"\\n Time Diff: \", time_diff)\n print(\"Batch Size: \", len(x))\n print(\"FPS: \", len(x)/time_diff)\n\t\n if not isinstance(outs, list):\n outs = [outs]\n if not isinstance(gt_lab, list):\n gt_lab = [gt_lab]\n\n if not all_outs:\n for out in outs:\n # Len of this list is related to the number of\n # outputs per model(1 in our case)\n all_outs.append([])\n\n if not all_labels:\n # Len of list related to the number of gt_commands\n # per model (1 in our case )\n for lab in gt_lab:\n all_labels.append([])\n all_ts.append([])\n\n\n for i, out in enumerate(outs):\n all_outs[i].append(out)\n\n for i, lab in enumerate(gt_lab):\n all_labels[i].append(lab[:,1])\n all_ts[i].append(lab[:,0])\n\n steps_done += 1\n if verbose == 1:\n progbar.update(steps_done)\n\n if steps_done == 1:\n return [out for out in all_outs], [lab for lab in all_labels], np.concatenate(all_ts[0])\n else:\n return np.squeeze(np.array([np.concatenate(out) for out in all_outs])).T, \\\n np.array([np.concatenate(lab) for lab in all_labels]).T, \\\n np.concatenate(all_ts[0])\n\n\n\ndef hard_mining_mse(k):\n \"\"\"\n Compute MSE for steering evaluation and hard-mining for the current batch.\n\n # Arguments\n k: number of samples for hard-mining.\n\n # Returns\n custom_mse: average MSE for the current batch.\n \"\"\"\n\n def custom_mse(y_true, y_pred):\n # Parameter t indicates the type of experiment\n t = y_true[:,0]\n\n # Number of steering samples\n samples_steer = tf.cast(tf.equal(t,1), tf.int32)\n n_samples_steer = tf.reduce_sum(samples_steer)\n\n if n_samples_steer == 0:\n return 0.0\n else:\n # Predicted and real steerings\n pred_steer = tf.squeeze(y_pred, squeeze_dims=-1)\n true_steer = y_true[:,1]\n\n # Steering loss\n l_steer = tf.multiply(t, K.square(pred_steer - true_steer))\n\n # Hard mining\n k_min = tf.minimum(k, n_samples_steer)\n _, indices = tf.nn.top_k(l_steer, k=k_min)\n max_l_steer = tf.gather(l_steer, indices)\n hard_l_steer = tf.divide(tf.reduce_sum(max_l_steer), tf.cast(k,tf.float32))\n\n return hard_l_steer\n\n return custom_mse\n\n\n\ndef hard_mining_entropy(k):\n \"\"\"\n Compute binary cross-entropy for collision evaluation and hard-mining.\n\n # Arguments\n k: Number of samples for hard-mining.\n\n # Returns\n custom_bin_crossentropy: average binary cross-entropy for the current batch.\n \"\"\"\n\n def custom_bin_crossentropy(y_true, y_pred):\n # Parameter t indicates the type of experiment\n t = y_true[:,0]\n\n # Number of collision samples\n samples_coll = tf.cast(tf.equal(t,0), tf.int32)\n n_samples_coll = tf.reduce_sum(samples_coll)\n\n if n_samples_coll == 0:\n return 0.0\n else:\n # Predicted and real labels\n pred_coll = tf.squeeze(y_pred, squeeze_dims=-1)\n true_coll = y_true[:,1]\n\n # Collision loss\n l_coll = tf.multiply((1-t), K.binary_crossentropy(true_coll, pred_coll))\n\n # Hard mining\n k_min = tf.minimum(k, n_samples_coll)\n _, indices = tf.nn.top_k(l_coll, k=k_min)\n max_l_coll = tf.gather(l_coll, indices)\n hard_l_coll = tf.divide(tf.reduce_sum(max_l_coll), tf.cast(k, tf.float32))\n\n return hard_l_coll\n\n return custom_bin_crossentropy\n\n\n\ndef modelToJson(model, json_model_path):\n \"\"\"\n Serialize model into json.\n \"\"\"\n model_json = model.to_json()\n\n with open(json_model_path,\"w\") as f:\n f.write(model_json)\n\n\ndef jsonToModel(json_model_path):\n 
\"\"\"\n Serialize json into model.\n \"\"\"\n with open(json_model_path, 'r') as json_file:\n loaded_model_json = json_file.read()\n\n model = model_from_json(loaded_model_json)\n return model\n\ndef write_to_file(dictionary, fname):\n \"\"\"\n Writes everything is in a dictionary in json model.\n \"\"\"\n with open(fname, \"w\") as f:\n json.dump(dictionary,f)\n print(\"Written file {}\".format(fname))\n"
] |
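A minimal usage sketch for the DroneDataGenerator / DroneDirectoryIterator pair defined in the code cell above, assuming that cell is importable as a module named drone_utils and that <data_root> follows the folder_n/images plus sync_steering.txt (or labels.txt) layout described in its docstring; the module name and the directory path are placeholders for this illustration only and are not part of the record.

# Sketch only: pull one augmented batch from the iterator defined in the code cell above.
from drone_utils import DroneDataGenerator   # assumed module name for the code cell

datagen = DroneDataGenerator(rescale=1. / 255)           # any standard Keras augmentation args
batches = datagen.flow_from_directory(
    '<data_root>',                                       # placeholder dataset root
    target_size=(224, 224), crop_size=(250, 250),
    color_mode='grayscale', batch_size=32, shuffle=True)

batch_x, (batch_steer, batch_coll) = next(batches)
# batch_x:     (32, 250, 250, 1) grayscale crops
# batch_steer: (32, 2) where column 0 flags steering samples and column 1 is the angle
# batch_coll:  (32, 2) where column 1 holds the collision label for collision samples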
[
[
"tensorflow.reduce_sum",
"tensorflow.equal",
"tensorflow.minimum",
"tensorflow.squeeze",
"tensorflow.cast",
"numpy.concatenate",
"tensorflow.nn.top_k",
"tensorflow.gather",
"numpy.array",
"numpy.loadtxt"
]
] |
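The apis list above enumerates the TensorFlow and NumPy calls detected in this record's code. As a compact illustration of how tf.equal, tf.cast, tf.reduce_sum, tf.minimum, tf.nn.top_k and tf.gather combine in the hard-mining steering loss from the code cell, here is a sketch rewritten for TensorFlow 2.x eager execution: the legacy squeeze_dims= argument is replaced by axis=, the zero-steering-sample guard is omitted for brevity, and the toy batch values are invented for the example.

import tensorflow as tf

def hard_mining_mse(k):
    # Average only the k largest squared steering errors in the batch.
    def custom_mse(y_true, y_pred):
        t = y_true[:, 0]                                    # 1.0 marks a steering sample
        n_steer = tf.reduce_sum(tf.cast(tf.equal(t, 1), tf.int32))
        pred_steer = tf.squeeze(y_pred, axis=-1)
        l_steer = t * tf.square(pred_steer - y_true[:, 1])  # per-sample squared error, masked by t
        k_min = tf.minimum(k, n_steer)
        _, indices = tf.nn.top_k(l_steer, k=k_min)          # hardest k_min samples
        hard = tf.gather(l_steer, indices)
        return tf.reduce_sum(hard) / tf.cast(k, tf.float32)
    return custom_mse

# Toy batch: column 0 is the experiment type t, column 1 the ground-truth steering.
y_true = tf.constant([[1.0, 0.1], [1.0, -0.4], [0.0, 1.0]])
y_pred = tf.constant([[0.0], [0.5], [0.3]])
print(hard_mining_mse(k=2)(y_true, y_pred).numpy())         # sum of the two hardest errors divided by k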
CellEight/vision
|
[
"e8dded4c05ee403633529cef2e09bf94b07f6170"
] |
[
"test/test_datasets.py"
] |
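The code cell that follows is torchvision's dataset test suite, which builds small fake fixtures on disk and then checks dataset length and returned feature types. Below is a stripped-down sketch of that pattern using only a temporary directory and torchvision.datasets.ImageFolder; the make_fake_image_folder helper is invented for this illustration and is not part of torchvision or of the test file.

import os
import tempfile
import unittest

import numpy as np
import PIL.Image
from torchvision import datasets

def make_fake_image_folder(root, classes=("a", "b"), per_class=2):
    # Hypothetical helper: write tiny random PNGs under root/<class>/ so an
    # ImageFolder-style dataset has something to index.
    for cls in classes:
        os.makedirs(os.path.join(root, cls))
        for idx in range(per_class):
            arr = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
            PIL.Image.fromarray(arr).save(os.path.join(root, cls, f"{idx}.png"))
    return len(classes) * per_class

class FakeImageFolderTest(unittest.TestCase):
    def test_length_and_feature_types(self):
        with tempfile.TemporaryDirectory() as root:
            num = make_fake_image_folder(root)
            ds = datasets.ImageFolder(root)
            self.assertEqual(len(ds), num)
            img, target = ds[0]
            self.assertIsInstance(img, PIL.Image.Image)
            self.assertIsInstance(target, int)

if __name__ == "__main__":
    unittest.main()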
[
"import contextlib\nimport sys\nimport os\nimport unittest\nfrom unittest import mock\nimport numpy as np\nimport PIL\nfrom PIL import Image\nfrom torch._utils_internal import get_file_path_2\nimport torchvision\nfrom torchvision.datasets import utils\nfrom common_utils import get_tmp_dir\nfrom fakedata_generation import svhn_root, places365_root, widerface_root, stl10_root\nimport xml.etree.ElementTree as ET\nfrom urllib.request import Request, urlopen\nimport itertools\nimport datasets_utils\nimport pathlib\nimport pickle\nfrom torchvision import datasets\nimport torch\nimport shutil\nimport json\nimport random\nimport bz2\nimport torch.nn.functional as F\nimport string\nimport io\nimport zipfile\n\n\ntry:\n import scipy\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\ntry:\n import av\n HAS_PYAV = True\nexcept ImportError:\n HAS_PYAV = False\n\n\nclass DatasetTestcase(unittest.TestCase):\n def generic_classification_dataset_test(self, dataset, num_images=1):\n self.assertEqual(len(dataset), num_images)\n img, target = dataset[0]\n self.assertTrue(isinstance(img, PIL.Image.Image))\n self.assertTrue(isinstance(target, int))\n\n def generic_segmentation_dataset_test(self, dataset, num_images=1):\n self.assertEqual(len(dataset), num_images)\n img, target = dataset[0]\n self.assertTrue(isinstance(img, PIL.Image.Image))\n self.assertTrue(isinstance(target, PIL.Image.Image))\n\n\nclass Tester(DatasetTestcase):\n @mock.patch('torchvision.datasets.SVHN._check_integrity')\n @unittest.skipIf(not HAS_SCIPY, \"scipy unavailable\")\n def test_svhn(self, mock_check):\n mock_check.return_value = True\n with svhn_root() as root:\n dataset = torchvision.datasets.SVHN(root, split=\"train\")\n self.generic_classification_dataset_test(dataset, num_images=2)\n\n dataset = torchvision.datasets.SVHN(root, split=\"test\")\n self.generic_classification_dataset_test(dataset, num_images=2)\n\n dataset = torchvision.datasets.SVHN(root, split=\"extra\")\n self.generic_classification_dataset_test(dataset, num_images=2)\n\n def test_places365(self):\n for split, small in itertools.product((\"train-standard\", \"train-challenge\", \"val\"), (False, True)):\n with places365_root(split=split, small=small) as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(root, split=split, small=small, download=True)\n self.generic_classification_dataset_test(dataset, num_images=len(data[\"imgs\"]))\n\n def test_places365_transforms(self):\n expected_image = \"image\"\n expected_target = \"target\"\n\n def transform(image):\n return expected_image\n\n def target_transform(target):\n return expected_target\n\n with places365_root() as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(\n root, transform=transform, target_transform=target_transform, download=True\n )\n actual_image, actual_target = dataset[0]\n\n self.assertEqual(actual_image, expected_image)\n self.assertEqual(actual_target, expected_target)\n\n def test_places365_devkit_download(self):\n for split in (\"train-standard\", \"train-challenge\", \"val\"):\n with self.subTest(split=split):\n with places365_root(split=split) as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(root, split=split, download=True)\n\n with self.subTest(\"classes\"):\n self.assertSequenceEqual(dataset.classes, data[\"classes\"])\n\n with self.subTest(\"class_to_idx\"):\n self.assertDictEqual(dataset.class_to_idx, data[\"class_to_idx\"])\n\n with self.subTest(\"imgs\"):\n 
self.assertSequenceEqual(dataset.imgs, data[\"imgs\"])\n\n def test_places365_devkit_no_download(self):\n for split in (\"train-standard\", \"train-challenge\", \"val\"):\n with self.subTest(split=split):\n with places365_root(split=split) as places365:\n root, data = places365\n\n with self.assertRaises(RuntimeError):\n torchvision.datasets.Places365(root, split=split, download=False)\n\n def test_places365_images_download(self):\n for split, small in itertools.product((\"train-standard\", \"train-challenge\", \"val\"), (False, True)):\n with self.subTest(split=split, small=small):\n with places365_root(split=split, small=small) as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(root, split=split, small=small, download=True)\n\n assert all(os.path.exists(item[0]) for item in dataset.imgs)\n\n def test_places365_images_download_preexisting(self):\n split = \"train-standard\"\n small = False\n images_dir = \"data_large_standard\"\n\n with places365_root(split=split, small=small) as places365:\n root, data = places365\n os.mkdir(os.path.join(root, images_dir))\n\n with self.assertRaises(RuntimeError):\n torchvision.datasets.Places365(root, split=split, small=small, download=True)\n\n def test_places365_repr_smoke(self):\n with places365_root() as places365:\n root, data = places365\n\n dataset = torchvision.datasets.Places365(root, download=True)\n self.assertIsInstance(repr(dataset), str)\n\n\nclass STL10Tester(DatasetTestcase):\n @contextlib.contextmanager\n def mocked_root(self):\n with stl10_root() as (root, data):\n yield root, data\n\n @contextlib.contextmanager\n def mocked_dataset(self, pre_extract=False, download=True, **kwargs):\n with self.mocked_root() as (root, data):\n if pre_extract:\n utils.extract_archive(os.path.join(root, data[\"archive\"]))\n dataset = torchvision.datasets.STL10(root, download=download, **kwargs)\n yield dataset, data\n\n def test_not_found(self):\n with self.assertRaises(RuntimeError):\n with self.mocked_dataset(download=False):\n pass\n\n def test_splits(self):\n for split in ('train', 'train+unlabeled', 'unlabeled', 'test'):\n with self.mocked_dataset(split=split) as (dataset, data):\n num_images = sum([data[\"num_images_in_split\"][part] for part in split.split(\"+\")])\n self.generic_classification_dataset_test(dataset, num_images=num_images)\n\n def test_folds(self):\n for fold in range(10):\n with self.mocked_dataset(split=\"train\", folds=fold) as (dataset, data):\n num_images = data[\"num_images_in_folds\"][fold]\n self.assertEqual(len(dataset), num_images)\n\n def test_invalid_folds1(self):\n with self.assertRaises(ValueError):\n with self.mocked_dataset(folds=10):\n pass\n\n def test_invalid_folds2(self):\n with self.assertRaises(ValueError):\n with self.mocked_dataset(folds=\"0\"):\n pass\n\n def test_transforms(self):\n expected_image = \"image\"\n expected_target = \"target\"\n\n def transform(image):\n return expected_image\n\n def target_transform(target):\n return expected_target\n\n with self.mocked_dataset(transform=transform, target_transform=target_transform) as (dataset, _):\n actual_image, actual_target = dataset[0]\n\n self.assertEqual(actual_image, expected_image)\n self.assertEqual(actual_target, expected_target)\n\n def test_unlabeled(self):\n with self.mocked_dataset(split=\"unlabeled\") as (dataset, _):\n labels = [dataset[idx][1] for idx in range(len(dataset))]\n self.assertTrue(all([label == -1 for label in labels]))\n\n 
@unittest.mock.patch(\"torchvision.datasets.stl10.download_and_extract_archive\")\n def test_download_preexisting(self, mock):\n with self.mocked_dataset(pre_extract=True) as (dataset, data):\n mock.assert_not_called()\n\n def test_repr_smoke(self):\n with self.mocked_dataset() as (dataset, _):\n self.assertIsInstance(repr(dataset), str)\n\n\nclass Caltech101TestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Caltech101\n FEATURE_TYPES = (PIL.Image.Image, (int, np.ndarray, tuple))\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(\n target_type=(\"category\", \"annotation\", [\"category\", \"annotation\"])\n )\n REQUIRED_PACKAGES = (\"scipy\",)\n\n def inject_fake_data(self, tmpdir, config):\n root = pathlib.Path(tmpdir) / \"caltech101\"\n images = root / \"101_ObjectCategories\"\n annotations = root / \"Annotations\"\n\n categories = ((\"Faces\", \"Faces_2\"), (\"helicopter\", \"helicopter\"), (\"ying_yang\", \"ying_yang\"))\n num_images_per_category = 2\n\n for image_category, annotation_category in categories:\n datasets_utils.create_image_folder(\n root=images,\n name=image_category,\n file_name_fn=lambda idx: f\"image_{idx + 1:04d}.jpg\",\n num_examples=num_images_per_category,\n )\n self._create_annotation_folder(\n root=annotations,\n name=annotation_category,\n file_name_fn=lambda idx: f\"annotation_{idx + 1:04d}.mat\",\n num_examples=num_images_per_category,\n )\n\n # This is included in the original archive, but is removed by the dataset. Thus, an empty directory suffices.\n os.makedirs(images / \"BACKGROUND_Google\")\n\n return num_images_per_category * len(categories)\n\n def _create_annotation_folder(self, root, name, file_name_fn, num_examples):\n root = pathlib.Path(root) / name\n os.makedirs(root)\n\n for idx in range(num_examples):\n self._create_annotation_file(root, file_name_fn(idx))\n\n def _create_annotation_file(self, root, name):\n mdict = dict(obj_contour=torch.rand((2, torch.randint(3, 6, size=())), dtype=torch.float64).numpy())\n datasets_utils.lazy_importer.scipy.io.savemat(str(pathlib.Path(root) / name), mdict)\n\n def test_combined_targets(self):\n target_types = [\"category\", \"annotation\"]\n\n individual_targets = []\n for target_type in target_types:\n with self.create_dataset(target_type=target_type) as (dataset, _):\n _, target = dataset[0]\n individual_targets.append(target)\n\n with self.create_dataset(target_type=target_types) as (dataset, _):\n _, combined_targets = dataset[0]\n\n actual = len(individual_targets)\n expected = len(combined_targets)\n self.assertEqual(\n actual,\n expected,\n f\"The number of the returned combined targets does not match the the number targets if requested \"\n f\"individually: {actual} != {expected}\",\n )\n\n for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):\n with self.subTest(target_type=target_type):\n actual = type(combined_target)\n expected = type(individual_target)\n self.assertIs(\n actual,\n expected,\n f\"Type of the combined target does not match the type of the corresponding individual target: \"\n f\"{actual} is not {expected}\",\n )\n\n\nclass Caltech256TestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Caltech256\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir) / \"caltech256\" / \"256_ObjectCategories\"\n\n categories = ((1, \"ak47\"), (127, \"laptop-101\"), (257, \"clutter\"))\n num_images_per_category = 2\n\n for idx, category in categories:\n 
datasets_utils.create_image_folder(\n tmpdir,\n name=f\"{idx:03d}.{category}\",\n file_name_fn=lambda image_idx: f\"{idx:03d}_{image_idx + 1:04d}.jpg\",\n num_examples=num_images_per_category,\n )\n\n return num_images_per_category * len(categories)\n\n\nclass WIDERFaceTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.WIDERFace\n FEATURE_TYPES = (PIL.Image.Image, (dict, type(None))) # test split returns None as target\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=('train', 'val', 'test'))\n\n def inject_fake_data(self, tmpdir, config):\n widerface_dir = pathlib.Path(tmpdir) / 'widerface'\n annotations_dir = widerface_dir / 'wider_face_split'\n os.makedirs(annotations_dir)\n\n split_to_idx = split_to_num_examples = {\n \"train\": 1,\n \"val\": 2,\n \"test\": 3,\n }\n\n # We need to create all folders regardless of the split in config\n for split in ('train', 'val', 'test'):\n split_idx = split_to_idx[split]\n num_examples = split_to_num_examples[split]\n\n datasets_utils.create_image_folder(\n root=tmpdir,\n name=widerface_dir / f'WIDER_{split}' / 'images' / '0--Parade',\n file_name_fn=lambda image_idx: f\"0_Parade_marchingband_1_{split_idx + image_idx}.jpg\",\n num_examples=num_examples,\n )\n\n annotation_file_name = {\n 'train': annotations_dir / 'wider_face_train_bbx_gt.txt',\n 'val': annotations_dir / 'wider_face_val_bbx_gt.txt',\n 'test': annotations_dir / 'wider_face_test_filelist.txt',\n }[split]\n\n annotation_content = {\n \"train\": \"\".join(\n f\"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\\n1\\n449 330 122 149 0 0 0 0 0 0\\n\"\n for image_idx in range(num_examples)\n ),\n \"val\": \"\".join(\n f\"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\\n1\\n501 160 285 443 0 0 0 0 0 0\\n\"\n for image_idx in range(num_examples)\n ),\n \"test\": \"\".join(\n f\"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\\n\"\n for image_idx in range(num_examples)\n ),\n }[split]\n\n with open(annotation_file_name, \"w\") as annotation_file:\n annotation_file.write(annotation_content)\n\n return split_to_num_examples[config[\"split\"]]\n\n\nclass CityScapesTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Cityscapes\n TARGET_TYPES = (\n \"instance\",\n \"semantic\",\n \"polygon\",\n \"color\",\n )\n ADDITIONAL_CONFIGS = (\n *datasets_utils.combinations_grid(\n mode=(\"fine\",), split=(\"train\", \"test\", \"val\"), target_type=TARGET_TYPES\n ),\n *datasets_utils.combinations_grid(\n mode=(\"coarse\",),\n split=(\"train\", \"train_extra\", \"val\"),\n target_type=TARGET_TYPES,\n ),\n )\n FEATURE_TYPES = (PIL.Image.Image, (dict, PIL.Image.Image))\n\n def inject_fake_data(self, tmpdir, config):\n\n tmpdir = pathlib.Path(tmpdir)\n\n mode_to_splits = {\n \"Coarse\": [\"train\", \"train_extra\", \"val\"],\n \"Fine\": [\"train\", \"test\", \"val\"],\n }\n\n if config[\"split\"] == \"train\": # just for coverage of the number of samples\n cities = [\"bochum\", \"bremen\"]\n else:\n cities = [\"bochum\"]\n\n polygon_target = {\n \"imgHeight\": 1024,\n \"imgWidth\": 2048,\n \"objects\": [\n {\n \"label\": \"sky\",\n \"polygon\": [\n [1241, 0],\n [1234, 156],\n [1478, 197],\n [1611, 172],\n [1606, 0],\n ],\n },\n {\n \"label\": \"road\",\n \"polygon\": [\n [0, 448],\n [1331, 274],\n [1473, 265],\n [2047, 605],\n [2047, 1023],\n [0, 1023],\n ],\n },\n ],\n }\n\n for mode in [\"Coarse\", \"Fine\"]:\n gt_dir = tmpdir / f\"gt{mode}\"\n for split in mode_to_splits[mode]:\n for city in cities:\n def 
make_image(name, size=10):\n datasets_utils.create_image_folder(\n root=gt_dir / split,\n name=city,\n file_name_fn=lambda _: name,\n size=size,\n num_examples=1,\n )\n make_image(f\"{city}_000000_000000_gt{mode}_instanceIds.png\")\n make_image(f\"{city}_000000_000000_gt{mode}_labelIds.png\")\n make_image(f\"{city}_000000_000000_gt{mode}_color.png\", size=(4, 10, 10))\n\n polygon_target_name = gt_dir / split / city / f\"{city}_000000_000000_gt{mode}_polygons.json\"\n with open(polygon_target_name, \"w\") as outfile:\n json.dump(polygon_target, outfile)\n\n # Create leftImg8bit folder\n for split in ['test', 'train_extra', 'train', 'val']:\n for city in cities:\n datasets_utils.create_image_folder(\n root=tmpdir / \"leftImg8bit\" / split,\n name=city,\n file_name_fn=lambda _: f\"{city}_000000_000000_leftImg8bit.png\",\n num_examples=1,\n )\n\n info = {'num_examples': len(cities)}\n if config['target_type'] == 'polygon':\n info['expected_polygon_target'] = polygon_target\n return info\n\n def test_combined_targets(self):\n target_types = ['semantic', 'polygon', 'color']\n\n with self.create_dataset(target_type=target_types) as (dataset, _):\n output = dataset[0]\n self.assertTrue(isinstance(output, tuple))\n self.assertTrue(len(output) == 2)\n self.assertTrue(isinstance(output[0], PIL.Image.Image))\n self.assertTrue(isinstance(output[1], tuple))\n self.assertTrue(len(output[1]) == 3)\n self.assertTrue(isinstance(output[1][0], PIL.Image.Image)) # semantic\n self.assertTrue(isinstance(output[1][1], dict)) # polygon\n self.assertTrue(isinstance(output[1][2], PIL.Image.Image)) # color\n\n def test_feature_types_target_color(self):\n with self.create_dataset(target_type='color') as (dataset, _):\n color_img, color_target = dataset[0]\n self.assertTrue(isinstance(color_img, PIL.Image.Image))\n self.assertTrue(np.array(color_target).shape[2] == 4)\n\n def test_feature_types_target_polygon(self):\n with self.create_dataset(target_type='polygon') as (dataset, info):\n polygon_img, polygon_target = dataset[0]\n self.assertTrue(isinstance(polygon_img, PIL.Image.Image))\n self.assertEqual(polygon_target, info['expected_polygon_target'])\n\n\nclass ImageNetTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.ImageNet\n REQUIRED_PACKAGES = ('scipy',)\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=('train', 'val'))\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n wnid = 'n01234567'\n if config['split'] == 'train':\n num_examples = 3\n datasets_utils.create_image_folder(\n root=tmpdir,\n name=tmpdir / 'train' / wnid / wnid,\n file_name_fn=lambda image_idx: f\"{wnid}_{image_idx}.JPEG\",\n num_examples=num_examples,\n )\n else:\n num_examples = 1\n datasets_utils.create_image_folder(\n root=tmpdir,\n name=tmpdir / 'val' / wnid,\n file_name_fn=lambda image_ifx: \"ILSVRC2012_val_0000000{image_idx}.JPEG\",\n num_examples=num_examples,\n )\n\n wnid_to_classes = {wnid: [1]}\n torch.save((wnid_to_classes, None), tmpdir / 'meta.bin')\n return num_examples\n\n\nclass CIFAR10TestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.CIFAR10\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))\n\n _VERSION_CONFIG = dict(\n base_folder=\"cifar-10-batches-py\",\n train_files=tuple(f\"data_batch_{idx}\" for idx in range(1, 6)),\n test_files=(\"test_batch\",),\n labels_key=\"labels\",\n meta_file=\"batches.meta\",\n num_categories=10,\n categories_key=\"label_names\",\n )\n\n def inject_fake_data(self, tmpdir, 
config):\n tmpdir = pathlib.Path(tmpdir) / self._VERSION_CONFIG[\"base_folder\"]\n os.makedirs(tmpdir)\n\n num_images_per_file = 1\n for name in itertools.chain(self._VERSION_CONFIG[\"train_files\"], self._VERSION_CONFIG[\"test_files\"]):\n self._create_batch_file(tmpdir, name, num_images_per_file)\n\n categories = self._create_meta_file(tmpdir)\n\n return dict(\n num_examples=num_images_per_file\n * len(self._VERSION_CONFIG[\"train_files\"] if config[\"train\"] else self._VERSION_CONFIG[\"test_files\"]),\n categories=categories,\n )\n\n def _create_batch_file(self, root, name, num_images):\n data = datasets_utils.create_image_or_video_tensor((num_images, 32 * 32 * 3))\n labels = np.random.randint(0, self._VERSION_CONFIG[\"num_categories\"], size=num_images).tolist()\n self._create_binary_file(root, name, {\"data\": data, self._VERSION_CONFIG[\"labels_key\"]: labels})\n\n def _create_meta_file(self, root):\n categories = [\n f\"{idx:0{len(str(self._VERSION_CONFIG['num_categories'] - 1))}d}\"\n for idx in range(self._VERSION_CONFIG[\"num_categories\"])\n ]\n self._create_binary_file(\n root, self._VERSION_CONFIG[\"meta_file\"], {self._VERSION_CONFIG[\"categories_key\"]: categories}\n )\n return categories\n\n def _create_binary_file(self, root, name, content):\n with open(pathlib.Path(root) / name, \"wb\") as fh:\n pickle.dump(content, fh)\n\n def test_class_to_idx(self):\n with self.create_dataset() as (dataset, info):\n expected = {category: label for label, category in enumerate(info[\"categories\"])}\n actual = dataset.class_to_idx\n self.assertEqual(actual, expected)\n\n\nclass CIFAR100(CIFAR10TestCase):\n DATASET_CLASS = datasets.CIFAR100\n\n _VERSION_CONFIG = dict(\n base_folder=\"cifar-100-python\",\n train_files=(\"train\",),\n test_files=(\"test\",),\n labels_key=\"fine_labels\",\n meta_file=\"meta\",\n num_categories=100,\n categories_key=\"fine_label_names\",\n )\n\n\nclass CelebATestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.CelebA\n FEATURE_TYPES = (PIL.Image.Image, (torch.Tensor, int, tuple, type(None)))\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(\n split=(\"train\", \"valid\", \"test\", \"all\"),\n target_type=(\"attr\", \"identity\", \"bbox\", \"landmarks\", [\"attr\", \"identity\"]),\n )\n REQUIRED_PACKAGES = (\"pandas\",)\n\n _SPLIT_TO_IDX = dict(train=0, valid=1, test=2)\n\n def inject_fake_data(self, tmpdir, config):\n base_folder = pathlib.Path(tmpdir) / \"celeba\"\n os.makedirs(base_folder)\n\n num_images, num_images_per_split = self._create_split_txt(base_folder)\n\n datasets_utils.create_image_folder(\n base_folder, \"img_align_celeba\", lambda idx: f\"{idx + 1:06d}.jpg\", num_images\n )\n attr_names = self._create_attr_txt(base_folder, num_images)\n self._create_identity_txt(base_folder, num_images)\n self._create_bbox_txt(base_folder, num_images)\n self._create_landmarks_txt(base_folder, num_images)\n\n return dict(num_examples=num_images_per_split[config[\"split\"]], attr_names=attr_names)\n\n def _create_split_txt(self, root):\n num_images_per_split = dict(train=3, valid=2, test=1)\n\n data = [\n [self._SPLIT_TO_IDX[split]] for split, num_images in num_images_per_split.items() for _ in range(num_images)\n ]\n self._create_txt(root, \"list_eval_partition.txt\", data)\n\n num_images_per_split[\"all\"] = num_images = sum(num_images_per_split.values())\n return num_images, num_images_per_split\n\n def _create_attr_txt(self, root, num_images):\n header = (\"5_o_Clock_Shadow\", \"Young\")\n data = torch.rand((num_images, 
len(header))).ge(0.5).int().mul(2).sub(1).tolist()\n self._create_txt(root, \"list_attr_celeba.txt\", data, header=header, add_num_examples=True)\n return header\n\n def _create_identity_txt(self, root, num_images):\n data = torch.randint(1, 4, size=(num_images, 1)).tolist()\n self._create_txt(root, \"identity_CelebA.txt\", data)\n\n def _create_bbox_txt(self, root, num_images):\n header = (\"x_1\", \"y_1\", \"width\", \"height\")\n data = torch.randint(10, size=(num_images, len(header))).tolist()\n self._create_txt(\n root, \"list_bbox_celeba.txt\", data, header=header, add_num_examples=True, add_image_id_to_header=True\n )\n\n def _create_landmarks_txt(self, root, num_images):\n header = (\"lefteye_x\", \"rightmouth_y\")\n data = torch.randint(10, size=(num_images, len(header))).tolist()\n self._create_txt(root, \"list_landmarks_align_celeba.txt\", data, header=header, add_num_examples=True)\n\n def _create_txt(self, root, name, data, header=None, add_num_examples=False, add_image_id_to_header=False):\n with open(pathlib.Path(root) / name, \"w\") as fh:\n if add_num_examples:\n fh.write(f\"{len(data)}\\n\")\n\n if header:\n if add_image_id_to_header:\n header = (\"image_id\", *header)\n fh.write(f\"{' '.join(header)}\\n\")\n\n for idx, line in enumerate(data, 1):\n fh.write(f\"{' '.join((f'{idx:06d}.jpg', *[str(value) for value in line]))}\\n\")\n\n def test_combined_targets(self):\n target_types = [\"attr\", \"identity\", \"bbox\", \"landmarks\"]\n\n individual_targets = []\n for target_type in target_types:\n with self.create_dataset(target_type=target_type) as (dataset, _):\n _, target = dataset[0]\n individual_targets.append(target)\n\n with self.create_dataset(target_type=target_types) as (dataset, _):\n _, combined_targets = dataset[0]\n\n actual = len(individual_targets)\n expected = len(combined_targets)\n self.assertEqual(\n actual,\n expected,\n f\"The number of the returned combined targets does not match the the number targets if requested \"\n f\"individually: {actual} != {expected}\",\n )\n\n for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets):\n with self.subTest(target_type=target_type):\n actual = type(combined_target)\n expected = type(individual_target)\n self.assertIs(\n actual,\n expected,\n f\"Type of the combined target does not match the type of the corresponding individual target: \"\n f\"{actual} is not {expected}\",\n )\n\n def test_no_target(self):\n with self.create_dataset(target_type=[]) as (dataset, _):\n _, target = dataset[0]\n\n self.assertIsNone(target)\n\n def test_attr_names(self):\n with self.create_dataset() as (dataset, info):\n self.assertEqual(tuple(dataset.attr_names), info[\"attr_names\"])\n\n\nclass VOCSegmentationTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.VOCSegmentation\n FEATURE_TYPES = (PIL.Image.Image, PIL.Image.Image)\n\n ADDITIONAL_CONFIGS = (\n *datasets_utils.combinations_grid(\n year=[f\"20{year:02d}\" for year in range(7, 13)], image_set=(\"train\", \"val\", \"trainval\")\n ),\n dict(year=\"2007\", image_set=\"test\"),\n dict(year=\"2007-test\", image_set=\"test\"),\n )\n\n def inject_fake_data(self, tmpdir, config):\n year, is_test_set = (\n (\"2007\", True)\n if config[\"year\"] == \"2007-test\" or config[\"image_set\"] == \"test\"\n else (config[\"year\"], False)\n )\n image_set = config[\"image_set\"]\n\n base_dir = pathlib.Path(tmpdir)\n if year == \"2011\":\n base_dir /= \"TrainVal\"\n base_dir = base_dir / \"VOCdevkit\" / 
f\"VOC{year}\"\n os.makedirs(base_dir)\n\n num_images, num_images_per_image_set = self._create_image_set_files(base_dir, \"ImageSets\", is_test_set)\n datasets_utils.create_image_folder(base_dir, \"JPEGImages\", lambda idx: f\"{idx:06d}.jpg\", num_images)\n\n datasets_utils.create_image_folder(base_dir, \"SegmentationClass\", lambda idx: f\"{idx:06d}.png\", num_images)\n annotation = self._create_annotation_files(base_dir, \"Annotations\", num_images)\n\n return dict(num_examples=num_images_per_image_set[image_set], annotation=annotation)\n\n def _create_image_set_files(self, root, name, is_test_set):\n root = pathlib.Path(root) / name\n src = pathlib.Path(root) / \"Main\"\n os.makedirs(src, exist_ok=True)\n\n idcs = dict(train=(0, 1, 2), val=(3, 4), test=(5,))\n idcs[\"trainval\"] = (*idcs[\"train\"], *idcs[\"val\"])\n\n for image_set in (\"test\",) if is_test_set else (\"train\", \"val\", \"trainval\"):\n self._create_image_set_file(src, image_set, idcs[image_set])\n\n shutil.copytree(src, root / \"Segmentation\")\n\n num_images = max(itertools.chain(*idcs.values())) + 1\n num_images_per_image_set = dict([(image_set, len(idcs_)) for image_set, idcs_ in idcs.items()])\n return num_images, num_images_per_image_set\n\n def _create_image_set_file(self, root, image_set, idcs):\n with open(pathlib.Path(root) / f\"{image_set}.txt\", \"w\") as fh:\n fh.writelines([f\"{idx:06d}\\n\" for idx in idcs])\n\n def _create_annotation_files(self, root, name, num_images):\n root = pathlib.Path(root) / name\n os.makedirs(root)\n\n for idx in range(num_images):\n annotation = self._create_annotation_file(root, f\"{idx:06d}.xml\")\n\n return annotation\n\n def _create_annotation_file(self, root, name):\n def add_child(parent, name, text=None):\n child = ET.SubElement(parent, name)\n child.text = text\n return child\n\n def add_name(obj, name=\"dog\"):\n add_child(obj, \"name\", name)\n return name\n\n def add_bndbox(obj, bndbox=None):\n if bndbox is None:\n bndbox = {\"xmin\": \"1\", \"xmax\": \"2\", \"ymin\": \"3\", \"ymax\": \"4\"}\n\n obj = add_child(obj, \"bndbox\")\n for name, text in bndbox.items():\n add_child(obj, name, text)\n\n return bndbox\n\n annotation = ET.Element(\"annotation\")\n obj = add_child(annotation, \"object\")\n data = dict(name=add_name(obj), bndbox=add_bndbox(obj))\n\n with open(pathlib.Path(root) / name, \"wb\") as fh:\n fh.write(ET.tostring(annotation))\n\n return data\n\n\nclass VOCDetectionTestCase(VOCSegmentationTestCase):\n DATASET_CLASS = datasets.VOCDetection\n FEATURE_TYPES = (PIL.Image.Image, dict)\n\n def test_annotations(self):\n with self.create_dataset() as (dataset, info):\n _, target = dataset[0]\n\n self.assertIn(\"annotation\", target)\n annotation = target[\"annotation\"]\n\n self.assertIn(\"object\", annotation)\n objects = annotation[\"object\"]\n\n self.assertEqual(len(objects), 1)\n object = objects[0]\n\n self.assertEqual(object, info[\"annotation\"])\n\n\nclass CocoDetectionTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.CocoDetection\n FEATURE_TYPES = (PIL.Image.Image, list)\n\n REQUIRED_PACKAGES = (\"pycocotools\",)\n\n _IMAGE_FOLDER = \"images\"\n _ANNOTATIONS_FOLDER = \"annotations\"\n _ANNOTATIONS_FILE = \"annotations.json\"\n\n def dataset_args(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n root = tmpdir / self._IMAGE_FOLDER\n annotation_file = tmpdir / self._ANNOTATIONS_FOLDER / self._ANNOTATIONS_FILE\n return root, annotation_file\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n 
num_images = 3\n num_annotations_per_image = 2\n\n files = datasets_utils.create_image_folder(\n tmpdir, name=self._IMAGE_FOLDER, file_name_fn=lambda idx: f\"{idx:012d}.jpg\", num_examples=num_images\n )\n file_names = [file.relative_to(tmpdir / self._IMAGE_FOLDER) for file in files]\n\n annotation_folder = tmpdir / self._ANNOTATIONS_FOLDER\n os.makedirs(annotation_folder)\n info = self._create_annotation_file(\n annotation_folder, self._ANNOTATIONS_FILE, file_names, num_annotations_per_image\n )\n\n info[\"num_examples\"] = num_images\n return info\n\n def _create_annotation_file(self, root, name, file_names, num_annotations_per_image):\n image_ids = [int(file_name.stem) for file_name in file_names]\n images = [dict(file_name=str(file_name), id=id) for file_name, id in zip(file_names, image_ids)]\n\n annotations, info = self._create_annotations(image_ids, num_annotations_per_image)\n self._create_json(root, name, dict(images=images, annotations=annotations))\n\n return info\n\n def _create_annotations(self, image_ids, num_annotations_per_image):\n annotations = datasets_utils.combinations_grid(\n image_id=image_ids, bbox=([1.0, 2.0, 3.0, 4.0],) * num_annotations_per_image\n )\n for id, annotation in enumerate(annotations):\n annotation[\"id\"] = id\n return annotations, dict()\n\n def _create_json(self, root, name, content):\n file = pathlib.Path(root) / name\n with open(file, \"w\") as fh:\n json.dump(content, fh)\n return file\n\n\nclass CocoCaptionsTestCase(CocoDetectionTestCase):\n DATASET_CLASS = datasets.CocoCaptions\n\n def _create_annotations(self, image_ids, num_annotations_per_image):\n captions = [str(idx) for idx in range(num_annotations_per_image)]\n annotations = datasets_utils.combinations_grid(image_id=image_ids, caption=captions)\n for id, annotation in enumerate(annotations):\n annotation[\"id\"] = id\n return annotations, dict(captions=captions)\n\n def test_captions(self):\n with self.create_dataset() as (dataset, info):\n _, captions = dataset[0]\n self.assertEqual(tuple(captions), tuple(info[\"captions\"]))\n\n\nclass UCF101TestCase(datasets_utils.VideoDatasetTestCase):\n DATASET_CLASS = datasets.UCF101\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False))\n\n _VIDEO_FOLDER = \"videos\"\n _ANNOTATIONS_FOLDER = \"annotations\"\n\n def dataset_args(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n root = tmpdir / self._VIDEO_FOLDER\n annotation_path = tmpdir / self._ANNOTATIONS_FOLDER\n return root, annotation_path\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n video_folder = tmpdir / self._VIDEO_FOLDER\n os.makedirs(video_folder)\n video_files = self._create_videos(video_folder)\n\n annotations_folder = tmpdir / self._ANNOTATIONS_FOLDER\n os.makedirs(annotations_folder)\n num_examples = self._create_annotation_files(annotations_folder, video_files, config[\"fold\"], config[\"train\"])\n\n return num_examples\n\n def _create_videos(self, root, num_examples_per_class=3):\n def file_name_fn(cls, idx, clips_per_group=2):\n return f\"v_{cls}_g{(idx // clips_per_group) + 1:02d}_c{(idx % clips_per_group) + 1:02d}.avi\"\n\n video_files = [\n datasets_utils.create_video_folder(root, cls, lambda idx: file_name_fn(cls, idx), num_examples_per_class)\n for cls in (\"ApplyEyeMakeup\", \"YoYo\")\n ]\n return [path.relative_to(root) for path in itertools.chain(*video_files)]\n\n def _create_annotation_files(self, root, video_files, fold, train):\n current_videos = random.sample(video_files, 
random.randrange(1, len(video_files) - 1))\n current_annotation = self._annotation_file_name(fold, train)\n self._create_annotation_file(root, current_annotation, current_videos)\n\n other_videos = set(video_files) - set(current_videos)\n other_annotations = [\n self._annotation_file_name(fold, train) for fold, train in itertools.product((1, 2, 3), (True, False))\n ]\n other_annotations.remove(current_annotation)\n for name in other_annotations:\n self._create_annotation_file(root, name, other_videos)\n\n return len(current_videos)\n\n def _annotation_file_name(self, fold, train):\n return f\"{'train' if train else 'test'}list{fold:02d}.txt\"\n\n def _create_annotation_file(self, root, name, video_files):\n with open(pathlib.Path(root) / name, \"w\") as fh:\n fh.writelines(f\"{file}\\n\" for file in sorted(video_files))\n\n\nclass LSUNTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.LSUN\n\n REQUIRED_PACKAGES = (\"lmdb\",)\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(\n classes=(\"train\", \"test\", \"val\", [\"bedroom_train\", \"church_outdoor_train\"])\n )\n\n _CATEGORIES = (\n \"bedroom\",\n \"bridge\",\n \"church_outdoor\",\n \"classroom\",\n \"conference_room\",\n \"dining_room\",\n \"kitchen\",\n \"living_room\",\n \"restaurant\",\n \"tower\",\n )\n\n def inject_fake_data(self, tmpdir, config):\n root = pathlib.Path(tmpdir)\n\n num_images = 0\n for cls in self._parse_classes(config[\"classes\"]):\n num_images += self._create_lmdb(root, cls)\n\n return num_images\n\n @contextlib.contextmanager\n def create_dataset(\n self,\n *args, **kwargs\n ):\n with super().create_dataset(*args, **kwargs) as output:\n yield output\n # Currently datasets.LSUN caches the keys in the current directory rather than in the root directory. Thus,\n # this creates a number of unique _cache_* files in the current directory that will not be removed together\n # with the temporary directory\n for file in os.listdir(os.getcwd()):\n if file.startswith(\"_cache_\"):\n os.remove(file)\n\n def _parse_classes(self, classes):\n if not isinstance(classes, str):\n return classes\n\n split = classes\n if split == \"test\":\n return [split]\n\n return [f\"{category}_{split}\" for category in self._CATEGORIES]\n\n def _create_lmdb(self, root, cls):\n lmdb = datasets_utils.lazy_importer.lmdb\n hexdigits_lowercase = string.digits + string.ascii_lowercase[:6]\n\n folder = f\"{cls}_lmdb\"\n\n num_images = torch.randint(1, 4, size=()).item()\n format = \"png\"\n files = datasets_utils.create_image_folder(root, folder, lambda idx: f\"{idx}.{format}\", num_images)\n\n with lmdb.open(str(root / folder)) as env, env.begin(write=True) as txn:\n for file in files:\n key = \"\".join(random.choice(hexdigits_lowercase) for _ in range(40)).encode()\n\n buffer = io.BytesIO()\n Image.open(file).save(buffer, format)\n buffer.seek(0)\n value = buffer.read()\n\n txn.put(key, value)\n\n os.remove(file)\n\n return num_images\n\n def test_not_found_or_corrupted(self):\n # LSUN does not raise built-in exception, but a custom one. 
It is expressive enough to not 'cast' it to\n # RuntimeError or FileNotFoundError that are normally checked by this test.\n with self.assertRaises(datasets_utils.lazy_importer.lmdb.Error):\n super().test_not_found_or_corrupted()\n\n\nclass Kinetics400TestCase(datasets_utils.VideoDatasetTestCase):\n DATASET_CLASS = datasets.Kinetics400\n\n def inject_fake_data(self, tmpdir, config):\n classes = (\"Abseiling\", \"Zumba\")\n num_videos_per_class = 2\n\n digits = string.ascii_letters + string.digits + \"-_\"\n for cls in classes:\n datasets_utils.create_video_folder(\n tmpdir,\n cls,\n lambda _: f\"{datasets_utils.create_random_string(11, digits)}.avi\",\n num_videos_per_class,\n )\n\n return num_videos_per_class * len(classes)\n\n\nclass HMDB51TestCase(datasets_utils.VideoDatasetTestCase):\n DATASET_CLASS = datasets.HMDB51\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False))\n\n _VIDEO_FOLDER = \"videos\"\n _SPLITS_FOLDER = \"splits\"\n _CLASSES = (\"brush_hair\", \"wave\")\n\n def dataset_args(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n root = tmpdir / self._VIDEO_FOLDER\n annotation_path = tmpdir / self._SPLITS_FOLDER\n return root, annotation_path\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n video_folder = tmpdir / self._VIDEO_FOLDER\n os.makedirs(video_folder)\n video_files = self._create_videos(video_folder)\n\n splits_folder = tmpdir / self._SPLITS_FOLDER\n os.makedirs(splits_folder)\n num_examples = self._create_split_files(splits_folder, video_files, config[\"fold\"], config[\"train\"])\n\n return num_examples\n\n def _create_videos(self, root, num_examples_per_class=3):\n def file_name_fn(cls, idx, clips_per_group=2):\n return f\"{cls}_{(idx // clips_per_group) + 1:d}_{(idx % clips_per_group) + 1:d}.avi\"\n\n return [\n (\n cls,\n datasets_utils.create_video_folder(\n root,\n cls,\n lambda idx: file_name_fn(cls, idx),\n num_examples_per_class,\n ),\n )\n for cls in self._CLASSES\n ]\n\n def _create_split_files(self, root, video_files, fold, train):\n num_videos = num_train_videos = 0\n\n for cls, videos in video_files:\n num_videos += len(videos)\n\n train_videos = set(random.sample(videos, random.randrange(1, len(videos) - 1)))\n num_train_videos += len(train_videos)\n\n with open(pathlib.Path(root) / f\"{cls}_test_split{fold}.txt\", \"w\") as fh:\n fh.writelines(f\"{file.name} {1 if file in train_videos else 2}\\n\" for file in videos)\n\n return num_train_videos if train else (num_videos - num_train_videos)\n\n\nclass OmniglotTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Omniglot\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(background=(True, False))\n\n def inject_fake_data(self, tmpdir, config):\n target_folder = (\n pathlib.Path(tmpdir) / \"omniglot-py\" / f\"images_{'background' if config['background'] else 'evaluation'}\"\n )\n os.makedirs(target_folder)\n\n num_images = 0\n for name in (\"Alphabet_of_the_Magi\", \"Tifinagh\"):\n num_images += self._create_alphabet_folder(target_folder, name)\n\n return num_images\n\n def _create_alphabet_folder(self, root, name):\n num_images_total = 0\n for idx in range(torch.randint(1, 4, size=()).item()):\n num_images = torch.randint(1, 4, size=()).item()\n num_images_total += num_images\n\n datasets_utils.create_image_folder(\n root / name, f\"character{idx:02d}\", lambda image_idx: f\"{image_idx:02d}.png\", num_images\n )\n\n return num_images_total\n\n\nclass 
SBUTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.SBU\n FEATURE_TYPES = (PIL.Image.Image, str)\n\n def inject_fake_data(self, tmpdir, config):\n num_images = 3\n\n dataset_folder = pathlib.Path(tmpdir) / \"dataset\"\n images = datasets_utils.create_image_folder(tmpdir, \"dataset\", self._create_file_name, num_images)\n\n self._create_urls_txt(dataset_folder, images)\n self._create_captions_txt(dataset_folder, num_images)\n\n return num_images\n\n def _create_file_name(self, idx):\n part1 = datasets_utils.create_random_string(10, string.digits)\n part2 = datasets_utils.create_random_string(10, string.ascii_lowercase, string.digits[:6])\n return f\"{part1}_{part2}.jpg\"\n\n def _create_urls_txt(self, root, images):\n with open(root / \"SBU_captioned_photo_dataset_urls.txt\", \"w\") as fh:\n for image in images:\n fh.write(\n f\"http://static.flickr.com/{datasets_utils.create_random_string(4, string.digits)}/{image.name}\\n\"\n )\n\n def _create_captions_txt(self, root, num_images):\n with open(root / \"SBU_captioned_photo_dataset_captions.txt\", \"w\") as fh:\n for _ in range(num_images):\n fh.write(f\"{datasets_utils.create_random_string(10)}\\n\")\n\n\nclass SEMEIONTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.SEMEION\n\n def inject_fake_data(self, tmpdir, config):\n num_images = 3\n\n images = torch.rand(num_images, 256)\n labels = F.one_hot(torch.randint(10, size=(num_images,)))\n with open(pathlib.Path(tmpdir) / \"semeion.data\", \"w\") as fh:\n for image, one_hot_labels in zip(images, labels):\n image_columns = \" \".join([f\"{pixel.item():.4f}\" for pixel in image])\n labels_columns = \" \".join([str(label.item()) for label in one_hot_labels])\n fh.write(f\"{image_columns} {labels_columns}\\n\")\n\n return num_images\n\n\nclass USPSTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.USPS\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))\n\n def inject_fake_data(self, tmpdir, config):\n num_images = 2 if config[\"train\"] else 1\n\n images = torch.rand(num_images, 256) * 2 - 1\n labels = torch.randint(1, 11, size=(num_images,))\n\n with bz2.open(pathlib.Path(tmpdir) / f\"usps{'.t' if not config['train'] else ''}.bz2\", \"w\") as fh:\n for image, label in zip(images, labels):\n line = \" \".join((str(label.item()), *[f\"{idx}:{pixel:.6f}\" for idx, pixel in enumerate(image, 1)]))\n fh.write(f\"{line}\\n\".encode())\n\n return num_images\n\n\nclass SBDatasetTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.SBDataset\n FEATURE_TYPES = (PIL.Image.Image, (np.ndarray, PIL.Image.Image))\n\n REQUIRED_PACKAGES = (\"scipy.io\", \"scipy.sparse\")\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(\n image_set=(\"train\", \"val\", \"train_noval\"), mode=(\"boundaries\", \"segmentation\")\n )\n\n _NUM_CLASSES = 20\n\n def inject_fake_data(self, tmpdir, config):\n num_images, num_images_per_image_set = self._create_split_files(tmpdir)\n\n sizes = self._create_target_folder(tmpdir, \"cls\", num_images)\n\n datasets_utils.create_image_folder(\n tmpdir, \"img\", lambda idx: f\"{self._file_stem(idx)}.jpg\", num_images, size=lambda idx: sizes[idx]\n )\n\n return num_images_per_image_set[config[\"image_set\"]]\n\n def _create_split_files(self, root):\n root = pathlib.Path(root)\n\n splits = dict(train=(0, 1, 2), train_noval=(0, 2), val=(3,))\n\n for split, idcs in splits.items():\n self._create_split_file(root, split, idcs)\n\n num_images = 
max(itertools.chain(*splits.values())) + 1\n num_images_per_split = dict([(split, len(idcs)) for split, idcs in splits.items()])\n return num_images, num_images_per_split\n\n def _create_split_file(self, root, name, idcs):\n with open(root / f\"{name}.txt\", \"w\") as fh:\n fh.writelines(f\"{self._file_stem(idx)}\\n\" for idx in idcs)\n\n def _create_target_folder(self, root, name, num_images):\n io = datasets_utils.lazy_importer.scipy.io\n\n target_folder = pathlib.Path(root) / name\n os.makedirs(target_folder)\n\n sizes = [torch.randint(1, 4, size=(2,)).tolist() for _ in range(num_images)]\n for idx, size in enumerate(sizes):\n content = dict(\n GTcls=dict(Boundaries=self._create_boundaries(size), Segmentation=self._create_segmentation(size))\n )\n io.savemat(target_folder / f\"{self._file_stem(idx)}.mat\", content)\n\n return sizes\n\n def _create_boundaries(self, size):\n sparse = datasets_utils.lazy_importer.scipy.sparse\n return [\n [sparse.csc_matrix(torch.randint(0, 2, size=size, dtype=torch.uint8).numpy())]\n for _ in range(self._NUM_CLASSES)\n ]\n\n def _create_segmentation(self, size):\n return torch.randint(0, self._NUM_CLASSES + 1, size=size, dtype=torch.uint8).numpy()\n\n def _file_stem(self, idx):\n return f\"2008_{idx:06d}\"\n\n\nclass FakeDataTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.FakeData\n FEATURE_TYPES = (PIL.Image.Image, int)\n\n def dataset_args(self, tmpdir, config):\n return ()\n\n def inject_fake_data(self, tmpdir, config):\n return config[\"size\"]\n\n def test_not_found_or_corrupted(self):\n self.skipTest(\"The data is generated at creation and thus cannot be non-existent or corrupted.\")\n\n\nclass PhotoTourTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.PhotoTour\n\n # The PhotoTour dataset returns examples with different features with respect to the 'train' parameter. Thus,\n # we overwrite 'FEATURE_TYPES' with a dummy value to satisfy the initial checks of the base class. Furthermore, we\n # overwrite the 'test_feature_types()' method to select the correct feature types before the test is run.\n FEATURE_TYPES = ()\n _TRAIN_FEATURE_TYPES = (torch.Tensor,)\n _TEST_FEATURE_TYPES = (torch.Tensor, torch.Tensor, torch.Tensor)\n\n datasets_utils.combinations_grid(train=(True, False))\n\n _NAME = \"liberty\"\n\n def dataset_args(self, tmpdir, config):\n return tmpdir, self._NAME\n\n def inject_fake_data(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n\n # In contrast to the original data, the fake images injected here comprise only a single patch. Thus,\n # num_images == num_patches.\n num_patches = 5\n\n image_files = self._create_images(tmpdir, self._NAME, num_patches)\n point_ids, info_file = self._create_info_file(tmpdir / self._NAME, num_patches)\n num_matches, matches_file = self._create_matches_file(tmpdir / self._NAME, num_patches, point_ids)\n\n self._create_archive(tmpdir, self._NAME, *image_files, info_file, matches_file)\n\n return num_patches if config[\"train\"] else num_matches\n\n def _create_images(self, root, name, num_images):\n # The images in the PhotoTour dataset comprises of multiple grayscale patches of 64 x 64 pixels. 
Thus, the\n # smallest fake image is 64 x 64 pixels and comprises a single patch.\n return datasets_utils.create_image_folder(\n root, name, lambda idx: f\"patches{idx:04d}.bmp\", num_images, size=(1, 64, 64)\n )\n\n def _create_info_file(self, root, num_images):\n point_ids = torch.randint(num_images, size=(num_images,)).tolist()\n\n file = root / \"info.txt\"\n with open(file, \"w\") as fh:\n fh.writelines([f\"{point_id} 0\\n\" for point_id in point_ids])\n\n return point_ids, file\n\n def _create_matches_file(self, root, num_patches, point_ids):\n lines = [\n f\"{patch_id1} {point_ids[patch_id1]} 0 {patch_id2} {point_ids[patch_id2]} 0\\n\"\n for patch_id1, patch_id2 in itertools.combinations(range(num_patches), 2)\n ]\n\n file = root / \"m50_100000_100000_0.txt\"\n with open(file, \"w\") as fh:\n fh.writelines(lines)\n\n return len(lines), file\n\n def _create_archive(self, root, name, *files):\n archive = root / f\"{name}.zip\"\n with zipfile.ZipFile(archive, \"w\") as zip:\n for file in files:\n zip.write(file, arcname=file.relative_to(root))\n\n return archive\n\n @datasets_utils.test_all_configs\n def test_feature_types(self, config):\n feature_types = self.FEATURE_TYPES\n self.FEATURE_TYPES = self._TRAIN_FEATURE_TYPES if config[\"train\"] else self._TEST_FEATURE_TYPES\n try:\n super().test_feature_types.__wrapped__(self, config)\n finally:\n self.FEATURE_TYPES = feature_types\n\n\nclass Flickr8kTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Flickr8k\n\n FEATURE_TYPES = (PIL.Image.Image, list)\n\n _IMAGES_FOLDER = \"images\"\n _ANNOTATIONS_FILE = \"captions.html\"\n\n def dataset_args(self, tmpdir, config):\n tmpdir = pathlib.Path(tmpdir)\n root = tmpdir / self._IMAGES_FOLDER\n ann_file = tmpdir / self._ANNOTATIONS_FILE\n return str(root), str(ann_file)\n\n def inject_fake_data(self, tmpdir, config):\n num_images = 3\n num_captions_per_image = 3\n\n tmpdir = pathlib.Path(tmpdir)\n\n images = self._create_images(tmpdir, self._IMAGES_FOLDER, num_images)\n self._create_annotations_file(tmpdir, self._ANNOTATIONS_FILE, images, num_captions_per_image)\n\n return dict(num_examples=num_images, captions=self._create_captions(num_captions_per_image))\n\n def _create_images(self, root, name, num_images):\n return datasets_utils.create_image_folder(root, name, self._image_file_name, num_images)\n\n def _image_file_name(self, idx):\n id = datasets_utils.create_random_string(10, string.digits)\n checksum = datasets_utils.create_random_string(10, string.digits, string.ascii_lowercase[:6])\n size = datasets_utils.create_random_string(1, \"qwcko\")\n return f\"{id}_{checksum}_{size}.jpg\"\n\n def _create_annotations_file(self, root, name, images, num_captions_per_image):\n with open(root / name, \"w\") as fh:\n fh.write(\"<table>\")\n for image in (None, *images):\n self._add_image(fh, image, num_captions_per_image)\n fh.write(\"</table>\")\n\n def _add_image(self, fh, image, num_captions_per_image):\n fh.write(\"<tr>\")\n self._add_image_header(fh, image)\n fh.write(\"</tr><tr><td><ul>\")\n self._add_image_captions(fh, num_captions_per_image)\n fh.write(\"</ul></td></tr>\")\n\n def _add_image_header(self, fh, image=None):\n if image:\n url = f\"http://www.flickr.com/photos/user/{image.name.split('_')[0]}/\"\n data = f'<a href=\"{url}\">{url}</a>'\n else:\n data = \"Image Not Found\"\n fh.write(f\"<td>{data}</td>\")\n\n def _add_image_captions(self, fh, num_captions_per_image):\n for caption in self._create_captions(num_captions_per_image):\n 
fh.write(f\"<li>{caption}\")\n\n def _create_captions(self, num_captions_per_image):\n return [str(idx) for idx in range(num_captions_per_image)]\n\n def test_captions(self):\n with self.create_dataset() as (dataset, info):\n _, captions = dataset[0]\n self.assertSequenceEqual(captions, info[\"captions\"])\n\n\nclass Flickr30kTestCase(Flickr8kTestCase):\n DATASET_CLASS = datasets.Flickr30k\n\n FEATURE_TYPES = (PIL.Image.Image, list)\n\n _ANNOTATIONS_FILE = \"captions.token\"\n\n def _image_file_name(self, idx):\n return f\"{idx}.jpg\"\n\n def _create_annotations_file(self, root, name, images, num_captions_per_image):\n with open(root / name, \"w\") as fh:\n for image, (idx, caption) in itertools.product(\n images, enumerate(self._create_captions(num_captions_per_image))\n ):\n fh.write(f\"{image.name}#{idx}\\t{caption}\\n\")\n\n\nclass MNISTTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.MNIST\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))\n\n _MAGIC_DTYPES = {\n torch.uint8: 8,\n torch.int8: 9,\n torch.int16: 11,\n torch.int32: 12,\n torch.float32: 13,\n torch.float64: 14,\n }\n\n _IMAGES_SIZE = (28, 28)\n _IMAGES_DTYPE = torch.uint8\n\n _LABELS_SIZE = ()\n _LABELS_DTYPE = torch.uint8\n\n def inject_fake_data(self, tmpdir, config):\n raw_dir = pathlib.Path(tmpdir) / self.DATASET_CLASS.__name__ / \"raw\"\n os.makedirs(raw_dir, exist_ok=True)\n\n num_images = self._num_images(config)\n self._create_binary_file(\n raw_dir, self._images_file(config), (num_images, *self._IMAGES_SIZE), self._IMAGES_DTYPE\n )\n self._create_binary_file(\n raw_dir, self._labels_file(config), (num_images, *self._LABELS_SIZE), self._LABELS_DTYPE\n )\n return num_images\n\n def _num_images(self, config):\n return 2 if config[\"train\"] else 1\n\n def _images_file(self, config):\n return f\"{self._prefix(config)}-images-idx3-ubyte\"\n\n def _labels_file(self, config):\n return f\"{self._prefix(config)}-labels-idx1-ubyte\"\n\n def _prefix(self, config):\n return \"train\" if config[\"train\"] else \"t10k\"\n\n def _create_binary_file(self, root, filename, size, dtype):\n with open(pathlib.Path(root) / filename, \"wb\") as fh:\n for meta in (self._magic(dtype, len(size)), *size):\n fh.write(self._encode(meta))\n\n # If ever an MNIST variant is added that uses floating point data, this should be adapted.\n data = torch.randint(0, torch.iinfo(dtype).max + 1, size, dtype=dtype)\n fh.write(data.numpy().tobytes())\n\n def _magic(self, dtype, dims):\n return self._MAGIC_DTYPES[dtype] * 256 + dims\n\n def _encode(self, v):\n return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1]\n\n\nclass FashionMNISTTestCase(MNISTTestCase):\n DATASET_CLASS = datasets.FashionMNIST\n\n\nclass KMNISTTestCase(MNISTTestCase):\n DATASET_CLASS = datasets.KMNIST\n\n\nclass EMNISTTestCase(MNISTTestCase):\n DATASET_CLASS = datasets.EMNIST\n\n DEFAULT_CONFIG = dict(split=\"byclass\")\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(\n split=(\"byclass\", \"bymerge\", \"balanced\", \"letters\", \"digits\", \"mnist\"), train=(True, False)\n )\n\n def _prefix(self, config):\n return f\"emnist-{config['split']}-{'train' if config['train'] else 'test'}\"\n\n\nclass QMNISTTestCase(MNISTTestCase):\n DATASET_CLASS = datasets.QMNIST\n\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(what=(\"train\", \"test\", \"test10k\", \"nist\"))\n\n _LABELS_SIZE = (8,)\n _LABELS_DTYPE = torch.int32\n\n def _num_images(self, config):\n if config[\"what\"] == \"nist\":\n return 3\n elif 
config[\"what\"] == \"train\":\n return 2\n elif config[\"what\"] == \"test50k\":\n # The split 'test50k' is defined as the last 50k images beginning at index 10000. Thus, we need to create\n # more than 10000 images for the dataset to not be empty. Since this takes significantly longer than the\n # creation of all other splits, this is excluded from the 'ADDITIONAL_CONFIGS' and is tested only once in\n # 'test_num_examples_test50k'.\n return 10001\n else:\n return 1\n\n def _labels_file(self, config):\n return f\"{self._prefix(config)}-labels-idx2-int\"\n\n def _prefix(self, config):\n if config[\"what\"] == \"nist\":\n return \"xnist\"\n\n if config[\"what\"] is None:\n what = \"train\" if config[\"train\"] else \"test\"\n elif config[\"what\"].startswith(\"test\"):\n what = \"test\"\n else:\n what = config[\"what\"]\n\n return f\"qmnist-{what}\"\n\n def test_num_examples_test50k(self):\n with self.create_dataset(what=\"test50k\") as (dataset, info):\n # Since the split 'test50k' selects all images beginning from the index 10000, we subtract the number of\n # created examples by this.\n self.assertEqual(len(dataset), info[\"num_examples\"] - 10000)\n\n\nclass DatasetFolderTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.DatasetFolder\n\n # The dataset has no fixed return type since it is defined by the loader parameter. For testing, we use a loader\n # that simply returns the path as type 'str' instead of loading anything. See the 'dataset_args()' method.\n FEATURE_TYPES = (str, int)\n\n _IMAGE_EXTENSIONS = (\"jpg\", \"png\")\n _VIDEO_EXTENSIONS = (\"avi\", \"mp4\")\n _EXTENSIONS = (*_IMAGE_EXTENSIONS, *_VIDEO_EXTENSIONS)\n\n # DatasetFolder has two mutually exclusive parameters: 'extensions' and 'is_valid_file'. One of both is required.\n # We only iterate over different 'extensions' here and handle the tests for 'is_valid_file' in the\n # 'test_is_valid_file()' method.\n DEFAULT_CONFIG = dict(extensions=_EXTENSIONS)\n ADDITIONAL_CONFIGS = (\n *datasets_utils.combinations_grid(extensions=[(ext,) for ext in _IMAGE_EXTENSIONS]),\n dict(extensions=_IMAGE_EXTENSIONS),\n *datasets_utils.combinations_grid(extensions=[(ext,) for ext in _VIDEO_EXTENSIONS]),\n dict(extensions=_VIDEO_EXTENSIONS),\n )\n\n def dataset_args(self, tmpdir, config):\n return tmpdir, lambda x: x\n\n def inject_fake_data(self, tmpdir, config):\n extensions = config[\"extensions\"] or self._is_valid_file_to_extensions(config[\"is_valid_file\"])\n\n num_examples_total = 0\n classes = []\n for ext, cls in zip(self._EXTENSIONS, string.ascii_letters):\n if ext not in extensions:\n continue\n\n create_example_folder = (\n datasets_utils.create_image_folder\n if ext in self._IMAGE_EXTENSIONS\n else datasets_utils.create_video_folder\n )\n\n num_examples = torch.randint(1, 3, size=()).item()\n create_example_folder(tmpdir, cls, lambda idx: self._file_name_fn(cls, ext, idx), num_examples)\n\n num_examples_total += num_examples\n classes.append(cls)\n\n return dict(num_examples=num_examples_total, classes=classes)\n\n def _file_name_fn(self, cls, ext, idx):\n return f\"{cls}_{idx}.{ext}\"\n\n def _is_valid_file_to_extensions(self, is_valid_file):\n return {ext for ext in self._EXTENSIONS if is_valid_file(f\"foo.{ext}\")}\n\n @datasets_utils.test_all_configs\n def test_is_valid_file(self, config):\n extensions = config.pop(\"extensions\")\n # We need to explicitly pass extensions=None here or otherwise it would be filled by the value from the\n # DEFAULT_CONFIG.\n with self.create_dataset(\n config, 
extensions=None, is_valid_file=lambda file: pathlib.Path(file).suffix[1:] in extensions\n ) as (dataset, info):\n self.assertEqual(len(dataset), info[\"num_examples\"])\n\n @datasets_utils.test_all_configs\n def test_classes(self, config):\n with self.create_dataset(config) as (dataset, info):\n self.assertSequenceEqual(dataset.classes, info[\"classes\"])\n\n\nclass ImageFolderTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.ImageFolder\n\n def inject_fake_data(self, tmpdir, config):\n num_examples_total = 0\n classes = (\"a\", \"b\")\n for cls in classes:\n num_examples = torch.randint(1, 3, size=()).item()\n num_examples_total += num_examples\n\n datasets_utils.create_image_folder(tmpdir, cls, lambda idx: f\"{cls}_{idx}.png\", num_examples)\n\n return dict(num_examples=num_examples_total, classes=classes)\n\n @datasets_utils.test_all_configs\n def test_classes(self, config):\n with self.create_dataset(config) as (dataset, info):\n self.assertSequenceEqual(dataset.classes, info[\"classes\"])\n\n\nclass KittiTestCase(datasets_utils.ImageDatasetTestCase):\n DATASET_CLASS = datasets.Kitti\n FEATURE_TYPES = (PIL.Image.Image, (list, type(None))) # test split returns None as target\n ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False))\n\n def inject_fake_data(self, tmpdir, config):\n kitti_dir = os.path.join(tmpdir, \"Kitti\", \"raw\")\n os.makedirs(kitti_dir)\n\n split_to_num_examples = {\n True: 1,\n False: 2,\n }\n\n # We need to create all folders(training and testing).\n for is_training in (True, False):\n num_examples = split_to_num_examples[is_training]\n\n datasets_utils.create_image_folder(\n root=kitti_dir,\n name=os.path.join(\"training\" if is_training else \"testing\", \"image_2\"),\n file_name_fn=lambda image_idx: f\"{image_idx:06d}.png\",\n num_examples=num_examples,\n )\n if is_training:\n for image_idx in range(num_examples):\n target_file_dir = os.path.join(kitti_dir, \"training\", \"label_2\")\n os.makedirs(target_file_dir)\n target_file_name = os.path.join(target_file_dir, f\"{image_idx:06d}.txt\")\n target_contents = \"Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92 1.89 0.48 1.20 1.84 1.47 8.41 0.01\\n\" # noqa\n with open(target_file_name, \"w\") as target_file:\n target_file.write(target_contents)\n\n return split_to_num_examples[config[\"train\"]]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"torch.randint",
"torch.iinfo",
"torch.tensor",
"torch.save",
"torch.rand",
"numpy.array",
"numpy.random.randint"
]
] |
araistrick/camera_pan_renderer
|
[
"900c6c064ac7d2b460087a16be49204276679e04"
] |
[
"render.py"
] |
[
"import os\nimport argparse\nfrom pathlib import Path\n\nimport bpy\nimport numpy as np\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\ndef use_cuda():\n bpy.context.preferences.addons[\"cycles\"].preferences.compute_device_type = \"CUDA\"\n print(bpy.context.preferences.addons[\"cycles\"].preferences.get_devices())\n bpy.context.preferences.addons[\"cycles\"].preferences.devices[0].use = True\n bpy.context.scene.cycles.device = \"GPU\"\n bpy.context.scene.render.tile_x = 128\n bpy.context.scene.render.tile_x = 128\n print('Using GPU device:', bpy.context.preferences.addons[\"cycles\"].preferences.devices[0])\n\ndef select_none():\n for obj in bpy.data.objects:\n obj.select_set(False)\n\ndef render_ply(args, ply_path):\n\n ply_name = ply_path.parts[-1]\n ply_id = '_'.join(list(ply_name.split('_'))[1:])\n\n # import the requisite ply\n select_none()\n print(f\"Importing {ply_path}\")\n bpy.ops.import_mesh.ply(filepath=str(ply_path))\n imported_ply = bpy.context.selected_objects[0]\n\n # rotate it correctly\n imported_ply.rotation_euler = np.radians(np.array(args.override_ply_euler))\n\n # make it colored according to vertex colors\n material = next(m for m in bpy.data.materials if m.name == args.template_material_name)\n if imported_ply.data.materials:\n imported_ply.data.materials[0] = material\n else:\n imported_ply.data.materials.append(material)\n\n # configure render output location\n outpath = Path(args.output_folder)/ply_id\n outpath.mkdir(exist_ok=True, parents=True)\n bpy.context.scene.render.filepath = str(outpath) + '/'\n\n bpy.ops.render.render(animation=True, write_still=True)\n\n # clean up\n select_none()\n imported_ply.select_set(True)\n bpy.ops.object.delete()\n\ndef main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('input_folder', type=str)\n parser.add_argument('output_folder', type=str)\n parser.add_argument('--template_file', type=str, default='template.blend')\n parser.add_argument('--override_ply_euler', type=int, nargs='+', default=[90, 0, 0])\n parser.add_argument('--template_material_name', type=str, default='vertex color')\n parser.add_argument('--cuda', action='store_true')\n args = parser.parse_args()\n\n bpy.ops.wm.open_mainfile(filepath=args.template_file)\n\n if args.cuda:\n use_cuda()\n\n input_paths = list(Path(args.input_folder).glob('*.ply'))\n print(f\"Starting processing of {len(input_paths)} .plys from {args.input_folder}\")\n for ply_path in input_paths:\n render_ply(args, ply_path)\n\nif __name__ == '__main__':\n main()"
] |
[
[
"numpy.array"
]
] |
siyuan-song/Container
|
[
"42313132af32f2edf710643b9ceb8ca84693ba5c"
] |
[
"samples/demo.py"
] |
[
"\n# coding: utf-8\n\n# # Mask R-CNN Demo\n# \n# A quick intro to using the pre-trained model to detect and segment objects.\n\n# In[1]:\n\n\nimport os\nimport sys\nimport random\nimport math\nimport numpy as np\nimport skimage.io\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\n# Import COCO config\nsys.path.append(os.path.join(ROOT_DIR, \"samples/container/\")) # To find local version\nimport container\n\n#get_ipython().run_line_magic('matplotlib', 'inline')\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"logs/container20200717T1153/mask_rcnn_container_0030.h5\")\n# Download COCO trained weights from Releases if needed\nif not os.path.exists(COCO_MODEL_PATH):\n utils.download_trained_weights(COCO_MODEL_PATH)\n\n# Directory of images to run detection on\nIMAGE_DIR = os.path.join(ROOT_DIR, \"samples/container/dataset/val\")\n\n\n# ## Configurations\n# \n# We'll be using a model trained on the MS-COCO dataset. The configurations of this model are in the ```CocoConfig``` class in ```coco.py```.\n# \n# For inferencing, modify the configurations a bit to fit the task. To do so, sub-class the ```CocoConfig``` class and override the attributes you need to change.\n\n# In[2]:\n\n\nclass InferenceConfig(container.ContainerConfig):\n # Set batch size to 1 since we'll be running inference on\n # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\nconfig = InferenceConfig()\nconfig.display()\n\n\n# ## Create Model and Load Trained Weights\n\n# In[3]:\n\n\n# Create model object in inference mode.\nmodel = modellib.MaskRCNN(mode=\"inference\", model_dir=MODEL_DIR, config=config)\n\n# Load weights trained on MS-COCO\nmodel.load_weights(COCO_MODEL_PATH, by_name=True)\n\n\n# ## Class Names\n# \n# The model classifies objects and returns class IDs, which are integer value that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.\n# \n# To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns it's own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names.\n# \n# To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.\n# ```\n# # Load COCO dataset\n# dataset = coco.CocoDataset()\n# dataset.load_coco(COCO_DIR, \"train\")\n# dataset.prepare()\n# \n# # Print class names\n# print(dataset.class_names)\n# ```\n# \n# We don't want to require you to download the COCO dataset just to run this demo, so we're including the list of class names below. 
The index of the class name in the list represent its ID (first class is 0, second is 1, third is 2, ...etc.)\n\n# In[4]:\n\n\n# COCO Class names\n# Index of the class in the list is its ID. For example, to get ID of\n# the teddy bear class, use: class_names.index('teddy bear')\nclass_names = ['BG','Cola Bottle','Fanta Bottle','Cherry Coke Bottle','Coke Zero Bottle','Mtn Dew Bottle','Cola Can','Fanta Can']\n\n\n# ## Run Object Detection\n\n# In[5]:\n\n\n# Load a random image from the images folder\n##file_names = next(os.walk(IMAGE_DIR))[2]\n##image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))\ntest_image = skimage.io.imread(os.path.join(IMAGE_DIR,'Image0170.png'))\ntest_image = image[:,:,:3]\n\n# Run detection\nresults = model.detect([test_image], verbose=1)\n\n# Visualize results\nr = results[0]\nvisualize.display_instances(test_image, r['rois'], r['masks'], r['class_ids'], \n class_names, r['scores'])\n\n# Evaluation\n# Compute VOC-Style mAP @ IoU=0.5\n# Running on 40 images. Increase for better accuracy.\nfrom container import ContainerDataset\ndataset_val = ContainerDataset()\ndataset_val.load_container(os.path.join(ROOT_DIR, \"samples/container/dataset\"), \"val\")\ndataset_val.prepare()\n\nimage_ids = np.random.choice(dataset_val.image_ids, 40)\nAPs = []\n\nfor image_id in image_ids:\n # Load image and ground truth data\n image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, config, image_id, use_mini_mask=False)\n image = image[:,:,:3]\n \n # Run object detection\n results = model.detect([image], verbose=0)\n r = results[0]\n # Compute AP\n AP, precisions, recalls, overlaps = utils.compute_ap(gt_bbox, gt_class_id, gt_mask, r[\"rois\"], r[\"class_ids\"], r[\"scores\"], r['masks'])\n APs.append(AP)\n \nprint(\"mAP: \", np.mean(APs))\n"
] |
[
[
"numpy.mean",
"numpy.random.choice"
]
] |
deepkashiwa20/TrafficAccident
|
[
"c5fb26106137a4e85e5b5aa1e8ffdbb672a61988",
"c5fb26106137a4e85e5b5aa1e8ffdbb672a61988"
] |
[
"save/tokyo202112_MemGCRN_c1to1_20220208115005_time/traintest_MemGCRN.py",
"save/tokyo202112_MemGCRN_c1to1_20220208115005_time/MemGCRN.py"
] |
[
"import sys\nimport os\nimport shutil\nimport math\nimport numpy as np\nimport pandas as pd\nimport scipy.sparse as ss\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom datetime import datetime\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\nfrom torchsummary import summary\nimport argparse\nfrom configparser import ConfigParser\nimport logging\nimport Metrics\nfrom MemGCRN import MemGCRN\nfrom Utils import *\n\ndef refineXSYS(XS, YS):\n assert opt.time or opt.history, 'it should have one covariate time or history'\n XCov, YCov = XS[..., -1:], YS[..., -1:]\n XS, YS = XS[:, :, :, :opt.channelin], YS[:, :, :, :opt.channelout]\n return XS, YS, XCov, YCov\n\ndef print_params(model):\n # print trainable params\n param_count = 0\n logger.info('Trainable parameter list:')\n for name, param in model.named_parameters():\n if param.requires_grad:\n print(name, param.shape, param.numel())\n param_count += param.numel()\n logger.info(f'\\n In total: {param_count} trainable parameters. \\n')\n return\n\ndef getModel(mode):\n model = MemGCRN(num_nodes=num_variable, input_dim=opt.channelin, output_dim=opt.channelout, horizon=opt.seq_len, \n rnn_units=opt.hiddenunits, num_layers=opt.num_layers, mem_num=opt.mem_num, mem_dim=opt.mem_dim, \n decoder_type=opt.decoder, go_type=opt.go).to(device)\n if mode == 'train':\n summary(model, [(opt.his_len, num_variable, opt.channelin), (opt.seq_len, num_variable, opt.channelout)], device=device) \n print_params(model)\n for p in model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n else:\n nn.init.uniform_(p)\n return model\n\ndef evaluateModel(model, data_iter, ycov_flag):\n if opt.loss == 'MSE': \n criterion = nn.MSELoss()\n if opt.loss == 'MAE': \n criterion = nn.L1Loss()\n separate_loss = nn.TripletMarginLoss(margin=1.0)\n compact_loss = nn.MSELoss()\n \n model.eval()\n loss_sum, n, YS_pred = 0.0, 0, []\n loss_sum1, loss_sum2, loss_sum3 = 0.0, 0.0, 0.0\n with torch.no_grad():\n if ycov_flag:\n for x, y, y_cov in data_iter:\n y_pred, h_att, query, pos, neg = model(x, y_cov)\n loss1 = criterion(y_pred, y)\n loss2 = separate_loss(query, pos.detach(), neg.detach())\n loss3 = compact_loss(query, pos.detach())\n loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3\n loss_sum += loss.item() * y.shape[0]\n loss_sum1 += loss1.item() * y.shape[0]\n loss_sum2 += loss2.item() * y.shape[0]\n loss_sum3 += loss3.item() * y.shape[0]\n n += y.shape[0]\n YS_pred.append(y_pred.cpu().numpy()) \n else:\n for x, y in data_iter:\n y_pred, h_att, query, pos, neg = model(x)\n loss1 = criterion(y_pred, y)\n loss2 = separate_loss(query, pos.detach(), neg.detach())\n loss3 = compact_loss(query, pos.detach())\n loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3\n loss_sum += loss.item() * y.shape[0]\n loss_sum1 += loss1.item() * y.shape[0]\n loss_sum2 += loss2.item() * y.shape[0]\n loss_sum3 += loss3.item() * y.shape[0]\n n += y.shape[0]\n YS_pred.append(y_pred.cpu().numpy())\n loss = loss_sum / n\n loss1 = loss_sum1 / n\n loss2 = loss_sum2 / n \n loss3 = loss_sum3 / n \n YS_pred = np.vstack(YS_pred)\n return loss, loss1, loss2, loss3, YS_pred\n\ndef trainModel(name, mode, XS, YS, YCov):\n logger.info('Model Training Started ...', time.ctime())\n logger.info('TIMESTEP_IN, TIMESTEP_OUT', opt.his_len, opt.seq_len)\n model = getModel(mode)\n XS_torch, YS_torch = torch.Tensor(XS).to(device), torch.Tensor(YS).to(device)\n logger.info('XS_torch.shape: ', XS_torch.shape)\n 
logger.info('YS_torch.shape: ', YS_torch.shape)\n if YCov is not None:\n YCov_torch = torch.Tensor(YCov).to(device)\n logger.info('YCov_torch.shape: ', YCov_torch.shape)\n trainval_data = torch.utils.data.TensorDataset(XS_torch, YS_torch, YCov_torch)\n else: \n trainval_data = torch.utils.data.TensorDataset(XS_torch, YS_torch)\n trainval_size = len(trainval_data)\n train_size = int(trainval_size * (1 - opt.val_ratio))\n\n train_data = torch.utils.data.Subset(trainval_data, list(range(0, train_size)))\n val_data = torch.utils.data.Subset(trainval_data, list(range(train_size, trainval_size)))\n train_iter = torch.utils.data.DataLoader(train_data, opt.batch_size, shuffle=False) # drop_last=True\n val_iter = torch.utils.data.DataLoader(val_data, opt.batch_size, shuffle=False) # drop_last=True\n trainval_iter = torch.utils.data.DataLoader(trainval_data, opt.batch_size, shuffle=False) # drop_last=True\n optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)\n if opt.loss == 'MSE': \n criterion = nn.MSELoss()\n if opt.loss == 'MAE': \n criterion = nn.L1Loss()\n separate_loss = nn.TripletMarginLoss(margin=1.0)\n compact_loss = nn.MSELoss()\n \n min_val_loss = np.inf\n wait = 0 \n for epoch in range(opt.epoch):\n starttime = datetime.now() \n loss_sum, n = 0.0, 0\n loss_sum1, loss_sum2, loss_sum3 = 0.0, 0.0, 0.0\n model.train()\n if YCov is not None:\n for x, y, ycov in train_iter:\n optimizer.zero_grad()\n y_pred, h_att, query, pos, neg = model(x, ycov)\n loss1 = criterion(y_pred, y)\n loss2 = separate_loss(query, pos.detach(), neg.detach())\n loss3 = compact_loss(query, pos.detach())\n loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3\n loss.backward()\n optimizer.step()\n loss_sum += loss.item() * y.shape[0]\n loss_sum1 += loss1.item() * y.shape[0]\n loss_sum2 += loss2.item() * y.shape[0]\n loss_sum3 += loss3.item() * y.shape[0]\n n += y.shape[0]\n else:\n for x, y in train_iter:\n optimizer.zero_grad()\n y_pred, h_att, query, pos, neg = model(x)\n loss1 = criterion(y_pred, y)\n loss2 = separate_loss(query, pos.detach(), neg.detach())\n loss3 = compact_loss(query, pos.detach())\n loss = loss1 + opt.lamb * loss2 + opt.lamb1 * loss3\n loss.backward()\n optimizer.step()\n loss_sum += loss.item() * y.shape[0]\n loss_sum1 += loss1.item() * y.shape[0]\n loss_sum2 += loss2.item() * y.shape[0]\n loss_sum3 += loss3.item() * y.shape[0]\n n += y.shape[0]\n train_loss = loss_sum / n\n train_loss1 = loss_sum1 / n\n train_loss2 = loss_sum2 / n\n train_loss3 = loss_sum3 / n\n val_loss, val_loss1, val_loss2, val_loss3, _ = evaluateModel(model, val_iter, YCov is not None)\n if val_loss < min_val_loss:\n wait = 0\n min_val_loss = val_loss\n torch.save(model.state_dict(), modelpt_path)\n else:\n wait += 1\n if wait == opt.patience:\n logger.info('Early stopping at epoch: %d' % epoch)\n break\n endtime = datetime.now()\n epoch_time = (endtime - starttime).seconds\n logger.info(\"epoch\", epoch, \"time used:\", epoch_time,\" seconds \", \"train loss:\", train_loss, train_loss1, train_loss2, train_loss3, \"validation loss:\", val_loss, val_loss1, val_loss2, val_loss3)\n with open(epochlog_path, 'a') as f:\n f.write(\"%s, %d, %s, %d, %s, %s, %.6f, %s, %.6f\\n\" % (\"epoch\", epoch, \"time used\", epoch_time, \"seconds\", \"train loss\", train_loss, \"validation loss:\", val_loss))\n \n # torch_score = train_loss\n loss, loss1, loss2, loss3, YS_pred = evaluateModel(model, trainval_iter, YCov is not None)\n logger.info('trainval loss, loss1, loss2, loss3', loss, loss1, loss2, loss3)\n logger.info('YS.shape, 
YS_pred.shape,', YS.shape, YS_pred.shape)\n YS = YS[:YS_pred.shape[0], ...]\n YS, YS_pred = np.squeeze(YS), np.squeeze(YS_pred) \n YS, YS_pred = YS.reshape(-1, YS.shape[-1]), YS_pred.reshape(-1, YS_pred.shape[-1])\n YS, YS_pred = scaler.inverse_transform(YS), scaler.inverse_transform(YS_pred)\n YS, YS_pred = YS.reshape(-1, opt.seq_len, YS.shape[-1]), YS_pred.reshape(-1, opt.seq_len, YS_pred.shape[-1])\n logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape)\n MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS, YS_pred)\n logger.info('*' * 40)\n logger.info(\"%s, %s, Torch MSE, %.6f, %.6f, %.6f, %.6f\" % (name, mode, train_loss, train_loss1, train_loss2, train_loss3))\n logger.info(\"%s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\" % (name, mode, MSE, RMSE, MAE, MAPE))\n logger.info('Model Training Ended ...', time.ctime())\n \ndef testModel(name, mode, XS, YS, YCov, Mask=None):\n def testScore(YS, YS_pred, message):\n MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS, YS_pred)\n logger.info(message)\n logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape)\n logger.info(\"%s, %s, Torch MSE, %.6f, %.6f, %.6f, %.6f\" % (name, mode, loss, loss1, loss2, loss3))\n logger.info(\"all pred steps, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\" % (name, mode, MSE, RMSE, MAE, MAPE))\n with open(score_path, 'a') as f:\n f.write(\"all pred steps, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\\n\" % (name, mode, MSE, RMSE, MAE, MAPE))\n for i in range(opt.seq_len):\n MSE, RMSE, MAE, MAPE = Metrics.evaluate(YS[..., i], YS_pred[..., i])\n logger.info(\"%d step, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\" % (i+1, name, mode, MSE, RMSE, MAE, MAPE))\n f.write(\"%d step, %s, %s, MSE, RMSE, MAE, MAPE, %.6f, %.6f, %.6f, %.6f\\n\" % (i+1, name, mode, MSE, RMSE, MAE, MAPE))\n return None\n\n logger.info('Model Testing Started ...', time.ctime())\n logger.info('TIMESTEP_IN, TIMESTEP_OUT', opt.his_len, opt.seq_len)\n\n model = getModel(mode)\n model.load_state_dict(torch.load(modelpt_path))\n\n XS_torch, YS_torch = torch.Tensor(XS).to(device), torch.Tensor(YS).to(device)\n if YCov is not None:\n YCov_torch = torch.Tensor(YCov).to(device)\n test_data = torch.utils.data.TensorDataset(XS_torch, YS_torch, YCov_torch)\n else:\n test_data = torch.utils.data.TensorDataset(XS_torch, YS_torch)\n test_iter = torch.utils.data.DataLoader(test_data, opt.batch_size, shuffle=False) # drop_last=True\n \n loss, loss1, loss2, loss3, YS_pred = evaluateModel(model, test_iter, YCov is not None)\n logger.info('test loss, loss1, loss2, loss3', loss, loss1, loss2, loss3)\n logger.info('YS.shape, YS_pred.shape,', YS.shape, YS_pred.shape)\n YS = YS[:YS_pred.shape[0], ...]\n YS, YS_pred = np.squeeze(YS), np.squeeze(YS_pred) \n YS, YS_pred = YS.reshape(-1, YS.shape[-1]), YS_pred.reshape(-1, YS_pred.shape[-1])\n YS, YS_pred = scaler.inverse_transform(YS), scaler.inverse_transform(YS_pred)\n YS, YS_pred = YS.reshape(-1, opt.seq_len, YS.shape[-1]), YS_pred.reshape(-1, opt.seq_len, YS_pred.shape[-1])\n YS, YS_pred = YS.transpose(0, 2, 1), YS_pred.transpose(0, 2, 1)\n # np.save(path + f'/{name}_prediction.npy', YS_pred)\n # np.save(path + f'/{name}_groundtruth.npy', YS)\n # np.save(path + f'/{name}_Mask_t1.npy', Mask)\n testScore(YS, YS_pred, '********* Evaluation on the whole testing dataset *********')\n testScore(YS[Mask], YS_pred[Mask], '********* Evaluation on the selected testing dataset when incident happen at t+1 *********')\n logger.info('Model Testing Ended ...', time.ctime())\n 
\n######################################################################################### \nparser = argparse.ArgumentParser()\nparser.add_argument(\"--loss\", type=str, default='MAE', help=\"MAE, MSE, SELF\")\nparser.add_argument(\"--epoch\", type=int, default=200, help=\"number of epochs of training\")\nparser.add_argument(\"--batch_size\", type=int, default=64, help=\"size of the batches\")\nparser.add_argument(\"--lr\", type=float, default=0.001, help=\"adam: learning rate\")\nparser.add_argument(\"--patience\", type=float, default=10, help=\"patience used for early stop\")\nparser.add_argument('--val_ratio', type=float, default=0.25, help='the ratio of validation data among the trainval ratio')\nparser.add_argument('--seed', type=int, default=1234, help='Random seed.')\nparser.add_argument('--seq_len', type=int, default=6, help='sequence length of values, which should be even nums (2,4,6,12)')\nparser.add_argument('--his_len', type=int, default=6, help='sequence length of observed historical values')\nparser.add_argument('--month', type=str, default='202112', help='which experiment setting (month) to run')\nparser.add_argument('--city', type=str, default='tokyo', help='which experiment setting (city) to run')\nparser.add_argument('--channelin', type=int, default=1, help='number of input channel')\nparser.add_argument('--channelout', type=int, default=1, help='number of output channel')\nparser.add_argument('--time', type=bool, default=False, help='whether to use float time embedding')\nparser.add_argument('--history', type=bool, default=False, help='whether to use historical data')\nparser.add_argument('--num_layers', type=int, default=1, help='number of layers')\nparser.add_argument('--hiddenunits', type=int, default=32, help='number of hidden units')\nparser.add_argument('--mem_num', type=int, default=10, help='number of memory')\nparser.add_argument('--mem_dim', type=int, default=32, help='dimension of memory')\nparser.add_argument(\"--decoder\", type=str, default='stepwise', help=\"which type of decoder: stepwise or stepwise\")\nparser.add_argument('--ycov', type=str, default='time', help='which ycov to use: time or history')\nparser.add_argument('--go', type=str, default='random', help='which type of decoder go: random or last')\nparser.add_argument('--model', type=str, default='MemGCRN', help='which model to use')\nparser.add_argument('--gpu', type=int, default=3, help='which gpu to use')\nparser.add_argument('--lamb', type=float, default=0.01, help='lamb value for separate loss')\nparser.add_argument('--lamb1', type=float, default=0.01, help='lamb1 value for compact loss')\nopt = parser.parse_args()\n# optimal1: --ycov=history --go=random --lamb=0.01 --lamb1=0.01\n# optimal2: --ycov=time --go=last --lamb=0.01 --lamb1=0.0\n\nconfig = ConfigParser()\nconfig.read('params.txt', encoding='UTF-8')\ntrain_month = eval(config[opt.month]['train_month'])\ntest_month = eval(config[opt.month]['test_month'])\ntraffic_path = config[opt.month]['traffic_path']\nsubroad_path = config[opt.city]['subroad_path']\nroad_path = config['common']['road_path']\nadj_path = config['common']['adjdis_path'] \n# adj_path = config['common']['adj01_path']\nnum_variable = len(np.loadtxt(subroad_path).astype(int))\nN_link = config.getint('common', 'N_link')\nfeature_list = ['speed_typea']\nif opt.ycov=='time':\n opt.time = True\nelif opt.ycov=='history':\n opt.history = True\nelse:\n assert False, 'ycov type must be float time or float history value'\nif opt.time: feature_list.append('weekdaytime')\nif 
opt.history: feature_list.append('speed_typea_y')\n# opt.channelin = len(feature_list) # Here, input for the encoder is just speed, w/o xcov is better.\n# feature_list = ['speed_typea', 'accident_flag', 'real_accident_flag', 'weekdaytime', 'speed_typea_y']\n\n_, filename = os.path.split(os.path.abspath(sys.argv[0]))\nfilename = os.path.splitext(filename)[0]\nmodel_name = opt.model\ntimestring = time.strftime('%Y%m%d%H%M%S', time.localtime())\npath = f'../save/{opt.city}{opt.month}_{model_name}_c{opt.channelin}to{opt.channelout}_{timestring}_{opt.ycov}'\nlogging_path = f'{path}/{model_name}_{timestring}_logging.txt'\nscore_path = f'{path}/{model_name}_{timestring}_scores.txt'\nepochlog_path = f'{path}/{model_name}_{timestring}_epochlog.txt'\nmodelpt_path = f'{path}/{model_name}_{timestring}.pt'\nif not os.path.exists(path): os.makedirs(path)\nshutil.copy2(sys.argv[0], path)\nshutil.copy2(f'{model_name}.py', path)\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level = logging.INFO)\nclass MyFormatter(logging.Formatter):\n def format(self, record):\n spliter = ' '\n record.msg = str(record.msg) + spliter + spliter.join(map(str, record.args))\n record.args = tuple() # set empty to args\n return super().format(record)\nformatter = MyFormatter()\nhandler = logging.FileHandler(logging_path, mode='a')\nhandler.setLevel(logging.INFO)\nhandler.setFormatter(formatter)\nconsole = logging.StreamHandler()\nconsole.setLevel(logging.INFO)\nconsole.setFormatter(formatter)\nlogger.addHandler(handler)\nlogger.addHandler(console)\n\nlogger.info('lamb', opt.lamb)\nlogger.info('lamb1', opt.lamb1)\nlogger.info('experiment_city', opt.city)\nlogger.info('experiment_month', opt.month)\nlogger.info('model_name', opt.model)\nlogger.info('mem_num', opt.mem_num)\nlogger.info('mem_dim', opt.mem_dim)\nlogger.info('decoder_type', opt.decoder)\nlogger.info('go_type', opt.go)\nlogger.info('ycov_type', opt.ycov)\nlogger.info('batch_size', opt.batch_size)\nlogger.info('rnn_units', opt.hiddenunits)\nlogger.info('num_layers', opt.num_layers)\nlogger.info('channnel_in', opt.channelin)\nlogger.info('channnel_out', opt.channelout)\nlogger.info('feature_time', opt.time)\nlogger.info('feature_history', opt.history)\n#####################################################################################################\n\ncpu_num = 1\nos.environ ['OMP_NUM_THREADS'] = str(cpu_num)\nos.environ ['OPENBLAS_NUM_THREADS'] = str(cpu_num)\nos.environ ['MKL_NUM_THREADS'] = str(cpu_num)\nos.environ ['VECLIB_MAXIMUM_THREADS'] = str(cpu_num)\nos.environ ['NUMEXPR_NUM_THREADS'] = str(cpu_num)\ntorch.set_num_threads(cpu_num)\ndevice = torch.device(\"cuda:{}\".format(opt.gpu)) if torch.cuda.is_available() else torch.device(\"cpu\")\nnp.random.seed(opt.seed)\ntorch.manual_seed(opt.seed)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(opt.seed)\n\nscaler = StandardScaler()\n\ndef main():\n train_data = [get_data(config[month]['traffic_path'], N_link, subroad_path, feature_list) for month in train_month]\n test_data = [get_data(config[month]['traffic_path'], N_link, subroad_path, feature_list) for month in test_month]\n test_flag = [get_data(config[month]['traffic_path'], N_link, subroad_path, ['accident_flag']) for month in test_month]\n \n speed_data = []\n for data in train_data:\n speed_data.append(data[:,:,0])\n for data in test_data:\n speed_data.append(data[:,:,0])\n speed_data = np.vstack(speed_data) \n scaler.fit(speed_data)\n \n for data in train_data:\n logger.info('train_data', data.shape)\n data[:,:,0] = 
scaler.transform(data[:,:,0])\n for data in test_data:\n logger.info('test_data', data.shape)\n data[:,:,0] = scaler.transform(data[:,:,0])\n \n logger.info(opt.city, opt.month, 'training started', time.ctime())\n trainXS, trainYS = getXSYS(train_data, opt.his_len, opt.seq_len)\n trainXS, trainYS, trainXCov, trainYCov = refineXSYS(trainXS, trainYS)\n logger.info('TRAIN XS.shape YS.shape, XCov.shape, YCov.shape', trainXS.shape, trainYS.shape, trainXCov.shape, trainYCov.shape)\n trainModel(model_name, 'train', trainXS, trainYS, trainYCov)\n \n logger.info(opt.city, opt.month, 'testing started', time.ctime())\n testXS, testYS = getXSYS(test_data, opt.his_len, opt.seq_len)\n _, testYSFlag = getXSYS(test_flag, opt.his_len, opt.seq_len)\n testYMask = testYSFlag[:, 0, :, 0] > 0 # (B, N) incident happen at the first prediction timeslot, t+1.\n testXS, testYS, testXCov, testYCov = refineXSYS(testXS, testYS)\n logger.info('TEST XS.shape, YS.shape, XCov.shape, YCov.shape, YMask.shape', testXS.shape, testYS.shape, testXCov.shape, testYCov.shape, testYMask.shape)\n testModel(model_name, 'test', testXS, testYS, testYCov, testYMask) \n\nif __name__ == '__main__':\n main()\n\n",
"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport math\n\nclass AGCN(nn.Module):\n def __init__(self, dim_in, dim_out, cheb_k, embed_dim):\n super(AGCN, self).__init__()\n self.cheb_k = cheb_k\n self.weights = nn.Parameter(torch.FloatTensor(cheb_k*dim_in, dim_out))\n self.bias = nn.Parameter(torch.FloatTensor(dim_out))\n nn.init.xavier_normal_(self.weights)\n nn.init.constant_(self.bias, val=0)\n \n def forward(self, x, node_embeddings):\n if len(node_embeddings.shape)==2:\n node_num = node_embeddings.shape[0]\n supports = F.softmax(F.relu(torch.mm(node_embeddings, node_embeddings.transpose(0, 1))), dim=1)\n else:\n node_num = node_embeddings.shape[1]\n supports = F.softmax(F.relu(torch.einsum('bnc,bmc->nm', node_embeddings, node_embeddings)), dim=1) \n support_set = [torch.eye(node_num).to(supports.device), supports]\n for k in range(2, self.cheb_k):\n support_set.append(torch.matmul(2 * supports, support_set[-1]) - support_set[-2]) \n x_g = []\n for support in support_set:\n x_g.append(torch.einsum(\"nm,bmc->bnc\", support, x))\n x_g = torch.cat(x_g, dim=-1) # B, N, cheb_k * dim_in\n x_gconv = torch.einsum('bni,io->bno', x_g, self.weights) + self.bias # b, N, dim_out\n return x_gconv\n \nclass AGCRNCell(nn.Module):\n def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim):\n super(AGCRNCell, self).__init__()\n self.node_num = node_num\n self.hidden_dim = dim_out\n self.gate = AGCN(dim_in+self.hidden_dim, 2*dim_out, cheb_k, embed_dim)\n self.update = AGCN(dim_in+self.hidden_dim, dim_out, cheb_k, embed_dim)\n\n def forward(self, x, state, node_embeddings):\n #x: B, num_nodes, input_dim\n #state: B, num_nodes, hidden_dim\n state = state.to(x.device)\n input_and_state = torch.cat((x, state), dim=-1)\n z_r = torch.sigmoid(self.gate(input_and_state, node_embeddings))\n z, r = torch.split(z_r, self.hidden_dim, dim=-1)\n candidate = torch.cat((x, z*state), dim=-1)\n hc = torch.tanh(self.update(candidate, node_embeddings))\n h = r*state + (1-r)*hc\n return h\n\n def init_hidden_state(self, batch_size):\n return torch.zeros(batch_size, self.node_num, self.hidden_dim)\n \nclass ADCRNN(nn.Module):\n def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim, num_layers=1):\n super(ADCRNN, self).__init__()\n assert num_layers >= 1, 'At least one DCRNN layer in the Encoder.'\n self.node_num = node_num\n self.input_dim = dim_in\n self.num_layers = num_layers\n self.dcrnn_cells = nn.ModuleList()\n self.dcrnn_cells.append(AGCRNCell(node_num, dim_in, dim_out, cheb_k, embed_dim))\n for _ in range(1, num_layers):\n self.dcrnn_cells.append(AGCRNCell(node_num, dim_out, dim_out, cheb_k, embed_dim))\n\n def forward(self, x, init_state, node_embeddings):\n #shape of x: (B, T, N, D), shape of init_state: (num_layers, B, N, hidden_dim)\n assert x.shape[2] == self.node_num and x.shape[3] == self.input_dim\n seq_length = x.shape[1]\n current_inputs = x\n output_hidden = []\n for i in range(self.num_layers):\n state = init_state[i]\n inner_states = []\n for t in range(seq_length):\n state = self.dcrnn_cells[i](current_inputs[:, t, :, :], state, node_embeddings)\n inner_states.append(state)\n output_hidden.append(state)\n current_inputs = torch.stack(inner_states, dim=1)\n #current_inputs: the outputs of last layer: (B, T, N, hidden_dim)\n #output_hidden: the last state for each layer: (num_layers, B, N, hidden_dim)\n #last_state: (B, N, hidden_dim)\n # return current_inputs, torch.stack(output_hidden, dim=0)\n return current_inputs, output_hidden\n \n def init_hidden(self, 
batch_size):\n init_states = []\n for i in range(self.num_layers):\n init_states.append(self.dcrnn_cells[i].init_hidden_state(batch_size))\n return init_states\n\nclass ADCRNN_STEP(nn.Module):\n def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim, num_layers=1):\n super(ADCRNN_STEP, self).__init__()\n assert num_layers >= 1, 'At least one DCRNN layer in the Decoder.'\n self.node_num = node_num\n self.input_dim = dim_in\n self.num_layers = num_layers\n self.dcrnn_cells = nn.ModuleList()\n self.dcrnn_cells.append(AGCRNCell(node_num, dim_in, dim_out, cheb_k, embed_dim))\n for _ in range(1, num_layers):\n self.dcrnn_cells.append(AGCRNCell(node_num, dim_out, dim_out, cheb_k, embed_dim))\n\n def forward(self, xt, init_state, node_embeddings):\n # xt: (B, N, D)\n # init_state: (num_layers, B, N, hidden_dim)\n assert xt.shape[1] == self.node_num and xt.shape[2] == self.input_dim\n current_inputs = xt\n output_hidden = []\n for i in range(self.num_layers):\n state = self.dcrnn_cells[i](current_inputs, init_state[i], node_embeddings)\n output_hidden.append(state)\n current_inputs = state\n return current_inputs, output_hidden\n\n\nclass MemGCRN(nn.Module):\n def __init__(self, num_nodes, input_dim, output_dim, horizon, rnn_units, num_layers=1, embed_dim=8, cheb_k=3,\n ycov_dim=1, mem_num=10, mem_dim=32, decoder_type='stepwise', go_type='go'):\n super(MemGCRN, self).__init__()\n self.num_node = num_nodes\n self.input_dim = input_dim\n self.hidden_dim = rnn_units\n self.output_dim = output_dim\n self.horizon = horizon\n self.num_layers = num_layers\n self.embed_dim = embed_dim\n self.cheb_k = cheb_k\n self.node_embeddings = nn.Parameter(torch.randn(self.num_node, self.embed_dim), requires_grad=True)\n \n self.ycov_dim = ycov_dim # float type t_cov or history_cov\n \n # memory\n self.mem_num = mem_num\n self.mem_dim = mem_dim\n self.memory = self.construct_memory()\n \n self.decoder_dim = self.hidden_dim + self.mem_dim # add historical average\n \n # encoder\n self.encoder = ADCRNN(num_nodes, self.input_dim, rnn_units, cheb_k, embed_dim, num_layers) # mob\n \n # deocoder\n self.decoder_type = decoder_type\n self.go_type = go_type\n if self.decoder_type == 'sequence':\n self.decoder = ADCRNN(num_nodes, self.ycov_dim, self.decoder_dim, cheb_k, embed_dim, num_layers) # mob\n elif self.decoder_type == 'stepwise':\n self.decoder = ADCRNN_STEP(num_nodes, self.output_dim + self.ycov_dim, self.decoder_dim, cheb_k, embed_dim, num_layers) # mob\n else:\n self.decoder = None\n self.proj = nn.Sequential(nn.Linear(self.decoder_dim, self.output_dim, bias=True))\n \n def construct_memory(self):\n memory_dict = nn.ParameterDict()\n memory_dict['Memory'] = nn.Parameter(torch.randn(self.mem_num, self.mem_dim), requires_grad=True) # (M, d)\n memory_dict['Wq'] = nn.Parameter(torch.randn(self.hidden_dim, self.mem_dim), requires_grad=True) # project to query\n for param in memory_dict.values():\n nn.init.xavier_normal_(param)\n return memory_dict\n \n def query_memory(self, h_t:torch.Tensor):\n B = h_t.shape[0] # h_t = h_t.squeeze(1) # B, N, hidden\n query = torch.matmul(h_t, self.memory['Wq']) # (B, N, d)\n att_score = torch.softmax(torch.matmul(query, self.memory['Memory'].t()), dim=-1) # alpha: (B, N, M)\n proto_t = torch.matmul(att_score, self.memory['Memory']) # (B, N, d)\n _, ind = torch.topk(att_score, k=2, dim=-1)\n pos = self.memory['Memory'][ind[:, :, 0]] # B, N, d\n neg = self.memory['Memory'][ind[:, :, 1]] # B, N, d\n return proto_t, query, pos, neg\n \n def forward(self, x, y_cov): \n init_state = 
self.encoder.init_hidden(x.shape[0])\n h_en, state_en = self.encoder(x, init_state, self.node_embeddings) # B, T, N, hidden \n h_t = h_en[:, -1, :, :] # B, N, hidden (last state)\n \n h_att, query, pos, neg = self.query_memory(h_t)\n h_t = torch.cat([h_t, h_att], dim=-1) \n \n ht_list = [h_t]*self.num_layers\n \n if self.decoder_type == 'sequence':\n h_de, state_de = self.decoder(y_cov, ht_list, self.node_embeddings)\n output = self.proj(h_de)\n elif self.decoder_type == 'stepwise':\n if self.go_type == 'random':\n go = torch.zeros((x.shape[0], self.num_node, self.output_dim), device=x.device)\n elif self.go_type == 'last':\n go = x[:, -1, :, :self.output_dim] # using the last input value instead of random.\n else:\n assert False, 'You must specify a correct go type: random or last'\n out = []\n for t in range(self.horizon):\n h_de, ht_list = self.decoder(torch.cat([go, y_cov[:, t, ...]], dim=-1), ht_list, self.node_embeddings)\n go = self.proj(h_de)\n out.append(go)\n output = torch.stack(out, dim=1)\n else:\n assert False, 'You must specify a correct decoder type: sequence, step_go_ycov, step_ycov'\n \n return output, h_att, query, pos, neg\n\ndef print_params(model):\n # print trainable params\n param_count = 0\n print('Trainable parameter list:')\n for name, param in model.named_parameters():\n if param.requires_grad:\n print(name, param.shape, param.numel())\n param_count += param.numel()\n print(f' \\n In total: {param_count} trainable parameters. \\n')\n return\n\ndef main():\n import sys\n import argparse\n from torchsummary import summary\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--gpu\", type=int, default=3, help=\"which GPU to use\")\n parser.add_argument('--his_len', type=int, default=6, help='sequence length of observed historical values')\n parser.add_argument('--seq_len', type=int, default=6, help='sequence length of values, which should be even nums (2,4,6,12)')\n parser.add_argument('--channelin', type=int, default=1, help='number of input channel')\n parser.add_argument('--channelout', type=int, default=1, help='number of output channel')\n parser.add_argument('--hiddenunits', type=int, default=32, help='number of hidden units')\n parser.add_argument(\"--decoder\", type=str, default='stepwise', help=\"which type of decoder: stepwise or sequence\")\n parser.add_argument('--go', type=str, default='last', help='which type of decoder go: random or last')\n opt = parser.parse_args()\n \n num_variable = 1843\n \n device = torch.device(\"cuda:{}\".format(opt.gpu)) if torch.cuda.is_available() else torch.device(\"cpu\")\n model = MemGCRN(num_nodes=num_variable, input_dim=opt.channelin, output_dim=opt.channelout, horizon=opt.seq_len, rnn_units=opt.hiddenunits, \n decoder_type=opt.decoder, go_type=opt.go).to(device)\n print_params(model)\n summary(model, [(opt.his_len, num_variable, opt.channelin), (opt.seq_len, num_variable, opt.channelout)], device=device)\n \nif __name__ == '__main__':\n main()\n"
] |
[
[
"torch.nn.init.uniform_",
"torch.load",
"numpy.squeeze",
"numpy.vstack",
"torch.utils.data.DataLoader",
"torch.nn.init.xavier_uniform_",
"torch.set_num_threads",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"torch.nn.L1Loss",
"torch.utils.data.TensorDataset",
"numpy.random.seed",
"torch.cuda.manual_seed",
"torch.Tensor",
"torch.manual_seed",
"torch.nn.TripletMarginLoss",
"sklearn.preprocessing.StandardScaler",
"torch.nn.MSELoss",
"numpy.loadtxt"
],
[
"torch.zeros",
"torch.nn.init.constant_",
"torch.cat",
"torch.nn.ModuleList",
"torch.nn.ParameterDict",
"torch.nn.init.xavier_normal_",
"torch.einsum",
"torch.randn",
"torch.eye",
"torch.matmul",
"torch.nn.Linear",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.split",
"torch.topk",
"torch.device",
"torch.stack"
]
] |
dmayo/brain-score
|
[
"3ab4258152c9e3f8c7d29afb10158b184dbcebbe"
] |
[
"packaging/dicarlo/sanghavi/sanghavimurty2020things1.py"
] |
[
"import os\nfrom pathlib import Path\nimport json\n\nimport numpy as np\nimport xarray as xr\nimport pandas as pd\n\nfrom brainio_base.assemblies import NeuronRecordingAssembly\nfrom brainio_base.stimuli import StimulusSet\nfrom brainio_collection.packaging import package_data_assembly, package_stimulus_set\nfrom mkgu_packaging.dicarlo.sanghavi import filter_neuroids\n\n\ndef collect_stimuli(data_dir):\n image_dir = data_dir / 'images' / 'things-1'\n assert os.path.isdir(image_dir)\n files = sorted(os.listdir(image_dir), key=lambda x: int(os.path.splitext(x)[0]))\n files = files[:-130] # Discard last 130 images (5 grey and 25x5 normalizer images)\n\n assert os.path.isdir(data_dir / 'image-metadata')\n stimuli = pd.read_csv(data_dir / 'image-metadata' / 'things_1_metadata.csv')\n\n stimuli = stimuli.rename(columns={'id': 'image_id'})\n\n stimuli['image_current_local_file_path'] = stimuli.apply(\n lambda row: os.path.join(image_dir, str(row.image_id) + '.jpg'), axis=1)\n\n assert len(np.unique(stimuli['image_id'])) == len(stimuli)\n stimuli = StimulusSet(stimuli)\n stimuli.image_paths = \\\n {stimuli.at[idx, 'image_id']: stimuli.at[idx, 'image_current_local_file_path'] for idx in range(len(stimuli))}\n return stimuli\n\n\ndef load_responses(data_dir, stimuli):\n data_dir = data_dir / 'database'\n assert os.path.isdir(data_dir)\n psth = np.load(data_dir / 'solo.rsvp.things-1.experiment_psth.npy') # Shaped images x repetitions x time_bins x channels\n\n # Compute firing rate for given time bins\n timebins = [[70, 170], [170, 270], [50, 100], [100, 150], [150, 200], [200, 250], [70, 270]]\n photodiode_delay = 30 # Delay recorded on photodiode is ~30ms\n timebase = np.arange(-100, 381, 10) # PSTH from -100ms to 380ms relative to stimulus onset\n assert len(timebase) == psth.shape[2]\n rate = np.empty((len(timebins), psth.shape[0], psth.shape[1], psth.shape[3]))\n for idx, tb in enumerate(timebins):\n t_cols = np.where((timebase >= (tb[0] + photodiode_delay)) & (timebase < (tb[1] + photodiode_delay)))[0]\n rate[idx] = np.mean(psth[:, :, t_cols, :], axis=2) # Shaped time bins x images x repetitions x channels\n\n assembly = xr.DataArray(rate,\n coords={'repetition': ('repetition', list(range(rate.shape[2]))),\n 'time_bin_id': ('time_bin', list(range(rate.shape[0]))),\n 'time_bin_start': ('time_bin', [x[0] for x in timebins]),\n 'time_bin_stop': ('time_bin', [x[1] for x in timebins])},\n dims=['time_bin', 'image', 'repetition', 'neuroid'])\n\n # Add neuroid related meta data\n neuroid_meta = pd.DataFrame(json.load(open(data_dir.parent / 'array-metadata' / 'mapping.json')))\n for column_name, column_data in neuroid_meta.iteritems():\n assembly = assembly.assign_coords(**{f'{column_name}': ('neuroid', list(column_data.values))})\n\n # Add stimulus related meta data\n for column_name, column_data in stimuli.iteritems():\n assembly = assembly.assign_coords(**{f'{column_name}': ('image', list(column_data.values))})\n\n # Collapse dimensions 'image' and 'repetitions' into a single 'presentation' dimension\n assembly = assembly.stack(presentation=('image', 'repetition')).reset_index('presentation')\n assembly = assembly.drop('image')\n assembly = NeuronRecordingAssembly(assembly)\n\n # Filter noisy electrodes\n psth = np.load(data_dir / 'solo.rsvp.things-1.normalizer_psth.npy')\n t_cols = np.where((timebase >= (70 + photodiode_delay)) & (timebase < (170 + photodiode_delay)))[0]\n rate = np.mean(psth[:, :, t_cols, :], axis=2)\n normalizer_assembly = xr.DataArray(rate,\n coords={'repetition': ('repetition', 
list(range(rate.shape[1]))),\n 'image_id': ('image', list(range(rate.shape[0]))),\n 'id': ('image', list(range(rate.shape[0])))},\n dims=['image', 'repetition', 'neuroid'])\n for column_name, column_data in neuroid_meta.iteritems():\n normalizer_assembly = normalizer_assembly.assign_coords(\n **{f'{column_name}': ('neuroid', list(column_data.values))})\n normalizer_assembly = normalizer_assembly.stack(presentation=('image', 'repetition')).reset_index('presentation')\n normalizer_assembly = normalizer_assembly.drop('image')\n normalizer_assembly = normalizer_assembly.transpose('presentation', 'neuroid')\n normalizer_assembly = NeuronRecordingAssembly(normalizer_assembly)\n\n filtered_assembly = filter_neuroids(normalizer_assembly, 0.7)\n assembly = assembly.sel(neuroid=np.isin(assembly.neuroid_id, filtered_assembly.neuroid_id))\n assembly = assembly.transpose('presentation', 'neuroid', 'time_bin')\n\n # Add other experiment and data processing related info\n assembly.attrs['image_size_degree'] = 8\n assembly.attrs['stim_on_time_ms'] = 100\n\n return assembly\n\n\ndef main():\n data_dir = Path(__file__).parents[6] / 'data2' / 'active' / 'users' / 'sachis'\n assert os.path.isdir(data_dir)\n\n stimuli = collect_stimuli(data_dir)\n stimuli.identifier = 'dicarlo.THINGS1'\n assembly = load_responses(data_dir, stimuli)\n assembly.name = 'dicarlo.SanghaviMurty2020THINGS1'\n\n print('Packaging stimuli')\n package_stimulus_set(stimuli, stimulus_set_identifier=stimuli.identifier, bucket_name='brainio.dicarlo')\n print('Packaging assembly')\n package_data_assembly(assembly, assembly_identifier=assembly.name, stimulus_set_identifier=stimuli.identifier,\n bucket_name='brainio.dicarlo')\n return\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.read_csv",
"numpy.unique",
"numpy.arange",
"numpy.mean",
"numpy.load",
"numpy.where",
"numpy.isin"
]
] |
rubensmau/splink
|
[
"da4f5d5bc09753b6c6974af308dd1bad324d9b4b"
] |
[
"splink/intuition.py"
] |
[
"from .model import Model\n\nfrom .charts import load_chart_definition, altair_if_installed_else_json\n\nimport pandas as pd\nfrom math import log2\n\ninitial_template = \"\"\"\nInitial probability of match (prior) = λ = {lam:.4g}\n\"\"\"\n\ncol_template = [\n (\"Comparison of {column_name}. Values are:\", \"\"),\n (\"{column_name}_l:\", \"{value_l}\"),\n (\"{column_name}_r:\", \"{value_r}\"),\n (\"Comparison has:\", \"{num_levels} levels\"),\n (\"Level for this comparison:\", \"{gamma_column_name} = {gamma_index}\"),\n (\"m probability = P(level|match):\", \"{m_probability:.4g}\"),\n (\"u probability = P(level|non-match):\", \"{u_probability:.4g}\"),\n (\"Bayes factor = m/u:\", \"{bayes_factor:.4g}\"),\n (\"New probability of match (updated belief):\", \"{updated_belief:.4g}\"),\n]\n\nend_template = \"\"\"\nFinal probability of match = {final:.4g}\n\nReminder:\n\nThe m probability for a given level is the proportion of matches which are in this level.\nWe would generally expect the highest similarity level to have the largest proportion of matches.\nFor example, we would expect first name field to match exactly amongst most matching records, except where nicknames, aliases or typos have occurred.\nFor a comparison column that changes through time, like address, we may expect a lower proportion of comparisons to be in the highest similarity level.\n\nThe u probability for a given level is the proportion of non-matches which are in this level.\nWe would generally expect the lowest similarity level to have the highest proportion of non-matches, but the magnitude depends on the cardinality of the field.\nFor example, we would expect that in the vast majority of non-matching records, the date of birth field would not match. However, we would expect it to be common for gender to match amongst non-matches.\n\"\"\"\n\n\ndef intuition_report(row_dict: dict, model: Model):\n \"\"\"Generate a text summary of a row in the comparison table which explains how the match_probability was computed\n\n Args:\n row_dict (dict): A python dictionary representing the comparison row\n model (Model): splink Model object\n\n Returns:\n string: The intuition report\n \"\"\"\n\n lam = model.current_settings_obj[\"proportion_of_matches\"]\n report = initial_template.format(lam=lam)\n current_prob = lam\n\n for cc in model.current_settings_obj.comparison_columns_list:\n d = cc.describe_row_dict(row_dict)\n\n bf = d[\"bayes_factor\"]\n\n a = bf * current_prob\n new_p = a / (a + (1 - current_prob))\n d[\"updated_belief\"] = new_p\n current_prob = new_p\n\n col_report = []\n col_report.append(\"------\")\n for (blurb, value) in col_template:\n blurb_fmt = blurb.format(**d)\n\n value_fmt = value.format(**d)\n col_report.append(f\"{blurb_fmt:<50} {value_fmt}\")\n col_report.append(\"\\n\")\n col_report = \"\\n\".join(col_report)\n report += col_report\n\n report += end_template.format(final=new_p)\n\n if len(model.current_settings_obj[\"blocking_rules\"]) > 1:\n match_key = int(row_dict[\"match_key\"])\n br = model.current_settings_obj[\"blocking_rules\"][match_key]\n br = f\"\\nThis comparison was generated by the blocking rule: {br}\"\n report += br\n\n return report\n\n\ndef _get_bayes_factors(row_dict, model):\n bayes_factors = []\n lam = model.current_settings_obj[\"proportion_of_matches\"]\n for cc in model.current_settings_obj.comparison_columns_list:\n row_desc = cc.describe_row_dict(row_dict, lam)\n bayes_factors.append(row_desc)\n\n return bayes_factors\n\n\ndef bayes_factor_chart(row_dict, model):\n chart_path = 
\"bayes_factor_chart_def.json\"\n bayes_factor_chart_def = load_chart_definition(chart_path)\n bayes_factor_chart_def[\"data\"][\"values\"] = _get_bayes_factors(row_dict, model)\n bayes_factor_chart_def[\"encoding\"][\"y\"][\"field\"] = \"column_name\"\n del bayes_factor_chart_def[\"encoding\"][\"row\"]\n\n return altair_if_installed_else_json(bayes_factor_chart_def)\n\ndef bayes_factor_intuition_chart(row_dict, model):\n chart_path = \"bayes_factor_intuition_chart_def.json\"\n bayes_factor_intuition_chart_def = load_chart_definition(chart_path)\n \n data = _get_bayes_factors(row_dict, model)\n\n # Get initial and final bayes factors\n lam = model.current_settings_obj[\"proportion_of_matches\"]\n bf_init = lam/(1-lam)\n bf_final = sum([d['log2_bayes_factor'] for d in data]) + log2(bf_init) \n\n # Sort records in descending order of influence\n # with start and end positions added\n df = pd.DataFrame(data)\\\n .sort_values(by=\"log2_bayes_factor\", key=abs, ascending=False)\\\n .reset_index(drop=True)\\\n .append({\n 'bayes_factor': 2**bf_final, \n 'log2_bayes_factor': bf_final, \n 'column_name': 'Final score'\n }, \n ignore_index=True\n )\n df = pd.DataFrame({\n 'bayes_factor': bf_init, \n 'log2_bayes_factor': log2(bf_init), \n 'column_name': 'Prior lambda'\n }, \n index=[0]\n ).append(df, ignore_index=True).reset_index() \n \n bayes_factor_intuition_chart_def[\"data\"][\"values\"] = df.to_dict('records')\n\n return altair_if_installed_else_json(bayes_factor_intuition_chart_def)\n"
] |
[
[
"pandas.DataFrame"
]
] |