repo_name: string (lengths 6-130)
hexsha: list
file_path: list
code: list
apis: list
possible_versions: list
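For reference, a minimal sketch of how one record of this dump could be modeled and loaded, assuming the underlying file is JSON Lines with exactly the six fields above; the `Record` dataclass and the `load_records` helper are illustrative names, not part of the dataset itself.

```python
import json
from dataclasses import dataclass
from typing import Dict, List


@dataclass
class Record:
    """One row of the dump: a repository plus per-file metadata (assumed layout)."""
    repo_name: str                              # e.g. "Ajithsj96/RealTimeFRuseDeep"
    hexsha: List[str]                           # commit hash per file
    file_path: List[str]                        # path per file, e.g. "model/prediction.py"
    code: List[str]                             # full source text per file
    apis: List[List[str]]                       # API calls detected per file
    possible_versions: List[Dict[str, list]]    # candidate versions per tracked library


def load_records(path: str) -> List[Record]:
    """Parse a JSON-Lines dump; assumes one JSON object per line with the fields above."""
    records = []
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                records.append(Record(**json.loads(line)))
    return records
```

The list-valued fields appear to be aligned per file, so `zip(record.file_path, record.code, record.apis)` would walk the files of a single repository.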
Ajithsj96/RealTimeFRuseDeep
[ "a5ebfc1137bc3e8530ffa4423842d3b2ba13954a" ]
[ "model/prediction.py" ]
[ "import argparse\n\nimport feature_utility as fu\nimport myVGG\n\nimport cv2\nimport numpy as np\n\nparser = argparse.ArgumentParser(description=(\"Testing Prediction\"))\nparser.add_argument('--image', help=('Input an image to test model prediction'))\nparser.add_argument('--dataset', help=('Input a directory to test model prediction'))\n\nargs = parser.parse_args()\ndef main():\n model = myVGG.VGG_16('my_model_weights.h5')\n\n if args.image is not None:\n print ('Image Prediction Mode')\n img = fu.preprocessing(cv2.imread(args.image))\n X = np.expand_dims(img, axis=0)\n X = np.expand_dims(X, axis=0)\n result = model.predict(X)\n print (result)\n return\n elif args.dataset is not None:\n print (\"Directory Prediction Mode\")\n X, y = fu.extract_features(args.dataset)\n scores = model.evaluate(X, y, verbose=0)\n print (scores)\n return \n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.expand_dims" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ThirstyScholar/trading-bitcoin-with-reinforcement-learning
[ "e2163e954b1f5f656c49fbfb560ddd4635548a91" ]
[ "frl/data.py" ]
[ "import numpy as np\nimport pandas as pd\n\n\nclass Data(object):\n\n def __init__(self, csv_path):\n self.csv_path = csv_path\n self.data = pd.read_csv(csv_path) # read CSV into DataFrame\n self.feat = None\n\n def __len__(self):\n return len(self.data)\n\n def remove_outlier(self):\n idx = pd.datetime(2017, 4, 15, 23)\n self.data.drop(index=idx, inplace=True)\n self.feat.drop(index=idx, inplace=True)\n\n def preprocess(self):\n \"\"\"\n Step 1. Create datetime index and select datatime range\n Step 2. Drop columns 'Timestamp', 'Volume_(Currency)' and 'Weighted_Price'\n Step 3. Rename 'Volume_(BTC)' as 'Volume'\n Step 4. Resample to 15-minute bars and drop NaN values\n\n :return: None\n \"\"\"\n\n # Step 1\n self.data.index = pd.to_datetime(self.data['Timestamp'], unit='s')\n self.data = self.data.loc[self.data.index < pd.datetime(2017, 7, 1)]\n\n # Step 2\n self.data.drop(['Timestamp', 'Volume_(Currency)', 'Weighted_Price'], axis=1, inplace=True)\n\n # Step 3\n self.data.rename(columns={'Volume_(BTC)': 'Volume'}, inplace=True)\n\n # Step 4\n self.data = self.data.groupby(pd.Grouper(freq='15Min')).aggregate({\n 'Open': 'first',\n 'High': 'max',\n 'Low': 'min',\n 'Close': 'last',\n 'Volume': 'sum'\n })\n self.data.dropna(inplace=True)\n\n def extract_feature(self):\n \"\"\"\n Step 1. Create an empty feature DataFrame\n Step 2. Calculate features\n Step 3. Drop rows with NaN values\n Step 4. Remove outlier\n\n :return: None\n \"\"\"\n\n # Step 1\n self.feat = pd.DataFrame(index=self.data.index)\n\n # Step 2\n cls = self.data['Close']\n vol = self.data['Volume']\n np_cls = np.log(cls)\n\n self.feat['r'] = np_cls.diff()\n self.feat['r_1'] = self.feat['r'].shift(1)\n self.feat['r_2'] = self.feat['r'].shift(2)\n\n r = self.feat['r']\n self.feat['rZ12'] = Data.zscore(r, 12)\n self.feat['rZ96'] = Data.zscore(r, 96)\n\n self.feat['pma12'] = Data.zscore(Data.ser2ma_ret(cls, 12), 96)\n self.feat['pma96'] = Data.zscore(Data.ser2ma_ret(cls, 96), 96)\n self.feat['pma672'] = Data.zscore(Data.ser2ma_ret(cls, 672), 96)\n\n self.feat['ma4/36'] = Data.zscore(Data.ma2ma_ret(cls, 4, 36), 96)\n self.feat['ma12/96'] = Data.zscore(Data.ma2ma_ret(cls, 12, 96), 96)\n\n self.feat['ac12/12'] = Data.zscore(Data.acceleration(cls, 12, 12), 96)\n self.feat['ac96/96'] = Data.zscore(Data.acceleration(cls, 96, 12), 96)\n\n self.feat['vZ12'] = Data.zscore(vol, 12)\n self.feat['vZ96'] = Data.zscore(vol, 96)\n self.feat['vZ672'] = Data.zscore(vol, 672)\n\n self.feat['vma12'] = Data.zscore(Data.ser2ma_ret(vol, 12), 96)\n self.feat['vma96'] = Data.zscore(Data.ser2ma_ret(vol, 96), 96)\n self.feat['vma672'] = Data.zscore(Data.ser2ma_ret(vol, 672), 96)\n\n vola_12 = Data.roll_std(r, 12) # 12-period volatility\n vola_96 = Data.roll_std(r, 96)\n vola_672 = Data.roll_std(r, 672)\n self.feat['vol12'] = Data.zscore(vola_12, 96)\n self.feat['vol96'] = Data.zscore(vola_96, 96)\n self.feat['vol672'] = Data.zscore(vola_672, 96)\n\n self.feat['dv12/96'] = Data.zscore(Data.ser2ma_ret(vola_12, 96), 96)\n self.feat['dv96/672'] = Data.zscore(Data.ser2ma_ret(vola_96, 672), 96)\n\n # Step 3\n self.feat.dropna(inplace=True)\n self.data = self.data.loc[self.feat.index] # select data where feat are available\n\n # Step 4\n self.remove_outlier()\n\n @staticmethod\n def roll_mean(s, window):\n \"\"\"\n :param s: Pandas Series\n :param window: int\n :return: Pandas Series\n \"\"\"\n return s.rolling(window).mean()\n\n @staticmethod\n def roll_std(s, window):\n \"\"\"\n :param s: Pandas Series\n :param window: int\n :return: Pandas Series\n \"\"\"\n 
return s.rolling(window).std()\n\n @staticmethod\n def zscore(s, window):\n \"\"\"\n :param s: Pandas Series\n :param window: int\n :return: Pandas Series\n \"\"\"\n roll_mean = s.rolling(window).mean()\n roll_std = s.rolling(window).std()\n return (s - roll_mean) / (roll_std + 1e-6)\n\n @staticmethod\n def ser2ma_ret(s, window):\n \"\"\"\n Series-to-Moving Average return.\n :param s: Pandas Series\n :param window: int\n :return: Pandas Series\n \"\"\"\n roll_mean = s.rolling(window).mean()\n return (s - roll_mean) - 1\n\n @staticmethod\n def ma2ma_ret(s, window_1, window_2):\n \"\"\"\n Series-to-series return.\n :param s: Pandas Series\n :param window_1: int\n :param window_2: int\n :return: Pandas Series\n \"\"\"\n return s.rolling(window_1).mean() / s.rolling(window_2).mean() - 1\n\n @staticmethod\n def acceleration(s, window_1, window_2):\n \"\"\"\n See the definition from the original post \"https://launchpad.ai/blog/trading-bitcoin\"\n :param s: Pandas Series\n :param window_1: int\n :param window_2: int\n :return: Pandas Series\n \"\"\"\n tmp = s / s.rolling(window_1).mean()\n return tmp / tmp.rolling(window_2).mean()\n\n\ndef test_data():\n data_path = './bitcoin-historical-data/coinbaseUSD_1-min_data.csv'\n data = Data(data_path)\n data.preprocess()\n data.extract_feature()\n\n\nif __name__ == '__main__':\n test_data()\n" ]
[ [ "numpy.log", "pandas.read_csv", "pandas.to_datetime", "pandas.Grouper", "pandas.DataFrame", "pandas.datetime" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jabader97/backpack
[ "089daafa0d611e13901fd7ecf8a0d708ce7a5928", "089daafa0d611e13901fd7ecf8a0d708ce7a5928", "089daafa0d611e13901fd7ecf8a0d708ce7a5928", "089daafa0d611e13901fd7ecf8a0d708ce7a5928", "089daafa0d611e13901fd7ecf8a0d708ce7a5928" ]
[ "test/implementation/implementation_autograd.py", "backpack/core/derivatives/rnn.py", "test/core/derivatives/batch_norm_settings.py", "backpack/extensions/firstorder/batch_l2_grad/linear.py", "test/utils/test_conv_transpose_settings.py" ]
[ "import torch\n\nfrom backpack.hessianfree.ggnvp import ggn_vector_product_from_plist\nfrom backpack.hessianfree.hvp import hessian_vector_product\nfrom backpack.hessianfree.rop import R_op\nfrom backpack.utils.convert_parameters import vector_to_parameter_list\n\nfrom .implementation import Implementation\n\n\nclass AutogradImpl(Implementation):\n def gradient(self):\n return list(torch.autograd.grad(self.loss(), self.model.parameters()))\n\n def batch_gradients(self):\n batch_grads = [\n torch.zeros(self.N, *p.size()).to(self.device)\n for p in self.model.parameters()\n ]\n\n for b in range(self.N):\n gradients = torch.autograd.grad(self.loss(b), self.model.parameters())\n for idx, g in enumerate(gradients):\n batch_grads[idx][b, :] = g.detach() / self.N\n\n return batch_grads\n\n def batch_l2(self):\n batch_grad = self.batch_gradients()\n batch_l2 = [(g ** 2).sum(list(range(1, len(g.shape)))) for g in batch_grad]\n return batch_l2\n\n def variance(self):\n batch_grad = self.batch_gradients()\n variances = [torch.var(g, dim=0, unbiased=False) for g in batch_grad]\n return variances\n\n def sgs(self):\n sgs = self.plist_like(self.model.parameters())\n\n for b in range(self.N):\n gradients = torch.autograd.grad(self.loss(b), self.model.parameters())\n for idx, g in enumerate(gradients):\n sgs[idx] += (g.detach() / self.N) ** 2\n\n return sgs\n\n def diag_ggn(self):\n outputs = self.model(self.problem.X)\n loss = self.problem.lossfunc(outputs, self.problem.Y)\n\n def extract_ith_element_of_diag_ggn(i, p):\n v = torch.zeros(p.numel()).to(self.device)\n v[i] = 1.0\n vs = vector_to_parameter_list(v, [p])\n GGN_vs = ggn_vector_product_from_plist(loss, outputs, [p], vs)\n GGN_v = torch.cat([g.detach().view(-1) for g in GGN_vs])\n return GGN_v[i]\n\n diag_ggns = []\n for p in list(self.model.parameters()):\n diag_ggn_p = torch.zeros_like(p).view(-1)\n\n for parameter_index in range(p.numel()):\n diag_value = extract_ith_element_of_diag_ggn(parameter_index, p)\n diag_ggn_p[parameter_index] = diag_value\n\n diag_ggns.append(diag_ggn_p.view(p.size()))\n\n return diag_ggns\n\n def diag_h(self):\n loss = self.problem.lossfunc(self.model(self.problem.X), self.problem.Y)\n\n def hvp(df_dx, x, v):\n Hv = R_op(df_dx, x, v)\n return [j.detach() for j in Hv]\n\n def extract_ith_element_of_diag_h(i, p, df_dx):\n v = torch.zeros(p.numel()).to(self.device)\n v[i] = 1.0\n vs = vector_to_parameter_list(v, [p])\n\n Hvs = hvp(df_dx, [p], vs)\n Hv = torch.cat([g.detach().view(-1) for g in Hvs])\n\n return Hv[i]\n\n diag_hs = []\n for p in list(self.model.parameters()):\n diag_h_p = torch.zeros_like(p).view(-1)\n\n df_dx = torch.autograd.grad(loss, [p], create_graph=True, retain_graph=True)\n for parameter_index in range(p.numel()):\n diag_value = extract_ith_element_of_diag_h(parameter_index, p, df_dx)\n diag_h_p[parameter_index] = diag_value\n\n diag_hs.append(diag_h_p.view(p.size()))\n\n return diag_hs\n\n def h_blocks(self):\n mat_list = []\n for p in self.model.parameters():\n mat_list.append(\n torch.eye(p.numel(), device=p.device).reshape(p.numel(), *p.shape)\n )\n # return self.hmp(mat_list)\n hmp_list = self.hmp(mat_list)\n return [\n mat.reshape(p.numel(), p.numel())\n for mat, p in zip(hmp_list, self.model.parameters())\n ]\n\n def hvp(self, vec_list):\n mat_list = [vec.unsqueeze(0) for vec in vec_list]\n results = self.hmp(mat_list)\n results_vec = [mat.squeeze(0) for mat in results]\n return results_vec\n\n def hmp(self, mat_list):\n assert len(mat_list) == len(list(self.model.parameters()))\n\n loss 
= self.problem.lossfunc(self.model(self.problem.X), self.problem.Y)\n\n results = []\n for p, mat in zip(self.model.parameters(), mat_list):\n results.append(self.hvp_applied_columnwise(loss, p, mat))\n\n return results\n\n def hvp_applied_columnwise(self, f, p, mat):\n h_cols = []\n for i in range(mat.size(0)):\n hvp_col_i = hessian_vector_product(f, [p], mat[i, :])[0]\n h_cols.append(hvp_col_i.unsqueeze(0))\n\n return torch.cat(h_cols, dim=0)\n\n def ggn_blocks(self):\n mat_list = []\n for p in self.model.parameters():\n mat_list.append(\n torch.eye(p.numel(), device=p.device).reshape(p.numel(), *p.shape)\n )\n ggn_mp_list = self.ggn_mp(mat_list)\n return [\n mat.reshape(p.numel(), p.numel())\n for mat, p in zip(ggn_mp_list, self.model.parameters())\n ]\n # return ggn_mp_list\n\n def ggn_vp(self, vec_list):\n mat_list = [vec.unsqueeze(0) for vec in vec_list]\n results = self.ggn_mp(mat_list)\n results_vec = [mat.squeeze(0) for mat in results]\n return results_vec\n\n def ggn_mp(self, mat_list):\n assert len(mat_list) == len(list(self.model.parameters()))\n\n outputs = self.model(self.problem.X)\n loss = self.problem.lossfunc(outputs, self.problem.Y)\n\n results = []\n for p, mat in zip(self.model.parameters(), mat_list):\n results.append(self.ggn_vp_applied_columnwise(loss, outputs, p, mat))\n\n return results\n\n def ggn_vp_applied_columnwise(self, loss, out, p, mat):\n ggn_cols = []\n for i in range(mat.size(0)):\n col_i = mat[i, :]\n GGN_col_i = ggn_vector_product_from_plist(loss, out, [p], col_i)[0]\n ggn_cols.append(GGN_col_i.unsqueeze(0))\n\n return torch.cat(ggn_cols, dim=0)\n\n def plist_like(self, plist):\n return [torch.zeros(*p.size()).to(self.device) for p in plist]\n\n def parameter_numels(self):\n return [p.numel() for p in self.model.parameters()]\n", "\"\"\"Partial derivatives for the torch.nn.RNN layer.\"\"\"\nfrom typing import List, Tuple\n\nfrom torch import Tensor, cat, einsum, zeros\nfrom torch.nn import RNN\n\nfrom backpack.core.derivatives.basederivatives import BaseParameterDerivatives\nfrom backpack.utils.subsampling import subsample\n\n\nclass RNNDerivatives(BaseParameterDerivatives):\n \"\"\"Partial derivatives for the torch.nn.RNN layer.\n\n a_t = W_ih x_t + b_ih + W_hh h_{t-1} + b_hh\n h_t = tanh(a_t)\n\n We assume that it is always batch axis first.\n\n Index conventions:\n ------------------\n * t: Sequence dimension\n * v: Free dimension\n * n: Batch dimension\n * h: Output dimension\n * i: Input dimension\n \"\"\"\n\n @staticmethod\n def _check_parameters(module: RNN) -> None:\n \"\"\"Check the parameters of module.\n\n Args:\n module: module which to check\n\n Raises:\n NotImplementedError: If any parameter of module does not match expectation\n \"\"\"\n if not module.batch_first:\n raise NotImplementedError(\"Batch axis must be first.\")\n if module.num_layers > 1:\n raise NotImplementedError(\"only num_layers = 1 is supported\")\n if not module.nonlinearity == \"tanh\":\n raise NotImplementedError(\"only nonlinearity = tanh is supported\")\n if module.bias is not True:\n raise NotImplementedError(\"only bias = True is supported\")\n if not module.dropout == 0:\n raise NotImplementedError(\"only dropout = 0 is supported\")\n if module.bidirectional is not False:\n raise NotImplementedError(\"only bidirectional = False is supported\")\n\n def hessian_is_zero(self, module: RNN) -> bool: # noqa: D102\n return False\n\n @classmethod\n def _a_jac_t_mat_prod(\n cls,\n module: RNN,\n weight_hh_l0: Tensor,\n mat: Tensor,\n subsampling: List[int] = None,\n ) -> 
Tensor:\n \"\"\"Calculates jacobian vector product wrt a.\n\n Args:\n module: RNN module\n weight_hh_l0: weight matrix hidden-to-hidden\n mat: matrix to multiply\n subsampling: subsampling\n\n Returns:\n jacobian vector product wrt a\n \"\"\"\n V, N, T, H = mat.shape\n output = subsample(module.output, dim=0, subsampling=subsampling)\n a_jac_t_mat_prod: Tensor = zeros(V, N, T, H, device=mat.device, dtype=mat.dtype)\n for t in reversed(range(T)):\n if t == (T - 1):\n a_jac_t_mat_prod[:, :, t] = einsum(\n \"vnh,nh->vnh\", mat[:, :, t], 1 - output[:, t] ** 2\n )\n else:\n a_jac_t_mat_prod[:, :, t] = einsum(\n \"vnh,nh->vnh\",\n mat[:, :, t]\n + einsum(\n \"vng,gh->vnh\",\n a_jac_t_mat_prod[:, :, t + 1],\n weight_hh_l0,\n ),\n 1 - output[:, t] ** 2,\n )\n return a_jac_t_mat_prod\n\n def _jac_t_mat_prod(\n self,\n module: RNN,\n g_inp: Tuple[Tensor],\n g_out: Tuple[Tensor],\n mat: Tensor,\n subsampling: List[int] = None,\n ) -> Tensor:\n self._check_parameters(module)\n return einsum(\n f\"vnth,hk->v{'nt' if module.batch_first else 'tn'}k\",\n self._a_jac_t_mat_prod(\n module,\n module.weight_hh_l0,\n mat,\n subsampling,\n ),\n module.weight_ih_l0,\n )\n\n def _jac_mat_prod(\n self, module: RNN, g_inp: Tuple[Tensor], g_out: Tuple[Tensor], mat: Tensor\n ) -> Tensor:\n self._check_parameters(module)\n H: int = module.hidden_size\n V, N, T, _ = mat.shape\n _jac_mat_prod: Tensor = zeros(V, N, T, H, device=mat.device, dtype=mat.dtype)\n for t in range(T):\n if t == 0:\n _jac_mat_prod[:, :, t] = einsum(\n \"nh,hi,vni->vnh\",\n 1 - module.output[:, t] ** 2,\n module.weight_ih_l0,\n mat[:, :, t],\n )\n else:\n _jac_mat_prod[:, :, t] = einsum(\n \"nh,vnh->vnh\",\n 1 - module.output[:, t] ** 2,\n einsum(\n \"hi,vni->vnh\",\n module.weight_ih_l0,\n mat[:, :, t],\n )\n + einsum(\n \"hk,vnk->vnh\",\n module.weight_hh_l0,\n _jac_mat_prod[:, :, t - 1],\n ),\n )\n return _jac_mat_prod\n\n def _bias_ih_l0_jac_t_mat_prod(\n self,\n module: RNN,\n g_inp: Tuple[Tensor],\n g_out: Tuple[Tensor],\n mat: Tensor,\n sum_batch: bool = True,\n subsampling: List[int] = None,\n ) -> Tensor:\n \"\"\"Apply transposed Jacobian of the output w.r.t. bias_ih_l0.\n\n Args:\n module: extended module\n g_inp: input gradient\n g_out: output gradient\n mat: matrix to multiply\n sum_batch: Whether to sum along batch axis. Defaults to True.\n subsampling: Indices of active samples. Defaults to ``None`` (all samples).\n\n Returns:\n product\n \"\"\"\n self._check_parameters(module)\n if sum_batch:\n dim: List[int] = [1, 2]\n else:\n dim: int = 2\n return self._a_jac_t_mat_prod(\n module,\n module.weight_hh_l0,\n mat,\n subsampling,\n ).sum(dim=dim)\n\n def _bias_hh_l0_jac_t_mat_prod(\n self,\n module: RNN,\n g_inp: Tuple[Tensor],\n g_out: Tuple[Tensor],\n mat: Tensor,\n sum_batch: bool = True,\n subsampling: List[int] = None,\n ) -> Tensor:\n \"\"\"Apply transposed Jacobian of the output w.r.t. bias_hh_l0.\n\n Args:\n module: extended module\n g_inp: input gradient\n g_out: output gradient\n mat: matrix to multiply\n sum_batch: Whether to sum along batch axis. Defaults to True.\n subsampling: Indices of active samples. 
Defaults to ``None`` (all samples).\n\n Returns:\n product\n \"\"\"\n return self._bias_ih_l0_jac_t_mat_prod(\n module, g_inp, g_out, mat, sum_batch=sum_batch, subsampling=subsampling\n )\n\n def _weight_ih_l0_jac_t_mat_prod(\n self,\n module: RNN,\n g_inp: Tuple[Tensor],\n g_out: Tuple[Tensor],\n mat: Tensor,\n sum_batch: bool = True,\n subsampling: List[int] = None,\n ) -> Tensor:\n \"\"\"Apply transposed Jacobian of the output w.r.t. weight_ih_l0.\n\n Args:\n module: extended module\n g_inp: input gradient\n g_out: output gradient\n mat: matrix to multiply\n sum_batch: Whether to sum along batch axis. Defaults to True.\n subsampling: Indices of active samples. Defaults to ``None`` (all samples).\n\n Returns:\n product\n \"\"\"\n self._check_parameters(module)\n return einsum(\n f\"vnth,ntj->v{'' if sum_batch else 'n'}hj\",\n self._a_jac_t_mat_prod(module, module.weight_hh_l0, mat, subsampling),\n subsample(module.input0, dim=0, subsampling=subsampling),\n )\n\n def _weight_hh_l0_jac_t_mat_prod(\n self,\n module: RNN,\n g_inp: Tuple[Tensor],\n g_out: Tuple[Tensor],\n mat: Tensor,\n sum_batch: bool = True,\n subsampling: List[int] = None,\n ) -> Tensor:\n \"\"\"Apply transposed Jacobian of the output w.r.t. weight_hh_l0.\n\n Args:\n module: extended module\n g_inp: input gradient\n g_out: output gradient\n mat: matrix to multiply\n sum_batch: Whether to sum along batch axis. Defaults to True.\n subsampling: Indices of active samples. Defaults to ``None`` (all samples).\n\n Returns:\n product\n \"\"\"\n self._check_parameters(module)\n _, N, _, H = mat.shape\n output = subsample(module.output, dim=0, subsampling=subsampling)\n single_step = zeros(N, 1, H, device=mat.device, dtype=mat.dtype)\n output_shifted = cat([single_step, output[:, :-1]], dim=1)\n return einsum(\n f\"vnth,ntk->v{'' if sum_batch else 'n'}hk\",\n self._a_jac_t_mat_prod(module, module.weight_hh_l0, mat, subsampling),\n output_shifted,\n )\n", "\"\"\"Test configurations for `backpack.core.derivatives` BatchNorm layers.\n\nRequired entries:\n \"module_fn\" (callable): Contains a model constructed from `torch.nn` layers\n \"input_fn\" (callable): Used for specifying input function\n\nOptional entries:\n \"target_fn\" (callable): Fetches the groundtruth/target classes\n of regression/classification task\n \"loss_function_fn\" (callable): Loss function used in the model\n \"device\" [list(torch.device)]: List of devices to run the test on.\n \"id_prefix\" (str): Prefix to be included in the test name.\n \"seed\" (int): seed for the random number for torch.rand\n\"\"\"\nfrom test.utils.evaluation_mode import initialize_batch_norm_eval\n\nfrom torch import rand\nfrom torch.nn import BatchNorm1d, BatchNorm2d, BatchNorm3d\n\nBATCH_NORM_SETTINGS = [\n {\n \"module_fn\": lambda: BatchNorm1d(num_features=7),\n \"input_fn\": lambda: rand(size=(5, 7)),\n },\n {\n \"module_fn\": lambda: BatchNorm1d(num_features=7),\n \"input_fn\": lambda: rand(size=(5, 7, 4)),\n },\n {\n \"module_fn\": lambda: BatchNorm2d(num_features=7),\n \"input_fn\": lambda: rand(size=(5, 7, 3, 4)),\n },\n {\n \"module_fn\": lambda: BatchNorm3d(num_features=3),\n \"input_fn\": lambda: rand(size=(5, 3, 3, 4, 2)),\n },\n {\n \"module_fn\": lambda: initialize_batch_norm_eval(BatchNorm1d(num_features=7)),\n \"input_fn\": lambda: rand(size=(5, 7)),\n \"id_prefix\": \"training=False\",\n },\n {\n \"module_fn\": lambda: initialize_batch_norm_eval(BatchNorm1d(num_features=7)),\n \"input_fn\": lambda: rand(size=(5, 7, 4)),\n \"id_prefix\": \"training=False\",\n },\n {\n 
\"module_fn\": lambda: initialize_batch_norm_eval(BatchNorm2d(num_features=7)),\n \"input_fn\": lambda: rand(size=(5, 7, 3, 4)),\n \"id_prefix\": \"training=False\",\n },\n {\n \"module_fn\": lambda: initialize_batch_norm_eval(BatchNorm3d(num_features=7)),\n \"input_fn\": lambda: rand(size=(5, 7, 3, 4, 2)),\n \"id_prefix\": \"training=False\",\n },\n]\n", "\"\"\"Contains batch_l2 extension for Linear.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Tuple\n\nfrom torch import Tensor, einsum\nfrom torch.nn import Linear\n\nfrom backpack.core.derivatives.linear import LinearDerivatives\nfrom backpack.extensions.firstorder.batch_l2_grad.batch_l2_base import BatchL2Base\n\nif TYPE_CHECKING:\n from backpack.extensions import BatchL2Grad\n\n\nclass BatchL2Linear(BatchL2Base):\n \"\"\"batch_l2 extension for Linear.\"\"\"\n\n def __init__(self):\n \"\"\"Initialization.\"\"\"\n super().__init__([\"bias\", \"weight\"], derivatives=LinearDerivatives())\n\n def weight(\n self,\n ext: BatchL2Grad,\n module: Linear,\n g_inp: Tuple[Tensor],\n g_out: Tuple[Tensor],\n backproped: None,\n ) -> Tensor:\n \"\"\"batch_l2 for weight.\n\n Args:\n ext: extension\n module: module\n g_inp: input gradients\n g_out: output gradients\n backproped: backpropagation quantities\n\n Returns:\n batch_l2 for weight\n \"\"\"\n has_additional_axes = g_out[0].dim() > 2\n\n if has_additional_axes:\n # TODO Compare `torch.einsum`, `opt_einsum.contract` and the base class\n # implementation: https://github.com/fKunstner/backpack-discuss/issues/111\n dE_dY = g_out[0].flatten(start_dim=1, end_dim=-2)\n X = module.input0.flatten(start_dim=1, end_dim=-2)\n return einsum(\"nmi,nmj,nki,nkj->n\", dE_dY, X, dE_dY, X)\n else:\n return einsum(\"ni,nj->n\", g_out[0] ** 2, module.input0 ** 2)\n", "\"\"\"Test configurations for `backpack.test.utils` for Transpose CONVOLUTIONal layers\nRequired entries:\n \"module_fn\" (callable): Contains a model constructed from `torch.nn` layers\n \"input_fn\" (callable): Used for specifying input function\n\nOptional entries:\n \"target_fn\" (callable): Fetches the groundtruth/target classes \n of regression/classification task\n \"loss_function_fn\" (callable): Loss function used in the model\n \"device\" [list(torch.device)]: List of devices to run the test on.\n \"id_prefix\" (str): Prefix to be included in the test name.\n \"seed\" (int): seed for the random number for torch.rand\n\"\"\"\nimport torch\n\nSETTINGS = []\n\n###############################################################################\n# example #\n###############################################################################\nexample = {\n \"module_fn\": lambda: torch.nn.ConvTranspose2d(\n in_channels=2,\n out_channels=3,\n kernel_size=2,\n bias=False,\n padding=1,\n stride=2,\n dilation=2,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 2, 7, 7)),\n \"device\": [torch.device(\"cpu\")], # optional\n \"seed\": 0, # optional\n \"id_prefix\": \"conv-example\", # optional\n}\nSETTINGS.append(example)\n\n###############################################################################\n# test settings #\n###############################################################################\n\nSETTINGS += [\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose1d(\n in_channels=2, out_channels=3, kernel_size=2, padding=1, bias=False\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 2, 7)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose1d(\n in_channels=2,\n out_channels=3,\n kernel_size=2,\n padding=1,\n 
stride=2,\n dilation=2,\n bias=False,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 2, 11)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose1d(\n in_channels=2,\n out_channels=4,\n kernel_size=2,\n padding=1,\n groups=2,\n stride=2,\n bias=False,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 2, 11)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose1d(\n in_channels=3,\n out_channels=6,\n kernel_size=2,\n padding=1,\n stride=2,\n groups=3,\n bias=False,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 3, 11)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose2d(\n in_channels=3,\n out_channels=6,\n kernel_size=2,\n padding=2,\n bias=False,\n padding_mode=\"zeros\",\n stride=4,\n dilation=3,\n ),\n \"input_fn\": lambda: torch.rand(size=(1, 3, 8, 8)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose2d(\n in_channels=2,\n out_channels=6,\n kernel_size=2,\n padding=1,\n groups=2,\n bias=False,\n dilation=2,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 2, 11, 13)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose2d(\n in_channels=8,\n out_channels=15,\n kernel_size=(3, 5),\n stride=(2, 1),\n padding=(4, 2),\n bias=False,\n dilation=(3, 1),\n ),\n \"input_fn\": lambda: torch.rand(size=(10, 8, 25, 50)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose3d(\n in_channels=2,\n out_channels=3,\n kernel_size=2,\n padding=2,\n bias=False,\n dilation=2,\n stride=2,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 2, 5, 7, 7)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose3d(\n in_channels=3,\n out_channels=6,\n kernel_size=2,\n padding=2,\n padding_mode=\"zeros\",\n stride=4,\n bias=False,\n ),\n \"input_fn\": lambda: torch.rand(size=(1, 3, 3, 4, 4)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose3d(\n in_channels=2,\n out_channels=3,\n kernel_size=2,\n padding=1,\n stride=2,\n dilation=2,\n bias=False,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 2, 5, 13, 17)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose3d(\n in_channels=2,\n out_channels=4,\n kernel_size=2,\n padding=1,\n stride=2,\n groups=2,\n bias=False,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 2, 5, 13, 17)),\n },\n {\n \"module_fn\": lambda: torch.nn.ConvTranspose3d(\n in_channels=3,\n out_channels=6,\n kernel_size=2,\n padding=1,\n stride=2,\n groups=3,\n bias=False,\n ),\n \"input_fn\": lambda: torch.rand(size=(3, 3, 5, 7, 7)),\n },\n]\n" ]
[ [ "torch.var", "torch.zeros_like", "torch.autograd.grad", "torch.cat" ], [ "torch.einsum", "torch.cat", "torch.zeros" ], [ "torch.nn.BatchNorm1d", "torch.nn.BatchNorm3d", "torch.rand", "torch.nn.BatchNorm2d" ], [ "torch.einsum" ], [ "torch.nn.ConvTranspose2d", "torch.nn.ConvTranspose3d", "torch.rand", "torch.device", "torch.nn.ConvTranspose1d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
josh-gree/NumericalMethods
[ "03cb91114b3f5eb1b56916920ad180d371fe5283" ]
[ "Lectures/tex/codes/lecture9.py" ]
[ "import numpy\n\ndef functional_iteration(f, x0, max_steps=100, tol=1e-10):\n x = numpy.zeros(max_steps+1)\n x[0] = x0\n step = 0\n g = lambda x : x - f(x)\n while abs(f(x[step])) > tol and step < max_steps:\n step = step + 1\n x[step] = g(x[step-1])\n return x[:step+1]\n \nif __name__==\"__main__\":\n def F(x):\n return 1/2*x**2 - 1/52*x**4 - 72/52*x\n def dF(x, dx):\n return (F(x+dx) - F(x)) / dx\n \n f_1em6 = lambda x : dF(x, 1e-6)\n x_df_6 = functional_iteration(f_1em6, 1)\n print(\"Root: \", x_df_6[-1], \"iterations\", len(x_df_6))\n f_1em1 = lambda x : dF(x, 1e-1)\n x_df_1 = functional_iteration(f_1em1, 1)\n print(\"Root: \", x_df_1[-1], \"iterations\", len(x_df_1))\n f_5em1 = lambda x : dF(x, 5e-1)\n x_df_5 = functional_iteration(f_5em1, 1)\n print(\"Root: \", x_df_5[-1], \"iterations\", len(x_df_5))" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
plasmapotential/HEAT
[ "bee84ceffbfc022cdc202ef67c87e469ff6b9e91" ]
[ "source/GUIscripts/plot2DEQ.py" ]
[ "#plotEQ.py\n#Description: Plots Equilibrium (2D) from gfile for pyqt5 application\n#Engineer: T Looby\n#Date: 20190916\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport MDSplus\nimport EFIT.equilParams_class as EP\nfrom scipy import interpolate\nfrom scipy.interpolate import interp1d\n\ndef EQ2Dplot(ep,shot,t,MachFlag,height=None):\n #Seperatrix\n rbdry = ep.g['lcfs'][:,0]\n zbdry = ep.g['lcfs'][:,1]\n if MachFlag == 'nstx':\n rlim, zlim = nstxu_wall(oldwall=False) #FOR NSTXU\n else:\n rlim = ep.g['wall'][:,0]\n zlim = ep.g['wall'][:,1]\n #Change the linspace to get higher psi_n (outside wall)\n psi = ep.g['psiRZn']\n #psi = ep.g['psiRZ']\n levels = sorted(np.append([0.0,0.05,0.1,0.25,0.5,0.75,1.0], np.linspace(1.01,psi.max(),15)))\n R, Z = np.meshgrid(ep.g['R'],ep.g['Z'])\n #psiMax = ep.psiFunc.ev(max(rlim),0.0)\n psiMax = psi.max()\n commonLev = np.linspace(1.0,psiMax,15)\n lcfs = [1.0]\n if height is None:\n plt.figure(figsize=(5,8))\n else:\n dpi = 80\n #w = width / dpi\n #h = 8.0/5.0 * w\n h = height / dpi\n w = 5.0/8.0 * h\n if w % 2 == 0:\n pass\n else:\n w=w+1\n plt.figure(figsize=(w,h), dpi=dpi)\n #Color Contour Plot\n #CS = plt.contourf(R,Z,psi,levels,cmap=plt.cm.bone)\n CS = plt.contourf(R,Z,psi,levels,cmap=plt.cm.cividis)\n #Draw Flux Lines in Common Flux Region\n plt.contour(CS, levels = commonLev, colors=('white',),linestyles='dotted',linewidths=(1,))\n #Draw separatrix as red line\n plt.contour(CS, levels = lcfs, colors=('r',),linestyles=('-',),linewidths=(2,))\n plt.axes().set_aspect('equal')\n #ax.set_aspect('equal')\n plt.xlabel('R [m]', fontsize=22,color='w')\n plt.ylabel('Z [m]', fontsize=22,color='w')\n plt.tick_params(axis='both',colors='w')\n #plt.xlim(min(rlim)-0.02,1.6)\n# plt.xlim(0.0,1.6)\n# plt.ylim(-1.7,1.7)\n plt.title(\"{:06d} @ {:05d}ms\".format(shot,t), fontsize=22, color='white')\n #plt.colorbar(CS, label=r'$\\psi$')\n #Fill in missing limiter section\n rlim_patched = np.append(rlim[2:], rlim[2])\n zlim_patched = np.append(zlim[2:], zlim[2])\n #plt.plot(rlim_patched, zlim_patched,'k--')\n plt.plot(rlim, zlim, '--', color='lime', lw=2)\n\n return plt\n\n\ndef nstxu_wall(oldwall=False):\n \"\"\"\n returns simplified wall. Uses two different wall versions\n \"\"\"\n if oldwall:\n R = np.array([0.1851, 0.1851, 0.2794, 0.2794, 0.2979, 0.5712,\n 1.0433, 1.3192, 1.3358,\n 1.4851, 1.4791, 1.5174, 1.5313, 1.5464, 1.5608,\n 1.567, 1.5657, 1.5543, 1.5341, 1.5181, 1.4818,\n 1.4851, 1.3358, 1.3192, 1.0433,\n 0.5712, 0.2979, 0.2794, 0.2794, 0.1851, 0.1851])\n Z = np.array([0.0, 1.0081, 1.1714, 1.578, 1.6034, 1.6034,\n 1.43, 1.0397, 0.9976,\n 0.545, 0.4995, 0.306, 0.2355, 0.1586, 0.0801,\n 0.0, -0.0177, -0.1123, -0.221, -0.3026, -0.486,\n -0.545, -0.9976, -1.0397, -1.43,\n -1.6034, -1.6034, -1.578, -1.1714, -1.0081, 0])\n else:\n R = np.array([ 0.3147568, 0.3147568, 0.4441952, 0.4441952, 0.443484 ,\n 0.443484 , 0.6000496, 0.7672832, 0.8499856, 1.203452, 1.3192, 1.3358, 1.4851, 1.489 ,\n 1.5638, 1.57 , 1.5737, 1.575 , 1.5737, 1.57 , 1.5638,\n 1.489 , 1.4851, 1.3358, 1.3192, 1.203452 , 0.8499856, 0.7672832, 0.6000496, 0.443484 ,\n 0.443484 , 0.4441952, 0.4441952, 0.3147568, 0.3147568 ])\n Z = np.array([ 0. , 1.0499344, 1.2899136, 1.5104872, 1.5104872,\n 1.6028416, 1.6028416, 1.5367 , 1.5367 , 1.397508, 1.0397, 0.9976, 0.545 , 0.49 ,\n 0.1141, 0.0764, 0.0383, 0. , -0.0383, -0.0764, -0.1141,\n -0.49 , -0.545 , -0.9976, -1.0397, -1.397508 , -1.5367 , -1.5367 , -1.6028416, -1.6028416,\n -1.5104872, -1.5104872, -1.2899136, -1.0499344, 0.])\n return R,Z\n" ]
[ [ "matplotlib.pyplot.contourf", "numpy.linspace", "matplotlib.pyplot.figure", "matplotlib.pyplot.plot", "matplotlib.pyplot.axes", "numpy.append", "matplotlib.pyplot.contour", "matplotlib.pyplot.xlabel", "numpy.array", "numpy.meshgrid", "matplotlib.pyplot.tick_params", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Tarandro/TransTrack
[ "81c12d0737125052f3eb2773ac47be60144f6ccb", "81c12d0737125052f3eb2773ac47be60144f6ccb" ]
[ "datasets/data_prefetcher.py", "datasets/crowdhuman.py" ]
[ "# ------------------------------------------------------------------------\r\n# Deformable DETR\r\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\r\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\r\n# ------------------------------------------------------------------------\r\nimport torch\r\n\r\ndef to_cuda(samples, targets, device):\r\n samples = samples.to(device, non_blocking=True)\r\n targets = [{k: v.to(device, non_blocking=True) for k, v in t.items()} for t in targets]\r\n return samples, targets\r\n\r\nclass data_prefetcher():\r\n def __init__(self, loader, device, prefetch=True):\r\n self.loader = iter(loader)\r\n self.prefetch = prefetch\r\n self.device = device\r\n if prefetch:\r\n self.stream = torch.cuda.Stream()\r\n self.preload()\r\n\r\n def preload(self):\r\n try:\r\n self.next_samples, self.next_targets = next(self.loader)\r\n except StopIteration:\r\n self.next_samples = None\r\n self.next_targets = None\r\n return\r\n # if record_stream() doesn't work, another option is to make sure device inputs are created\r\n # on the main stream.\r\n # self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')\r\n # self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')\r\n # Need to make sure the memory allocated for next_* is not still in use by the main stream\r\n # at the time we start copying to next_*:\r\n # self.stream.wait_stream(torch.cuda.current_stream())\r\n with torch.cuda.stream(self.stream):\r\n self.next_samples, self.next_targets = to_cuda(self.next_samples, self.next_targets, self.device)\r\n # more code for the alternative if record_stream() doesn't work:\r\n # copy_ will record the use of the pinned source tensor in this side stream.\r\n # self.next_input_gpu.copy_(self.next_input, non_blocking=True)\r\n # self.next_target_gpu.copy_(self.next_target, non_blocking=True)\r\n # self.next_input = self.next_input_gpu\r\n # self.next_target = self.next_target_gpu\r\n\r\n # With Amp, it isn't necessary to manually convert data to half.\r\n # if args.fp16:\r\n # self.next_input = self.next_input.half()\r\n # else:\r\n\r\n def next(self):\r\n if self.prefetch:\r\n torch.cuda.current_stream().wait_stream(self.stream)\r\n samples = self.next_samples\r\n targets = self.next_targets\r\n if samples is not None:\r\n samples.record_stream(torch.cuda.current_stream())\r\n if targets is not None:\r\n for t in targets:\r\n for k, v in t.items():\r\n v.record_stream(torch.cuda.current_stream())\r\n self.preload()\r\n else:\r\n try:\r\n samples, targets = next(self.loader)\r\n samples, targets = to_cuda(samples, targets, self.device)\r\n except StopIteration:\r\n samples = None\r\n targets = None\r\n return samples, targets\r\n", " # Modified by Peize Sun, Rufeng Zhang\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\n# ------------------------------------------------------------------------\n\"\"\"\nCOCO dataset which returns image_id for evaluation.\n\nMostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py\n\"\"\"\nfrom pathlib import Path\n\nimport torch\nimport torch.utils.data\nfrom pycocotools import mask as coco_mask\n\nfrom .torchvision_datasets import CocoDetection as TvCocoDetection\nfrom util.misc import get_local_rank, get_local_size\nimport datasets.transforms as T\n\n\nclass CocoDetection(TvCocoDetection):\n def __init__(self, img_folder, ann_file, transforms, return_masks, cache_mode=False, local_rank=0, local_size=1):\n super(CocoDetection, self).__init__(img_folder, ann_file,\n cache_mode=cache_mode, local_rank=local_rank, local_size=local_size)\n self._transforms = transforms\n self.prepare = ConvertCocoPolysToMask(return_masks)\n\n def __getitem__(self, idx):\n img, target = super(CocoDetection, self).__getitem__(idx)\n image_id = self.ids[idx]\n target = {'image_id': image_id, 'annotations': target}\n img, target = self.prepare(img, target)\n if self._transforms is not None:\n img, target = self._transforms(img, target)\n return img, target\n\n\ndef convert_coco_poly_to_mask(segmentations, height, width):\n masks = []\n for polygons in segmentations:\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return masks\n\n\nclass ConvertCocoPolysToMask(object):\n def __init__(self, return_masks=False):\n self.return_masks = return_masks\n\n def __call__(self, image, target):\n w, h = image.size\n\n image_id = target[\"image_id\"]\n image_id = torch.tensor([image_id])\n\n anno = target[\"annotations\"]\n\n anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0]\n\n boxes = [obj[\"bbox\"] for obj in anno]\n # guard against no boxes via resizing\n boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)\n boxes[:, 2:] += boxes[:, :2]\n #boxes[:, 0::2].clamp_(min=0, max=w)\n #boxes[:, 1::2].clamp_(min=0, max=h)\n\n classes = [obj[\"category_id\"] for obj in anno]\n classes = torch.tensor(classes, dtype=torch.int64)\n\n if self.return_masks:\n segmentations = [obj[\"segmentation\"] for obj in anno]\n masks = convert_coco_poly_to_mask(segmentations, h, w)\n\n keypoints = None\n if anno and \"keypoints\" in anno[0]:\n keypoints = [obj[\"keypoints\"] for obj in anno]\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32)\n num_keypoints = keypoints.shape[0]\n if num_keypoints:\n keypoints = keypoints.view(num_keypoints, -1, 3)\n\n keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])\n boxes = boxes[keep]\n classes = classes[keep]\n if self.return_masks:\n masks = masks[keep]\n if keypoints is not None:\n keypoints = keypoints[keep]\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = classes\n if self.return_masks:\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n if keypoints is not None:\n target[\"keypoints\"] = keypoints\n\n # for conversion to coco api\n area = torch.tensor([obj[\"area\"] for obj in anno])\n iscrowd = torch.tensor([obj[\"iscrowd\"] if \"iscrowd\" in obj else 0 for obj in anno])\n target[\"area\"] = area[keep]\n target[\"iscrowd\"] = 
iscrowd[keep]\n\n target[\"orig_size\"] = torch.as_tensor([int(h), int(w)])\n target[\"size\"] = torch.as_tensor([int(h), int(w)])\n\n return image, target\n\n\ndef make_coco_transforms(image_set):\n\n normalize = T.Compose([\n T.ToTensor(),\n T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]\n\n if image_set == 'train':\n return T.Compose([\n T.RandomHorizontalFlip(),\n T.RandomSelect(\n T.RandomResize(scales, max_size=1333),\n T.Compose([\n T.RandomResize([400, 500, 600]),\n T.RandomSizeCrop(384, 600),\n T.RandomResize(scales, max_size=1333),\n ])\n ),\n normalize,\n ])\n\n if image_set == 'val':\n return T.Compose([\n T.RandomResize([800], max_size=1333),\n normalize,\n ])\n\n raise ValueError(f'unknown {image_set}')\n\n\ndef build(image_set, args):\n root = Path(args.coco_path)\n assert root.exists(), f'provided Crowdhuman path {root} does not exist'\n mode = 'instances'\n PATHS = {\n \"train\": (root / \"train\", root / \"annotations\" / 'train.json'),\n \"val\": (root / \"val\", root / \"annotations\" / 'val.json'),\n }\n\n img_folder, ann_file = PATHS[image_set]\n dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks,\n cache_mode=args.cache_mode, local_rank=get_local_rank(), local_size=get_local_size())\n return dataset\n" ]
[ [ "torch.cuda.current_stream", "torch.cuda.Stream", "torch.cuda.stream" ], [ "torch.stack", "torch.tensor", "torch.zeros", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
patrickeganfoley/pyro
[ "3bd5e099e85f3686c66fc3b53476c3b009a77a02", "3bd5e099e85f3686c66fc3b53476c3b009a77a02", "3bd5e099e85f3686c66fc3b53476c3b009a77a02", "3bd5e099e85f3686c66fc3b53476c3b009a77a02", "3bd5e099e85f3686c66fc3b53476c3b009a77a02", "3bd5e099e85f3686c66fc3b53476c3b009a77a02", "3bd5e099e85f3686c66fc3b53476c3b009a77a02" ]
[ "pyro/infer/importance.py", "pyro/poutine/enum_messenger.py", "examples/sparse_regression.py", "tests/contrib/easyguide/test_easyguide.py", "examples/air/modules.py", "tests/infer/test_smcfilter.py", "pyro/infer/trace_mmd.py" ]
[ "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport math\nimport warnings\n\nimport torch\n\nimport pyro\nimport pyro.poutine as poutine\nfrom pyro.ops.stats import fit_generalized_pareto\n\nfrom .abstract_infer import TracePosterior\nfrom .enum import get_importance_trace\n\n\nclass Importance(TracePosterior):\n \"\"\"\n :param model: probabilistic model defined as a function\n :param guide: guide used for sampling defined as a function\n :param num_samples: number of samples to draw from the guide (default 10)\n\n This method performs posterior inference by importance sampling\n using the guide as the proposal distribution.\n If no guide is provided, it defaults to proposing from the model's prior.\n \"\"\"\n\n def __init__(self, model, guide=None, num_samples=None):\n \"\"\"\n Constructor. default to num_samples = 10, guide = model\n \"\"\"\n super().__init__()\n if num_samples is None:\n num_samples = 10\n warnings.warn(\"num_samples not provided, defaulting to {}\".format(num_samples))\n if guide is None:\n # propose from the prior by making a guide from the model by hiding observes\n guide = poutine.block(model, hide_types=[\"observe\"])\n self.num_samples = num_samples\n self.model = model\n self.guide = guide\n\n def _traces(self, *args, **kwargs):\n \"\"\"\n Generator of weighted samples from the proposal distribution.\n \"\"\"\n for i in range(self.num_samples):\n guide_trace = poutine.trace(self.guide).get_trace(*args, **kwargs)\n model_trace = poutine.trace(\n poutine.replay(self.model, trace=guide_trace)).get_trace(*args, **kwargs)\n log_weight = model_trace.log_prob_sum() - guide_trace.log_prob_sum()\n yield (model_trace, log_weight)\n\n def get_log_normalizer(self):\n \"\"\"\n Estimator of the normalizing constant of the target distribution.\n (mean of the unnormalized weights)\n \"\"\"\n # ensure list is not empty\n if self.log_weights:\n log_w = torch.tensor(self.log_weights)\n log_num_samples = torch.log(torch.tensor(self.num_samples * 1.))\n return torch.logsumexp(log_w - log_num_samples, 0)\n else:\n warnings.warn(\"The log_weights list is empty, can not compute normalizing constant estimate.\")\n\n def get_normalized_weights(self, log_scale=False):\n \"\"\"\n Compute the normalized importance weights.\n \"\"\"\n if self.log_weights:\n log_w = torch.tensor(self.log_weights)\n log_w_norm = log_w - torch.logsumexp(log_w, 0)\n return log_w_norm if log_scale else torch.exp(log_w_norm)\n else:\n warnings.warn(\"The log_weights list is empty. 
There is nothing to normalize.\")\n\n def get_ESS(self):\n \"\"\"\n Compute (Importance Sampling) Effective Sample Size (ESS).\n \"\"\"\n if self.log_weights:\n log_w_norm = self.get_normalized_weights(log_scale=True)\n ess = torch.exp(-torch.logsumexp(2*log_w_norm, 0))\n else:\n warnings.warn(\"The log_weights list is empty, effective sample size is zero.\")\n ess = 0\n return ess\n\n\ndef vectorized_importance_weights(model, guide, *args, **kwargs):\n \"\"\"\n :param model: probabilistic model defined as a function\n :param guide: guide used for sampling defined as a function\n :param num_samples: number of samples to draw from the guide (default 1)\n :param int max_plate_nesting: Bound on max number of nested :func:`pyro.plate` contexts.\n :param bool normalized: set to True to return self-normalized importance weights\n :returns: returns a ``(num_samples,)``-shaped tensor of importance weights\n and the model and guide traces that produced them\n\n Vectorized computation of importance weights for models with static structure::\n\n log_weights, model_trace, guide_trace = \\\\\n vectorized_importance_weights(model, guide, *args,\n num_samples=1000,\n max_plate_nesting=4,\n normalized=False)\n \"\"\"\n num_samples = kwargs.pop(\"num_samples\", 1)\n max_plate_nesting = kwargs.pop(\"max_plate_nesting\", None)\n normalized = kwargs.pop(\"normalized\", False)\n\n if max_plate_nesting is None:\n raise ValueError(\"must provide max_plate_nesting\")\n max_plate_nesting += 1\n\n def vectorize(fn):\n def _fn(*args, **kwargs):\n with pyro.plate(\"num_particles_vectorized\", num_samples, dim=-max_plate_nesting):\n return fn(*args, **kwargs)\n return _fn\n\n model_trace, guide_trace = get_importance_trace(\n \"flat\", max_plate_nesting, vectorize(model), vectorize(guide), args, kwargs)\n\n guide_trace.pack_tensors()\n model_trace.pack_tensors(guide_trace.plate_to_symbol)\n\n if num_samples == 1:\n log_weights = model_trace.log_prob_sum() - guide_trace.log_prob_sum()\n else:\n wd = guide_trace.plate_to_symbol[\"num_particles_vectorized\"]\n log_weights = 0.\n for site in model_trace.nodes.values():\n if site[\"type\"] != \"sample\":\n continue\n log_weights += torch.einsum(site[\"packed\"][\"log_prob\"]._pyro_dims + \"->\" + wd,\n [site[\"packed\"][\"log_prob\"]])\n\n for site in guide_trace.nodes.values():\n if site[\"type\"] != \"sample\":\n continue\n log_weights -= torch.einsum(site[\"packed\"][\"log_prob\"]._pyro_dims + \"->\" + wd,\n [site[\"packed\"][\"log_prob\"]])\n\n if normalized:\n log_weights = log_weights - torch.logsumexp(log_weights)\n return log_weights, model_trace, guide_trace\n\n\[email protected]_grad()\ndef psis_diagnostic(model, guide, *args, **kwargs):\n \"\"\"\n Computes the Pareto tail index k for a model/guide pair using the technique\n described in [1], which builds on previous work in [2]. If :math:`0 < k < 0.5`\n the guide is a good approximation to the model posterior, in the sense\n described in [1]. If :math:`0.5 \\\\le k \\\\le 0.7`, the guide provides a suboptimal\n approximation to the posterior, but may still be useful in practice. If\n :math:`k > 0.7` the guide program provides a poor approximation to the full\n posterior, and caution should be used when using the guide. Note, however,\n that a guide may be a poor fit to the full posterior while still yielding\n reasonable model predictions. 
If :math:`k < 0.0` the importance weights\n corresponding to the model and guide appear to be bounded from above; this\n would be a bizarre outcome for a guide trained via ELBO maximization. Please\n see [1] for a more complete discussion of how the tail index k should be\n interpreted.\n\n Please be advised that a large number of samples may be required for an\n accurate estimate of k.\n\n Note that we assume that the model and guide are both vectorized and have\n static structure. As is canonical in Pyro, the args and kwargs are passed\n to the model and guide.\n\n References\n [1] 'Yes, but Did It Work?: Evaluating Variational Inference.'\n Yuling Yao, Aki Vehtari, Daniel Simpson, Andrew Gelman\n [2] 'Pareto Smoothed Importance Sampling.'\n Aki Vehtari, Andrew Gelman, Jonah Gabry\n\n :param callable model: the model program.\n :param callable guide: the guide program.\n :param int num_particles: the total number of times we run the model and guide in\n order to compute the diagnostic. defaults to 1000.\n :param max_simultaneous_particles: the maximum number of simultaneous samples drawn\n from the model and guide. defaults to `num_particles`. `num_particles` must be\n divisible by `max_simultaneous_particles`. compute the diagnostic. defaults to 1000.\n :param int max_plate_nesting: optional bound on max number of nested :func:`pyro.plate`\n contexts in the model/guide. defaults to 7.\n :returns float: the PSIS diagnostic k\n \"\"\"\n\n num_particles = kwargs.pop('num_particles', 1000)\n max_simultaneous_particles = kwargs.pop('max_simultaneous_particles', num_particles)\n max_plate_nesting = kwargs.pop('max_plate_nesting', 7)\n\n if num_particles % max_simultaneous_particles != 0:\n raise ValueError(\"num_particles must be divisible by max_simultaneous_particles.\")\n\n N = num_particles // max_simultaneous_particles\n log_weights = [vectorized_importance_weights(model, guide, num_samples=max_simultaneous_particles,\n max_plate_nesting=max_plate_nesting,\n *args, **kwargs)[0] for _ in range(N)]\n log_weights = torch.cat(log_weights)\n log_weights -= log_weights.max()\n log_weights = torch.sort(log_weights, descending=False)[0]\n\n cutoff_index = - int(math.ceil(min(0.2 * num_particles, 3.0 * math.sqrt(num_particles)))) - 1\n lw_cutoff = max(math.log(1.0e-15), log_weights[cutoff_index])\n lw_tail = log_weights[log_weights > lw_cutoff]\n\n if len(lw_tail) < 10:\n warnings.warn(\"Not enough tail samples to compute PSIS diagnostic; increase num_particles.\")\n k = float('inf')\n else:\n k, _ = fit_generalized_pareto(lw_tail.exp() - math.exp(lw_cutoff))\n\n return k\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\n\nfrom pyro.distributions import Categorical\nfrom pyro.distributions.torch_distribution import TorchDistributionMixin\nfrom pyro.ops.indexing import Vindex\nfrom pyro.util import ignore_jit_warnings\n\nfrom .messenger import Messenger\nfrom .runtime import _ENUM_ALLOCATOR\n\n\ndef _tmc_mixture_sample(msg):\n dist, num_samples = msg[\"fn\"], msg[\"infer\"].get(\"num_samples\")\n\n # find batch dims that aren't plate dims\n batch_shape = [1] * len(dist.batch_shape)\n for f in msg[\"cond_indep_stack\"]:\n if f.vectorized:\n batch_shape[f.dim] = f.size if f.size > 0 else dist.batch_shape[f.dim]\n batch_shape = tuple(batch_shape)\n\n # sample a batch\n sample_shape = (num_samples,)\n fat_sample = dist(sample_shape=sample_shape) # TODO thin before sampling\n assert fat_sample.shape == sample_shape + dist.batch_shape + 
dist.event_shape\n assert any(d > 1 for d in fat_sample.shape)\n\n target_shape = (num_samples,) + batch_shape + dist.event_shape\n\n # if this site has any possible ancestors, sample ancestor indices uniformly\n thin_sample = fat_sample\n if thin_sample.shape != target_shape:\n\n index = [Ellipsis] + [slice(None)] * (len(thin_sample.shape) - 1)\n squashed_dims = []\n for squashed_dim, squashed_size in zip(range(1, len(thin_sample.shape)), thin_sample.shape[1:]):\n if squashed_size > 1 and (target_shape[squashed_dim] == 1 or squashed_dim == 0):\n # uniformly sample one ancestor per upstream particle population\n ancestor_dist = Categorical(logits=torch.zeros((squashed_size,), device=thin_sample.device))\n ancestor_index = ancestor_dist.sample(sample_shape=(num_samples,))\n index[squashed_dim] = ancestor_index\n squashed_dims.append(squashed_dim)\n\n thin_sample = Vindex(thin_sample)[tuple(index)]\n for squashed_dim in squashed_dims:\n thin_sample = thin_sample.unsqueeze(squashed_dim)\n\n assert thin_sample.shape == target_shape\n return thin_sample\n\n\ndef _tmc_diagonal_sample(msg):\n dist, num_samples = msg[\"fn\"], msg[\"infer\"].get(\"num_samples\")\n\n # find batch dims that aren't plate dims\n batch_shape = [1] * len(dist.batch_shape)\n for f in msg[\"cond_indep_stack\"]:\n if f.vectorized:\n batch_shape[f.dim] = f.size if f.size > 0 else dist.batch_shape[f.dim]\n batch_shape = tuple(batch_shape)\n\n # sample a batch\n sample_shape = (num_samples,)\n fat_sample = dist(sample_shape=sample_shape) # TODO thin before sampling\n assert fat_sample.shape == sample_shape + dist.batch_shape + dist.event_shape\n assert any(d > 1 for d in fat_sample.shape)\n\n target_shape = (num_samples,) + batch_shape + dist.event_shape\n\n # if this site has any ancestors, choose ancestors from diagonal approximation\n thin_sample = fat_sample\n if thin_sample.shape != target_shape:\n\n index = [Ellipsis] + [slice(None)] * (len(thin_sample.shape) - 1)\n squashed_dims = []\n for squashed_dim, squashed_size in zip(range(1, len(thin_sample.shape)), thin_sample.shape[1:]):\n if squashed_size > 1 and (target_shape[squashed_dim] == 1 or squashed_dim == 0):\n # diagonal approximation: identify particle indices across populations\n ancestor_index = torch.arange(squashed_size, device=thin_sample.device)\n index[squashed_dim] = ancestor_index\n squashed_dims.append(squashed_dim)\n\n thin_sample = Vindex(thin_sample)[tuple(index)]\n for squashed_dim in squashed_dims:\n thin_sample = thin_sample.unsqueeze(squashed_dim)\n\n assert thin_sample.shape == target_shape\n return thin_sample\n\n\ndef enumerate_site(msg):\n dist = msg[\"fn\"]\n num_samples = msg[\"infer\"].get(\"num_samples\", None)\n if num_samples is None:\n # Enumerate over the support of the distribution.\n value = dist.enumerate_support(expand=msg[\"infer\"].get(\"expand\", False))\n elif num_samples > 1 and not msg[\"infer\"].get(\"expand\", False):\n tmc_strategy = msg[\"infer\"].get(\"tmc\", \"diagonal\")\n if tmc_strategy == \"mixture\":\n value = _tmc_mixture_sample(msg)\n elif tmc_strategy == \"diagonal\":\n value = _tmc_diagonal_sample(msg)\n else:\n raise ValueError(\"{} not a valid TMC strategy\".format(tmc_strategy))\n elif num_samples > 1 and msg[\"infer\"][\"expand\"]:\n # Monte Carlo sample the distribution.\n value = dist(sample_shape=(num_samples,))\n assert value.dim() == 1 + len(dist.batch_shape) + len(dist.event_shape)\n return value\n\n\nclass EnumMessenger(Messenger):\n \"\"\"\n Enumerates in parallel over discrete sample sites marked\n 
``infer={\"enumerate\": \"parallel\"}``.\n\n :param int first_available_dim: The first tensor dimension (counting\n from the right) that is available for parallel enumeration. This\n dimension and all dimensions left may be used internally by Pyro.\n This should be a negative integer or None.\n \"\"\"\n def __init__(self, first_available_dim=None):\n assert first_available_dim is None or first_available_dim < 0, first_available_dim\n self.first_available_dim = first_available_dim\n super().__init__()\n\n def __enter__(self):\n if self.first_available_dim is not None:\n _ENUM_ALLOCATOR.set_first_available_dim(self.first_available_dim)\n self._markov_depths = {} # site name -> depth (nonnegative integer)\n self._param_dims = {} # site name -> (enum dim -> unique id)\n self._value_dims = {} # site name -> (enum dim -> unique id)\n return super().__enter__()\n\n @ignore_jit_warnings()\n def _pyro_sample(self, msg):\n \"\"\"\n :param msg: current message at a trace site.\n :returns: a sample from the stochastic function at the site.\n \"\"\"\n if msg[\"done\"] or not isinstance(msg[\"fn\"], TorchDistributionMixin):\n return\n\n # Compute upstream dims in scope; these are unsafe to use for this site's target_dim.\n scope = msg[\"infer\"].get(\"_markov_scope\") # site name -> markov depth\n param_dims = _ENUM_ALLOCATOR.dim_to_id.copy() # enum dim -> unique id\n if scope is not None:\n for name, depth in scope.items():\n if self._markov_depths[name] == depth: # hide sites whose markov context has exited\n param_dims.update(self._value_dims[name])\n self._markov_depths[msg[\"name\"]] = msg[\"infer\"][\"_markov_depth\"]\n self._param_dims[msg[\"name\"]] = param_dims\n if msg[\"is_observed\"] or msg[\"infer\"].get(\"enumerate\") != \"parallel\":\n return\n\n # Compute an enumerated value (at an arbitrary dim).\n value = enumerate_site(msg)\n actual_dim = -1 - len(msg[\"fn\"].batch_shape) # the leftmost dim of log_prob\n\n # Move actual_dim to a safe target_dim.\n target_dim, id_ = _ENUM_ALLOCATOR.allocate(None if scope is None else param_dims)\n event_dim = msg[\"fn\"].event_dim\n categorical_support = getattr(value, '_pyro_categorical_support', None)\n if categorical_support is not None:\n # Preserve categorical supports to speed up Categorical.log_prob().\n # See pyro/distributions/torch.py for details.\n assert target_dim < 0\n value = value.reshape(value.shape[:1] + (1,) * (-1 - target_dim))\n value._pyro_categorical_support = categorical_support\n elif actual_dim < target_dim:\n assert value.size(target_dim - event_dim) == 1, \\\n 'pyro.markov dim conflict at dim {}'.format(actual_dim)\n value = value.transpose(target_dim - event_dim, actual_dim - event_dim)\n while value.dim() and value.size(0) == 1:\n value = value.squeeze(0)\n elif target_dim < actual_dim:\n diff = actual_dim - target_dim\n value = value.reshape(value.shape[:1] + (1,) * diff + value.shape[1:])\n\n # Compute dims passed downstream through the value.\n value_dims = {dim: param_dims[dim] for dim in range(event_dim - value.dim(), 0)\n if value.size(dim - event_dim) > 1 and dim in param_dims}\n value_dims[target_dim] = id_\n\n msg[\"infer\"][\"_enumerate_dim\"] = target_dim\n msg[\"infer\"][\"_dim_to_id\"] = value_dims\n msg[\"value\"] = value\n msg[\"done\"] = True\n\n def _pyro_post_sample(self, msg):\n # Save all dims exposed in this sample value.\n # Whereas all of site[\"_dim_to_id\"] are needed to interpret a\n # site's log_prob tensor, only a filtered subset self._value_dims[msg[\"name\"]]\n # are needed to interpret a 
site's value.\n if not isinstance(msg[\"fn\"], TorchDistributionMixin):\n return\n value = msg[\"value\"]\n if value is None:\n return\n shape = value.shape[:value.dim() - msg[\"fn\"].event_dim]\n dim_to_id = msg[\"infer\"].setdefault(\"_dim_to_id\", {})\n dim_to_id.update(self._param_dims.get(msg[\"name\"], {}))\n with ignore_jit_warnings():\n self._value_dims[msg[\"name\"]] = {dim: id_ for dim, id_ in dim_to_id.items()\n if len(shape) >= -dim and shape[dim] > 1}\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport argparse\n\nimport numpy as np\nimport torch\nimport math\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro import poutine\nfrom pyro.infer.autoguide import AutoDelta\nfrom pyro.infer import Trace_ELBO\nfrom pyro.infer.autoguide import init_to_median\n\nfrom torch.optim import Adam\n\n\n\"\"\"\nWe demonstrate how to do sparse linear regression using a variant of the\napproach described in [1]. This approach is particularly suitable for situations\nwith many feature dimensions (large P) but not too many datapoints (small N).\nIn particular we consider a quadratic regressor of the form:\n\nf(X) = constant + sum_i theta_i X_i + sum_{i<j} theta_ij X_i X_j + observation noise\n\nNote that in order to keep the set of identified non-negligible weights theta_i\nand theta_ij sparse, the model assumes the weights satisfy a 'strong hierarchy'\ncondition. See reference [1] for details.\n\nNote that in contrast to [1] we do MAP estimation for the kernel hyperparameters\ninstead of HMC. This is not expected to be as robust as doing full Bayesian inference,\nbut in some regimes this works surprisingly well. For the latter HMC approach see\nthe NumPyro version:\n\nhttps://github.com/pyro-ppl/numpyro/blob/master/examples/sparse_regression.py\n\nReferences\n[1] The Kernel Interaction Trick: Fast Bayesian Discovery of Pairwise\n Interactions in High Dimensions.\n Raj Agrawal, Jonathan H. 
Huggins, Brian Trippe, Tamara Broderick\n https://arxiv.org/abs/1905.06501\n\"\"\"\n\n\npyro.enable_validation(True)\ntorch.set_default_tensor_type('torch.FloatTensor')\n\n\ndef dot(X, Z):\n return torch.mm(X, Z.t())\n\n\n# The kernel that corresponds to our quadratic regressor.\ndef kernel(X, Z, eta1, eta2, c):\n eta1sq, eta2sq = eta1.pow(2.0), eta2.pow(2.0)\n k1 = 0.5 * eta2sq * (1.0 + dot(X, Z)).pow(2.0)\n k2 = -0.5 * eta2sq * dot(X.pow(2.0), Z.pow(2.0))\n k3 = (eta1sq - eta2sq) * dot(X, Z)\n k4 = c ** 2 - 0.5 * eta2sq\n return k1 + k2 + k3 + k4\n\n\n# Most of the model code is concerned with constructing the sparsity inducing prior.\ndef model(X, Y, hypers, jitter=1.0e-4):\n S, P, N = hypers['expected_sparsity'], X.size(1), X.size(0)\n\n sigma = pyro.sample(\"sigma\", dist.HalfNormal(hypers['alpha3']))\n phi = sigma * (S / math.sqrt(N)) / (P - S)\n eta1 = pyro.sample(\"eta1\", dist.HalfCauchy(phi))\n\n msq = pyro.sample(\"msq\", dist.InverseGamma(hypers['alpha1'], hypers['beta1']))\n xisq = pyro.sample(\"xisq\", dist.InverseGamma(hypers['alpha2'], hypers['beta2']))\n\n eta2 = eta1.pow(2.0) * xisq.sqrt() / msq\n\n lam = pyro.sample(\"lambda\", dist.HalfCauchy(torch.ones(P, device=X.device)).to_event(1))\n kappa = msq.sqrt() * lam / (msq + (eta1 * lam).pow(2.0)).sqrt()\n kX = kappa * X\n\n # sample the observation noise\n var_obs = pyro.sample(\"var_obs\", dist.InverseGamma(hypers['alpha_obs'], hypers['beta_obs']))\n\n # compute the kernel for the given hyperparameters\n k = kernel(kX, kX, eta1, eta2, hypers['c']) + (var_obs + jitter) * torch.eye(N, device=X.device)\n\n # observe the outputs Y\n pyro.sample(\"Y\", dist.MultivariateNormal(torch.zeros(N, device=X.device), covariance_matrix=k),\n obs=Y)\n\n\n\"\"\"\nHere we compute the mean and variance of coefficients theta_i (where i = dimension) as well\nas for quadratic coefficients theta_ij for a given (in our case MAP) estimate of the kernel\nhyperparameters (eta1, xisq, ...).\nCompare to theorem 5.1 in reference [1].\n\"\"\"\[email protected]_grad()\ndef compute_posterior_stats(X, Y, msq, lam, eta1, xisq, c, var_obs, jitter=1.0e-4):\n N, P = X.shape\n\n # prepare for computation of posterior statistics for singleton weights\n probe = torch.zeros((P, 2, P), dtype=X.dtype, device=X.device)\n probe[:, 0, :] = torch.eye(P, dtype=X.dtype, device=X.device)\n probe[:, 1, :] = -torch.eye(P, dtype=X.dtype, device=X.device)\n\n eta2 = eta1.pow(2.0) * xisq.sqrt() / msq\n kappa = msq.sqrt() * lam / (msq + (eta1 * lam).pow(2.0)).sqrt()\n\n kX = kappa * X\n kprobe = kappa * probe\n kprobe = kprobe.reshape(-1, P)\n\n # compute various kernels\n k_xx = kernel(kX, kX, eta1, eta2, c) + (jitter + var_obs) * torch.eye(N, dtype=X.dtype, device=X.device)\n k_xx_inv = torch.inverse(k_xx)\n k_probeX = kernel(kprobe, kX, eta1, eta2, c)\n k_prbprb = kernel(kprobe, kprobe, eta1, eta2, c)\n\n # compute mean and variance for singleton weights\n vec = torch.tensor([0.50, -0.50], dtype=X.dtype, device=X.device)\n mu = torch.matmul(k_probeX, torch.matmul(k_xx_inv, Y).unsqueeze(-1)).squeeze(-1).reshape(P, 2)\n mu = (mu * vec).sum(-1)\n\n var = k_prbprb - torch.matmul(k_probeX, torch.matmul(k_xx_inv, k_probeX.t()))\n var = var.reshape(P, 2, P, 2).diagonal(dim1=-4, dim2=-2) # 2 2 P\n std = ((var * vec.unsqueeze(-1)).sum(-2) * vec.unsqueeze(-1)).sum(-2).clamp(min=0.0).sqrt()\n\n # select active dimensions (those that are non-zero with sufficient statistical significance)\n active_dims = (((mu - 4.0 * std) > 0.0) | ((mu + 4.0 * std) < 0.0)).bool()\n active_dims = 
active_dims.nonzero().squeeze(-1)\n\n print(\"Identified the following active dimensions:\", active_dims.data.numpy().flatten())\n print(\"Mean estimate for active singleton weights:\\n\", mu[active_dims].data.numpy())\n\n # if there are 0 or 1 active dimensions there are no quadratic weights to be found\n M = len(active_dims)\n if M < 2:\n return active_dims.data.numpy(), []\n\n # prep for computation of posterior statistics for quadratic weights\n left_dims, right_dims = torch.ones(M, M).triu(1).nonzero().t()\n left_dims, right_dims = active_dims[left_dims], active_dims[right_dims]\n\n probe = torch.zeros(left_dims.size(0), 4, P, dtype=X.dtype, device=X.device)\n left_dims_expand = left_dims.unsqueeze(-1).expand(left_dims.size(0), P)\n right_dims_expand = right_dims.unsqueeze(-1).expand(right_dims.size(0), P)\n for dim, value in zip(range(4), [1.0, 1.0, -1.0, -1.0]):\n probe[:, dim, :].scatter_(-1, left_dims_expand, value)\n for dim, value in zip(range(4), [1.0, -1.0, 1.0, -1.0]):\n probe[:, dim, :].scatter_(-1, right_dims_expand, value)\n\n kprobe = kappa * probe\n kprobe = kprobe.reshape(-1, P)\n k_probeX = kernel(kprobe, kX, eta1, eta2, c)\n k_prbprb = kernel(kprobe, kprobe, eta1, eta2, c)\n\n # compute mean and covariance for a subset of weights theta_ij (namely those with\n # 'active' dimensions i and j)\n vec = torch.tensor([0.25, -0.25, -0.25, 0.25], dtype=X.dtype, device=X.device)\n mu = torch.matmul(k_probeX, torch.matmul(k_xx_inv, Y).unsqueeze(-1)).squeeze(-1).reshape(left_dims.size(0), 4)\n mu = (mu * vec).sum(-1)\n\n var = k_prbprb - torch.matmul(k_probeX, torch.matmul(k_xx_inv, k_probeX.t()))\n var = var.reshape(left_dims.size(0), 4, left_dims.size(0), 4).diagonal(dim1=-4, dim2=-2)\n std = ((var * vec.unsqueeze(-1)).sum(-2) * vec.unsqueeze(-1)).sum(-2).clamp(min=0.0).sqrt()\n\n active_quad_dims = (((mu - 4.0 * std) > 0.0) | ((mu + 4.0 * std) < 0.0)) & (mu.abs() > 1.0e-4).bool()\n active_quad_dims = active_quad_dims.nonzero()\n\n active_quadratic_dims = np.stack([left_dims[active_quad_dims].data.numpy().flatten(),\n right_dims[active_quad_dims].data.numpy().flatten()], axis=1)\n active_quadratic_dims = np.split(active_quadratic_dims, active_quadratic_dims.shape[0])\n active_quadratic_dims = [tuple(a.tolist()[0]) for a in active_quadratic_dims]\n\n return active_dims.data.numpy(), active_quadratic_dims\n\n\n# Create an artifical dataset with N datapoints and P feature dimensions. 
Of the P\n# dimensions S will have non-zero singleton weights and Q(Q-1)/2 pairs of feature dimensions\n# will have non-zero quadratic weights.\ndef get_data(N=20, P=10, S=2, Q=2, sigma_obs=0.15):\n assert S < P and P > 3 and S > 2 and Q > 1 and Q <= S\n torch.manual_seed(1)\n\n X = torch.randn(N, P)\n\n singleton_weights = 2.0 * torch.rand(S) - 1.0\n Y_mean = torch.einsum(\"ni,i->n\", X[:, 0:S], singleton_weights)\n\n quadratic_weights = []\n expected_quad_dims = []\n for dim1 in range(Q):\n for dim2 in range(Q):\n if dim1 >= dim2:\n continue\n expected_quad_dims.append((dim1, dim2))\n quadratic_weights.append(2.0 * torch.rand(1) - 1.0)\n Y_mean += quadratic_weights[-1] * X[:, dim1] * X[:, dim2]\n quadratic_weights = torch.tensor(quadratic_weights)\n\n # we standardize the outputs Y\n Y = Y_mean\n Y -= Y.mean()\n Y_std1 = Y.std()\n Y /= Y_std1\n Y += sigma_obs * torch.randn(N)\n Y -= Y.mean()\n Y_std2 = Y.std()\n Y /= Y_std2\n\n assert X.shape == (N, P)\n assert Y.shape == (N,)\n\n return X, Y, singleton_weights / (Y_std1 * Y_std2), expected_quad_dims\n\n\ndef init_loc_fn(site):\n value = init_to_median(site, num_samples=50)\n # we also make sure the initial observation noise is not too large\n # (otherwise we run the danger of getting stuck in bad local optima during optimization).\n if site[\"name\"] == \"var_obs\":\n value = 0.2 * value\n return value\n\n\ndef main(args):\n # setup hyperparameters for the model\n hypers = {'expected_sparsity': max(1.0, args.num_dimensions / 10),\n 'alpha1': 3.0, 'beta1': 1.0, 'alpha2': 3.0, 'beta2': 1.0, 'alpha3': 1.0,\n 'c': 1.0, 'alpha_obs': 3.0, 'beta_obs': 1.0}\n\n P = args.num_dimensions\n S = args.active_dimensions\n Q = args.quadratic_dimensions\n\n # generate artificial dataset\n X, Y, expected_thetas, expected_quad_dims = get_data(N=args.num_data, P=P, S=S,\n Q=Q, sigma_obs=args.sigma)\n\n loss_fn = Trace_ELBO().differentiable_loss\n\n # We initialize the AutoDelta guide (for MAP estimation) with args.num_trials many\n # initial parameters sampled from the vicinity of the median of the prior distribution\n # and then continue optimizing with the best performing initialization.\n init_losses = []\n for restart in range(args.num_restarts):\n pyro.clear_param_store()\n pyro.set_rng_seed(restart)\n guide = AutoDelta(model, init_loc_fn=init_loc_fn)\n with torch.no_grad():\n init_losses.append(loss_fn(model, guide, X, Y, hypers).item())\n\n pyro.set_rng_seed(np.argmin(init_losses))\n pyro.clear_param_store()\n guide = AutoDelta(model, init_loc_fn=init_loc_fn)\n\n # Instead of using pyro.infer.SVI and pyro.optim we instead construct our own PyTorch\n # optimizer and take charge of gradient-based optimization ourselves.\n with poutine.block(), poutine.trace(param_only=True) as param_capture:\n guide(X, Y, hypers)\n params = list([pyro.param(name).unconstrained() for name in param_capture.trace])\n adam = Adam(params, lr=args.lr)\n\n report_frequency = 50\n print(\"Beginning MAP optimization...\")\n\n # the optimization loop\n for step in range(args.num_steps):\n loss = loss_fn(model, guide, X, Y, hypers) / args.num_data\n loss.backward()\n adam.step()\n adam.zero_grad()\n\n # we manually reduce the learning rate according to this schedule\n if step in [100, 300, 700, 900]:\n adam.param_groups[0]['lr'] *= 0.2\n\n if step % report_frequency == 0 or step == args.num_steps - 1:\n print(\"[step %04d] loss: %.5f\" % (step, loss))\n\n print(\"Expected singleton thetas:\\n\", expected_thetas.data.numpy())\n\n # we do the final computation using double 
precision\n median = guide.median() # == mode for MAP inference\n active_dims, active_quad_dims = \\\n compute_posterior_stats(X.double(), Y.double(), median['msq'].double(),\n median['lambda'].double(), median['eta1'].double(),\n median['xisq'].double(), torch.tensor(hypers['c']).double(),\n median['var_obs'].double())\n\n expected_active_dims = np.arange(S).tolist()\n\n tp_singletons = len(set(active_dims) & set(expected_active_dims))\n fp_singletons = len(set(active_dims) - set(expected_active_dims))\n fn_singletons = len(set(expected_active_dims) - set(active_dims))\n singleton_stats = (tp_singletons, fp_singletons, fn_singletons)\n\n tp_quads = len(set(active_quad_dims) & set(expected_quad_dims))\n fp_quads = len(set(active_quad_dims) - set(expected_quad_dims))\n fn_quads = len(set(expected_quad_dims) - set(active_quad_dims))\n quad_stats = (tp_quads, fp_quads, fn_quads)\n\n # We report how well we did, i.e. did we recover the sparse set of coefficients\n # that we expected for our artificial dataset?\n print(\"[SUMMARY STATS]\")\n print(\"Singletons (true positive, false positive, false negative): \" +\n \"(%d, %d, %d)\" % singleton_stats)\n print(\"Quadratic (true positive, false positive, false negative): \" +\n \"(%d, %d, %d)\" % quad_stats)\n\n\nif __name__ == '__main__':\n assert pyro.__version__.startswith('1.2.1')\n parser = argparse.ArgumentParser(description='Krylov KIT')\n parser.add_argument('--num-data', type=int, default=750)\n parser.add_argument('--num-steps', type=int, default=1000)\n parser.add_argument('--num-dimensions', type=int, default=100)\n parser.add_argument('--num-restarts', type=int, default=10)\n parser.add_argument('--sigma', type=float, default=0.05)\n parser.add_argument('--active-dimensions', type=int, default=10)\n parser.add_argument('--quadratic-dimensions', type=int, default=5)\n parser.add_argument('--lr', type=float, default=0.3)\n args = parser.parse_args()\n\n main(args)\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport io\nimport warnings\n\nimport pytest\nimport torch\nfrom torch.distributions import constraints\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.contrib.easyguide import EasyGuide, easy_guide\nfrom pyro.infer import SVI, Trace_ELBO\nfrom pyro.infer.autoguide.initialization import init_to_mean, init_to_median\nfrom pyro.optim import Adam\nfrom pyro.util import ignore_jit_warnings\n\n\n# The model from tutorial/source/easyguide.ipynb\ndef model(batch, subsample, full_size):\n with ignore_jit_warnings():\n num_time_steps = len(batch)\n result = [None] * num_time_steps\n drift = pyro.sample(\"drift\", dist.LogNormal(-1, 0.5))\n with pyro.plate(\"data\", full_size, subsample=subsample):\n z = 0.\n for t in range(num_time_steps):\n z = pyro.sample(\"state_{}\".format(t),\n dist.Normal(z, drift))\n result[t] = pyro.sample(\"obs_{}\".format(t), dist.Bernoulli(logits=z),\n obs=batch[t])\n return torch.stack(result)\n\n\ndef check_guide(guide):\n full_size = 50\n batch_size = 20\n num_time_steps = 8\n pyro.set_rng_seed(123456789)\n data = model([None] * num_time_steps, torch.arange(full_size), full_size)\n assert data.shape == (num_time_steps, full_size)\n\n pyro.get_param_store().clear()\n pyro.set_rng_seed(123456789)\n svi = SVI(model, guide, Adam({\"lr\": 0.02}), Trace_ELBO())\n for epoch in range(2):\n beg = 0\n while beg < full_size:\n end = min(full_size, beg + batch_size)\n subsample = torch.arange(beg, end)\n batch = data[:, beg:end]\n beg = end\n svi.step(batch, 
subsample, full_size=full_size)\n\n\[email protected](\"init_fn\", [None, init_to_mean, init_to_median])\ndef test_delta_smoke(init_fn):\n\n @easy_guide(model)\n def guide(self, batch, subsample, full_size):\n self.map_estimate(\"drift\")\n with self.plate(\"data\", full_size, subsample=subsample):\n self.group(match=\"state_[0-9]*\").map_estimate()\n\n if init_fn is not None:\n guide.init = init_fn\n\n check_guide(guide)\n\n\nclass PickleGuide(EasyGuide):\n def __init__(self, model):\n super().__init__(model)\n self.init = init_to_median\n\n def guide(self, batch, subsample, full_size):\n self.map_estimate(\"drift\")\n with self.plate(\"data\", full_size, subsample=subsample):\n self.group(match=\"state_[0-9]*\").map_estimate()\n\n\ndef test_serialize():\n guide = PickleGuide(model)\n check_guide(guide)\n\n # Work around https://github.com/pytorch/pytorch/issues/27972\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n f = io.BytesIO()\n torch.save(guide, f)\n f.seek(0)\n actual = torch.load(f)\n\n assert type(actual) == type(guide)\n assert dir(actual) == dir(guide)\n check_guide(guide)\n check_guide(actual)\n\n\[email protected](\"init_fn\", [None, init_to_mean, init_to_median])\ndef test_subsample_smoke(init_fn):\n rank = 2\n\n @easy_guide(model)\n def guide(self, batch, subsample, full_size):\n self.map_estimate(\"drift\")\n group = self.group(match=\"state_[0-9]*\")\n cov_diag = pyro.param(\"state_cov_diag\",\n lambda: torch.full(group.event_shape, 0.01),\n constraint=constraints.positive)\n cov_factor = pyro.param(\"state_cov_factor\",\n lambda: torch.randn(group.event_shape + (rank,)) * 0.01)\n with self.plate(\"data\", full_size, subsample=subsample):\n loc = pyro.param(\"state_loc\",\n lambda: torch.full((full_size,) + group.event_shape, 0.5),\n event_dim=1)\n group.sample(\"states\", dist.LowRankMultivariateNormal(loc, cov_factor, cov_diag))\n\n if init_fn is not None:\n guide.init = init_fn\n\n check_guide(guide)\n\n\[email protected](\"init_fn\", [None, init_to_mean, init_to_median])\ndef test_amortized_smoke(init_fn):\n rank = 2\n\n @easy_guide(model)\n def guide(self, batch, subsample, full_size):\n num_time_steps, batch_size = batch.shape\n self.map_estimate(\"drift\")\n\n group = self.group(match=\"state_[0-9]*\")\n cov_diag = pyro.param(\"state_cov_diag\",\n lambda: torch.full(group.event_shape, 0.01),\n constraint=constraints.positive)\n cov_factor = pyro.param(\"state_cov_factor\",\n lambda: torch.randn(group.event_shape + (rank,)) * 0.01)\n\n if not hasattr(self, \"nn\"):\n self.nn = torch.nn.Linear(group.event_shape.numel(), group.event_shape.numel())\n self.nn.weight.data.fill_(1.0 / num_time_steps)\n self.nn.bias.data.fill_(-0.5)\n pyro.module(\"state_nn\", self.nn)\n with self.plate(\"data\", full_size, subsample=subsample):\n loc = self.nn(batch.t())\n group.sample(\"states\", dist.LowRankMultivariateNormal(loc, cov_factor, cov_diag))\n\n if init_fn is not None:\n guide.init = init_fn\n\n check_guide(guide)\n\n\ndef test_overlapping_plates_ok():\n\n def model(batch, subsample, full_size):\n # This is ok because the shared plate is left of the nonshared plate.\n with pyro.plate(\"shared\", full_size, subsample=subsample, dim=-2):\n x = pyro.sample(\"x\", dist.Normal(0, 1))\n with pyro.plate(\"nonshared\", 2, dim=-1):\n y = pyro.sample(\"y\", dist.Normal(0, 1))\n xy = x + y.sum(-1, keepdim=True)\n return pyro.sample(\"z\", dist.Normal(xy, 1),\n obs=batch)\n\n @easy_guide(model)\n def guide(self, batch, subsample, 
full_size):\n with self.plate(\"shared\", full_size, subsample=subsample, dim=-2):\n group = self.group(match=\"x|y\")\n loc = pyro.param(\"guide_loc\",\n torch.zeros((full_size, 1) + group.event_shape),\n event_dim=1)\n scale = pyro.param(\"guide_scale\",\n torch.ones((full_size, 1) + group.event_shape),\n constraint=constraints.positive,\n event_dim=1)\n group.sample(\"xy\", dist.Normal(loc, scale).to_event(1))\n\n # Generate data.\n full_size = 5\n batch_size = 2\n data = model(None, torch.arange(full_size), full_size)\n assert data.shape == (full_size, 1)\n\n # Train for one epoch.\n pyro.get_param_store().clear()\n svi = SVI(model, guide, Adam({\"lr\": 0.02}), Trace_ELBO())\n beg = 0\n while beg < full_size:\n end = min(full_size, beg + batch_size)\n subsample = torch.arange(beg, end)\n batch = data[beg:end]\n beg = end\n svi.step(batch, subsample, full_size=full_size)\n\n\ndef test_overlapping_plates_error():\n\n def model(batch, subsample, full_size):\n # This is an error because the shared plate is right of the nonshared plate.\n with pyro.plate(\"shared\", full_size, subsample=subsample, dim=-1):\n x = pyro.sample(\"x\", dist.Normal(0, 1))\n with pyro.plate(\"nonshared\", 2, dim=-2):\n y = pyro.sample(\"y\", dist.Normal(0, 1))\n xy = x + y.sum(-2)\n return pyro.sample(\"z\", dist.Normal(xy, 1),\n obs=batch)\n\n @easy_guide(model)\n def guide(self, batch, subsample, full_size):\n with self.plate(\"shared\", full_size, subsample=subsample, dim=-1):\n group = self.group(match=\"x|y\")\n loc = pyro.param(\"guide_loc\",\n torch.zeros((full_size,) + group.event_shape),\n event_dim=1)\n scale = pyro.param(\"guide_scale\",\n torch.ones((full_size,) + group.event_shape),\n constraint=constraints.positive,\n event_dim=1)\n group.sample(\"xy\", dist.Normal(loc, scale).to_event(1))\n\n # Generate data.\n full_size = 5\n batch_size = 2\n data = model(None, torch.arange(full_size), full_size)\n assert data.shape == (full_size,)\n\n # Train for one epoch.\n pyro.get_param_store().clear()\n svi = SVI(model, guide, Adam({\"lr\": 0.02}), Trace_ELBO())\n beg = 0\n end = min(full_size, beg + batch_size)\n subsample = torch.arange(beg, end)\n batch = data[beg:end]\n beg = end\n with pytest.raises(ValueError, match=\"Group expects all per-site plates\"):\n svi.step(batch, subsample, full_size=full_size)\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.functional import softplus\n\n\n# Takes pixel intensities of the attention window to parameters (mean,\n# standard deviation) of the distribution over the latent code,\n# z_what.\nclass Encoder(nn.Module):\n def __init__(self, x_size, h_sizes, z_size, non_linear_layer):\n super().__init__()\n self.z_size = z_size\n output_size = 2 * z_size\n self.mlp = MLP(x_size, h_sizes + [output_size], non_linear_layer)\n\n def forward(self, x):\n a = self.mlp(x)\n return a[:, 0:self.z_size], softplus(a[:, self.z_size:])\n\n\n# Takes a latent code, z_what, to pixel intensities.\nclass Decoder(nn.Module):\n def __init__(self, x_size, h_sizes, z_size, bias, use_sigmoid, non_linear_layer):\n super().__init__()\n self.bias = bias\n self.use_sigmoid = use_sigmoid\n self.mlp = MLP(z_size, h_sizes + [x_size], non_linear_layer)\n\n def forward(self, z):\n a = self.mlp(z)\n if self.bias is not None:\n a = a + self.bias\n return torch.sigmoid(a) if self.use_sigmoid else a\n\n\n# A general purpose module to construct networks that look like:\n# [Linear (256 -> 1)]\n# [Linear (256 -> 
256), ReLU (), Linear (256 -> 1)]\n# [Linear (256 -> 256), ReLU (), Linear (256 -> 1), ReLU ()]\n# etc.\nclass MLP(nn.Module):\n def __init__(self, in_size, out_sizes, non_linear_layer, output_non_linearity=False):\n super().__init__()\n assert len(out_sizes) >= 1\n layers = []\n in_sizes = [in_size] + out_sizes[0:-1]\n sizes = list(zip(in_sizes, out_sizes))\n for (i, o) in sizes[0:-1]:\n layers.append(nn.Linear(i, o))\n layers.append(non_linear_layer())\n layers.append(nn.Linear(sizes[-1][0], sizes[-1][1]))\n if output_non_linearity:\n layers.append(non_linear_layer())\n self.seq = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.seq(x)\n\n\n# Takes the guide RNN hidden state to parameters of the guide\n# distributions over z_where and z_pres.\nclass Predict(nn.Module):\n def __init__(self, input_size, h_sizes, z_pres_size, z_where_size, non_linear_layer):\n super().__init__()\n self.z_pres_size = z_pres_size\n self.z_where_size = z_where_size\n output_size = z_pres_size + 2 * z_where_size\n self.mlp = MLP(input_size, h_sizes + [output_size], non_linear_layer)\n\n def forward(self, h):\n out = self.mlp(h)\n z_pres_p = torch.sigmoid(out[:, 0:self.z_pres_size])\n z_where_loc = out[:, self.z_pres_size:self.z_pres_size + self.z_where_size]\n z_where_scale = softplus(out[:, (self.z_pres_size + self.z_where_size):])\n return z_pres_p, z_where_loc, z_where_scale\n\n\nclass Identity(nn.Module):\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\nimport torch\n\nimport pyro\nimport pyro.distributions as dist\nimport pyro.poutine as poutine\nfrom pyro.infer import SMCFilter\nfrom tests.common import assert_close\n\n\nclass SmokeModel:\n\n def __init__(self, state_size, plate_size):\n self.state_size = state_size\n self.plate_size = plate_size\n\n def init(self, state):\n self.t = 0\n state[\"x_mean\"] = pyro.sample(\"x_mean\", dist.Normal(0., 1.))\n state[\"y_mean\"] = pyro.sample(\"y_mean\",\n dist.MultivariateNormal(torch.zeros(self.state_size),\n torch.eye(self.state_size)))\n\n def step(self, state, x=None, y=None):\n v = pyro.sample(\"v_{}\".format(self.t), dist.Normal(0., 1.))\n with pyro.plate(\"plate\", self.plate_size):\n w = pyro.sample(\"w_{}\".format(self.t), dist.Normal(v, 1.))\n x = pyro.sample(\"x_{}\".format(self.t),\n dist.Normal(state[\"x_mean\"] + w, 1), obs=x)\n y = pyro.sample(\"y_{}\".format(self.t),\n dist.MultivariateNormal(state[\"y_mean\"] + w.unsqueeze(-1), torch.eye(self.state_size)),\n obs=y)\n self.t += 1\n return x, y\n\n\nclass SmokeGuide:\n\n def __init__(self, state_size, plate_size):\n self.state_size = state_size\n self.plate_size = plate_size\n\n def init(self, state):\n self.t = 0\n pyro.sample(\"x_mean\", dist.Normal(0., 2.))\n pyro.sample(\"y_mean\",\n dist.MultivariateNormal(torch.zeros(self.state_size),\n 2.*torch.eye(self.state_size)))\n\n def step(self, state, x=None, y=None):\n v = pyro.sample(\"v_{}\".format(self.t), dist.Normal(0., 2.))\n with pyro.plate(\"plate\", self.plate_size):\n pyro.sample(\"w_{}\".format(self.t), dist.Normal(v, 2.))\n self.t += 1\n\n\[email protected](\"max_plate_nesting\", [1, 2])\[email protected](\"state_size\", [2, 5, 1])\[email protected](\"plate_size\", [3, 7, 1])\[email protected](\"num_steps\", [1, 2, 10])\ndef test_smoke(max_plate_nesting, state_size, plate_size, num_steps):\n model = SmokeModel(state_size, plate_size)\n guide = SmokeGuide(state_size, plate_size)\n\n 
smc = SMCFilter(model, guide, num_particles=100, max_plate_nesting=max_plate_nesting)\n\n true_model = SmokeModel(state_size, plate_size)\n\n state = {}\n true_model.init(state)\n truth = [true_model.step(state) for t in range(num_steps)]\n\n smc.init()\n assert set(smc.state) == {\"x_mean\", \"y_mean\"}\n for x, y in truth:\n smc.step(x, y)\n assert set(smc.state) == {\"x_mean\", \"y_mean\"}\n smc.get_empirical()\n\n\nclass HarmonicModel:\n\n def __init__(self):\n self.A = torch.tensor([[0., 1.],\n [-1., 0.]])\n self.B = torch.tensor([3., 3.])\n self.sigma_z = torch.tensor(1.)\n self.sigma_y = torch.tensor(1.)\n\n def init(self, state):\n self.t = 0\n state[\"z\"] = pyro.sample(\"z_init\",\n dist.Delta(torch.tensor([1., 0.]), event_dim=1))\n\n def step(self, state, y=None):\n self.t += 1\n state[\"z\"] = pyro.sample(\"z_{}\".format(self.t),\n dist.Normal(state[\"z\"].matmul(self.A),\n self.B*self.sigma_z).to_event(1))\n y = pyro.sample(\"y_{}\".format(self.t),\n dist.Normal(state[\"z\"][..., 0], self.sigma_y),\n obs=y)\n\n state[\"z_{}\".format(self.t)] = state[\"z\"] # saved for testing\n\n return state[\"z\"], y\n\n\nclass HarmonicGuide:\n\n def __init__(self):\n self.model = HarmonicModel()\n\n def init(self, state):\n self.t = 0\n pyro.sample(\"z_init\", dist.Delta(torch.tensor([1., 0.]), event_dim=1))\n\n def step(self, state, y=None):\n self.t += 1\n\n # Proposal distribution\n pyro.sample(\"z_{}\".format(self.t),\n dist.Normal(state[\"z\"].matmul(self.model.A),\n torch.tensor([2., 2.])).to_event(1))\n\n\ndef generate_data():\n model = HarmonicModel()\n\n state = {}\n model.init(state)\n zs = [torch.tensor([1., 0.])]\n ys = [None]\n for t in range(50):\n z, y = model.step(state)\n zs.append(z)\n ys.append(y)\n\n return zs, ys\n\n\ndef score_latent(zs, ys):\n model = HarmonicModel()\n with poutine.trace() as trace:\n with poutine.condition(data={\"z_{}\".format(t): z for t, z in enumerate(zs)}):\n state = {}\n model.init(state)\n for y in ys[1:]:\n model.step(state, y)\n\n return trace.trace.log_prob_sum()\n\n\ndef test_likelihood_ratio():\n\n model = HarmonicModel()\n guide = HarmonicGuide()\n\n smc = SMCFilter(model, guide, num_particles=100, max_plate_nesting=0)\n\n zs, ys = generate_data()\n zs_true, ys_true = generate_data()\n smc.init()\n for y in ys_true[1:]:\n smc.step(y)\n i = smc.state._log_weights.max(0)[1]\n values = {k: v[i] for k, v in smc.state.items()}\n\n zs_pred = [torch.tensor([1., 0.])]\n zs_pred += [values[\"z_{}\".format(t)] for t in range(1, 51)]\n\n assert(score_latent(zs_true, ys_true) > score_latent(zs, ys_true))\n assert(score_latent(zs_pred, ys_true) > score_latent(zs_pred, ys))\n assert(score_latent(zs_pred, ys_true) > score_latent(zs, ys_true))\n\n\ndef test_gaussian_filter():\n dim = 4\n init_dist = dist.MultivariateNormal(torch.zeros(dim), scale_tril=torch.eye(dim) * 10)\n trans_mat = torch.eye(dim)\n trans_dist = dist.MultivariateNormal(torch.zeros(dim), scale_tril=torch.eye(dim))\n obs_mat = torch.eye(dim)\n obs_dist = dist.MultivariateNormal(torch.zeros(dim), scale_tril=torch.eye(dim) * 2)\n hmm = dist.GaussianHMM(init_dist, trans_mat, trans_dist, obs_mat, obs_dist)\n\n class Model:\n def init(self, state):\n state[\"z\"] = pyro.sample(\"z_init\", init_dist)\n self.t = 0\n\n def step(self, state, datum=None):\n state[\"z\"] = pyro.sample(\"z_{}\".format(self.t),\n dist.MultivariateNormal(state[\"z\"], scale_tril=trans_dist.scale_tril))\n datum = pyro.sample(\"obs_{}\".format(self.t),\n dist.MultivariateNormal(state[\"z\"], 
scale_tril=obs_dist.scale_tril),\n obs=datum)\n self.t += 1\n return datum\n\n class Guide:\n def init(self, state):\n pyro.sample(\"z_init\", init_dist)\n self.t = 0\n\n def step(self, state, datum):\n pyro.sample(\"z_{}\".format(self.t),\n dist.MultivariateNormal(state[\"z\"], scale_tril=trans_dist.scale_tril * 2))\n self.t += 1\n\n # Generate data.\n num_steps = 20\n model = Model()\n state = {}\n model.init(state)\n data = torch.stack([model.step(state) for _ in range(num_steps)])\n\n # Perform inference.\n model = Model()\n guide = Guide()\n smc = SMCFilter(model, guide, num_particles=1000, max_plate_nesting=0)\n smc.init()\n for t, datum in enumerate(data):\n smc.step(datum)\n expected = hmm.filter(data[:1+t])\n actual = smc.get_empirical()[\"z\"]\n assert_close(actual.variance ** 0.5, expected.variance ** 0.5, atol=0.1, rtol=0.5)\n sigma = actual.variance.max().item() ** 0.5\n assert_close(actual.mean, expected.mean, atol=3 * sigma)\n", "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import defaultdict\n\nimport torch\n\nimport pyro\nimport pyro.ops.jit\nfrom pyro import poutine\nfrom pyro.infer.elbo import ELBO\nfrom pyro.infer.util import torch_item, is_validation_enabled\nfrom pyro.infer.enum import get_importance_trace\nfrom pyro.util import check_if_enumerated, warn_if_nan\n\n\ndef _compute_mmd(X, Z, kernel):\n mmd = torch.mean(kernel(X)) + torch.mean(kernel(Z)) - torch.mean(kernel(X, Z)) * 2\n return mmd\n\n\nclass Trace_MMD(ELBO):\n \"\"\"\n An objective similar to ELBO, but with Maximum Mean Discrepancy (MMD)\n between marginal variational posterior and prior distributions\n instead of KL-divergence between variational posterior and prior distributions\n as in vanilla ELBO.\n The simplest example is MMD-VAE model [1]. The corresponding loss function is given as follows:\n\n :math: `L(\\\\theta, \\\\phi) = -E_{p_{data}(x)} E_{q(z | x; \\\\phi)} \\\\log p(x | z; \\\\theta) +\n MMD(q(z; \\\\phi) \\\\| p(z))`,\n\n where z is a latent code. 
MMD between two distributions is defined as follows:\n\n :math: `MMD(q(z) \\\\| p(z)) = E_{p(z), p(z')} k(z,z') + E_{q(z), q(z')} k(z,z') - 2 E_{p(z), q(z')} k(z,z')`,\n\n where k is a kernel.\n\n DISCLAIMER: this implementation treats only the particle dimension as batch dimension when computing MMD.\n All other dimensions are treated as event dimensions.\n For this reason, one needs large `num_particles` in order to have reasonable variance of MMD Monte-Carlo estimate.\n As a consequence, it is recommended to set `vectorize_particles=True` (default).\n The general case will be implemented in future versions.\n\n :param kernel: A kernel used to compute MMD.\n An instance of :class: `pyro.contrib.gp.kernels.kernel.Kernel`,\n or a dict that maps latent variable names to instances of :class: `pyro.contrib.gp.kernels.kernel.Kernel`.\n In the latter case, different kernels are used for different latent variables.\n\n :param mmd_scale: A scaling factor for MMD terms.\n Float, or a dict that maps latent variable names to floats.\n In the latter case, different scaling factors are used for different latent variables.\n\n References\n\n [1] `A Tutorial on Information Maximizing Variational Autoencoders (InfoVAE)`\n Shengjia Zhao\n https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/\n\n [2] `InfoVAE: Balancing Learning and Inference in Variational Autoencoders`\n Shengjia Zhao, Jiaming Song, Stefano Ermon\n \"\"\"\n\n def __init__(self,\n kernel, mmd_scale=1,\n num_particles=10,\n max_plate_nesting=float('inf'),\n max_iarange_nesting=None, # DEPRECATED\n vectorize_particles=True,\n strict_enumeration_warning=True,\n ignore_jit_warnings=False,\n retain_graph=None):\n super().__init__(\n num_particles, max_plate_nesting, max_iarange_nesting, vectorize_particles,\n strict_enumeration_warning, ignore_jit_warnings, retain_graph,\n )\n self._kernel = None\n self._mmd_scale = None\n self.kernel = kernel\n self.mmd_scale = mmd_scale\n\n @property\n def kernel(self):\n return self._kernel\n\n @kernel.setter\n def kernel(self, kernel):\n if isinstance(kernel, dict):\n # fix kernel's parameters\n for k in kernel.values():\n if isinstance(k, pyro.contrib.gp.kernels.kernel.Kernel):\n k.requires_grad_(False)\n else:\n raise TypeError(\"`kernel` values should be instances of `pyro.contrib.gp.kernels.kernel.Kernel`\")\n self._kernel = kernel\n elif isinstance(kernel, pyro.contrib.gp.kernels.kernel.Kernel):\n kernel.requires_grad_(False)\n self._kernel = defaultdict(lambda: kernel)\n else:\n raise TypeError(\"`kernel` should be an instance of `pyro.contrib.gp.kernels.kernel.Kernel`\")\n\n @property\n def mmd_scale(self):\n return self._mmd_scale\n\n @mmd_scale.setter\n def mmd_scale(self, mmd_scale):\n if isinstance(mmd_scale, dict):\n self._mmd_scale = mmd_scale\n elif isinstance(mmd_scale, (int, float)):\n self._mmd_scale = defaultdict(lambda: float(mmd_scale))\n else:\n raise TypeError(\"`mmd_scale` should be either float, or a dict of floats\")\n\n def _get_trace(self, model, guide, args, kwargs):\n \"\"\"\n Returns a single trace from the guide, and the model that is run\n against it.\n \"\"\"\n model_trace, guide_trace = get_importance_trace(\n \"flat\", self.max_plate_nesting, model, guide, args, kwargs)\n if is_validation_enabled():\n check_if_enumerated(guide_trace)\n return model_trace, guide_trace\n\n def _differentiable_loss_parts(self, model, guide, args, kwargs):\n all_model_samples = defaultdict(list)\n all_guide_samples = defaultdict(list)\n\n loglikelihood = 0.0\n penalty 
= 0.0\n for model_trace, guide_trace in self._get_traces(model, guide, args, kwargs):\n if self.vectorize_particles:\n model_trace_independent = poutine.trace(\n self._vectorized_num_particles(model)\n ).get_trace(*args, **kwargs)\n else:\n model_trace_independent = poutine.trace(model, graph_type='flat').get_trace(*args, **kwargs)\n\n loglikelihood_particle = 0.0\n for name, model_site in model_trace.nodes.items():\n if model_site['type'] == 'sample':\n if name in guide_trace and not model_site['is_observed']:\n guide_site = guide_trace.nodes[name]\n independent_model_site = model_trace_independent.nodes[name]\n if not independent_model_site[\"fn\"].has_rsample:\n raise ValueError(\"Model site {} is not reparameterizable\".format(name))\n if not guide_site[\"fn\"].has_rsample:\n raise ValueError(\"Guide site {} is not reparameterizable\".format(name))\n\n particle_dim = -self.max_plate_nesting - independent_model_site[\"fn\"].event_dim\n\n model_samples = independent_model_site['value']\n guide_samples = guide_site['value']\n\n if self.vectorize_particles:\n model_samples = model_samples.transpose(-model_samples.dim(), particle_dim)\n model_samples = model_samples.view(model_samples.shape[0], -1)\n\n guide_samples = guide_samples.transpose(-guide_samples.dim(), particle_dim)\n guide_samples = guide_samples.view(guide_samples.shape[0], -1)\n else:\n model_samples = model_samples.view(1, -1)\n guide_samples = guide_samples.view(1, -1)\n\n all_model_samples[name].append(model_samples)\n all_guide_samples[name].append(guide_samples)\n else:\n loglikelihood_particle = loglikelihood_particle + model_site['log_prob_sum']\n\n loglikelihood = loglikelihood_particle / self.num_particles + loglikelihood\n\n for name in all_model_samples.keys():\n all_model_samples[name] = torch.cat(all_model_samples[name])\n all_guide_samples[name] = torch.cat(all_guide_samples[name])\n divergence = _compute_mmd(all_model_samples[name], all_guide_samples[name], kernel=self._kernel[name])\n penalty = self._mmd_scale[name] * divergence + penalty\n\n warn_if_nan(loglikelihood, \"loglikelihood\")\n warn_if_nan(penalty, \"penalty\")\n return loglikelihood, penalty\n\n def differentiable_loss(self, model, guide, *args, **kwargs):\n \"\"\"\n Computes the MMD-VAE-type loss [1]. 
Calling backward on the latter\n leads to valid gradient estimates as long as latent variables\n in both the guide and the model are reparameterizable.\n\n References\n\n [1] `A Tutorial on Information Maximizing Variational Autoencoders (InfoVAE)`\n Shengjia Zhao\n https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/\n \"\"\"\n loglikelihood, penalty = self._differentiable_loss_parts(model, guide, args, kwargs)\n loss = -loglikelihood + penalty\n warn_if_nan(loss, \"loss\")\n return loss\n\n def loss(self, model, guide, *args, **kwargs):\n \"\"\"\n :returns: returns an estimate of the MMD-VAE-type loss [1]\n :rtype: float\n\n Computes the MMD-VAE-type loss with an estimator that uses num_particles many samples/particles.\n\n References\n\n [1] `A Tutorial on Information Maximizing Variational Autoencoders (InfoVAE)`\n Shengjia Zhao\n https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/\n \"\"\"\n loss = self.differentiable_loss(model, guide, *args, **kwargs)\n return torch_item(loss)\n\n def loss_and_grads(self, model, guide, *args, **kwargs):\n \"\"\"\n :returns: returns an estimate of the MMD-VAE-type loss [1]\n :rtype: float\n\n Computes the MMD-VAE-type loss and performs backward on it.\n Leads to valid gradient estimates as long as latent variables\n in both the guide and the model are reparameterizable.\n Num_particles many samples are used to form the estimators.\n\n References\n\n [1] `A Tutorial on Information Maximizing Variational Autoencoders (InfoVAE)`\n Shengjia Zhao\n https://ermongroup.github.io/blog/a-tutorial-on-mmd-variational-autoencoders/\n \"\"\"\n loss = self.differentiable_loss(model, guide, *args, **kwargs)\n loss.backward(retain_graph=self.retain_graph)\n return torch_item(loss)\n" ]
[ [ "torch.cat", "torch.einsum", "torch.tensor", "torch.exp", "torch.no_grad", "torch.sort", "torch.logsumexp" ], [ "torch.zeros", "torch.arange" ], [ "torch.optim.Adam", "torch.set_default_tensor_type", "numpy.split", "torch.ones", "torch.zeros", "torch.manual_seed", "torch.randn", "torch.einsum", "torch.eye", "numpy.arange", "torch.inverse", "torch.tensor", "torch.matmul", "torch.no_grad", "numpy.argmin", "torch.rand" ], [ "torch.ones", "torch.full", "torch.load", "torch.zeros", "torch.randn", "torch.arange", "torch.stack", "torch.save" ], [ "torch.nn.Linear", "torch.nn.Sequential", "torch.sigmoid", "torch.nn.functional.softplus" ], [ "torch.zeros", "torch.eye", "torch.tensor" ], [ "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shuxiaobo/TextTransferLearning
[ "6fb93bd43dde7012ece1bbe7a5beee0a991ccc43" ]
[ "datasets/binary.py" ]
[ "'''\nBinary classifier and corresponding datasets : MR, CR, SUBJ, MPQA...\n'''\nfrom __future__ import absolute_import, division, unicode_literals\n\nimport io\nimport os\nimport logging\nfrom utils.util import logger\nfrom torch.utils.data import Dataset\nfrom utils.util import prepare_dictionary\nfrom tensorflow.python.keras.preprocessing import sequence\n\n\nclass BinaryClassifierEval(Dataset):\n def __init__(self, args, num_class = 2, seed = 1111, filename = None):\n \"\"\"\n\n :param args:\n :param num_class: result class number\n :param seed: random seed\n :param filename: train | valid | test filename, default is train\n \"\"\"\n self.seed = seed\n self.args = args\n self.num_class = num_class\n self.max_len = 0\n filename = filename if filename else args.train_file\n\n self.data_x, self.data_y = self.load_file(os.path.join(self.args.tmp_dir, self.__class__.__name__, filename))\n\n self.n_samples = len(self.data_x)\n self.word_file = os.path.join(args.tmp_dir, self.__class__.__name__, args.word_file)\n if os.path.isfile(self.word_file) and os.path.getsize(self.word_file) > 0:\n self.word2id = self.get_word_index(self.word_file)\n else:\n self.word2id = self.prepare_dict(self.word_file)\n\n def load_file(self, fpath):\n \"\"\"\n load the data file with format : x \\t y\n Note: the data_y should be the sparse id, and start from 0.\n for example if you have 3 class, the id must range in (0, 1, 2)\n\n :param fpath: file path\n :return: data_x, data_y\n \"\"\"\n with io.open(fpath, 'r', encoding = 'utf-8') as f:\n data_x = list()\n data_y = list()\n for line in f.read().splitlines():\n line = line.strip().split(' ')\n data_x.append(line[:-1])\n data_y.append(int(line[-1]))\n self.max_len = len(line[:-1]) if len(line[:-1]) > self.max_len else self.max_len\n return data_x, data_y\n\n def prepare_dict(self, file_name):\n logger(\"Prepare the dictionary for the {}...\".format(self.__class__.__name__))\n word2id = prepare_dictionary(data = self.data_x, dict_path = file_name, exclude_n = self.args.skip_top, max_size = self.args.num_words)\n logger(\"Word2id size : %d\" % len(word2id))\n return word2id\n\n def get_word_index(self, path = None):\n if not path:\n path = self.args.tmp_dir + self.__class__.__name__ + self.args.word_file\n word2id = dict()\n with open(path, mode = 'r', encoding = 'utf-8') as f:\n for l in f:\n word2id.setdefault(l.strip(), len(word2id))\n logger('Word2id size : %d' % len(word2id))\n return word2id\n\n @staticmethod\n def batchfy_fn(data):\n x = [d[0] for d in data]\n y = [d[1] for d in data]\n max_len = max(map(len, x))\n return sequence.pad_sequences(x, maxlen = max_len, padding = 'post'), y\n\n def __getitem__(self, index):\n result = [self.word2id[d] if self.word2id.get(d) else self.word2id['_<UNKNOW>'] for d in self.data_x[index]]\n return result, self.data_y[index]\n\n def __len__(self):\n return self.n_samples\n\n\nclass CR(BinaryClassifierEval):\n def __init__(self, args, seed = 1111, filename = None):\n logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\\n\\n')\n super(self.__class__, self).__init__(args, seed, filename)\n\n\nclass MR(BinaryClassifierEval):\n def __init__(self, args, seed = 1111, filename = None):\n logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\\n\\n')\n super(self.__class__, self).__init__(args, seed, filename)\n\n\nclass SUBJ(BinaryClassifierEval):\n def __init__(self, args, seed = 1111, filename = None):\n logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\\n\\n')\n 
super(self.__class__, self).__init__(args, seed, filename)\n\n\nclass MPQA(BinaryClassifierEval):\n def __init__(self, args, seed = 1111, filename = None):\n logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\\n\\n')\n super(self.__class__, self).__init__(args, seed, filename)\n\n\nclass Kaggle(BinaryClassifierEval):\n def __init__(self, args, seed = 1111, filename = None):\n logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\\n\\n')\n super(self.__class__, self).__init__(args, seed, filename)\n\n\nclass TREC(BinaryClassifierEval):\n def __init__(self, args, seed = 1111, filename = None):\n logging.debug('***** Task : ' + self.__class__.__name__ + ' *****\\n\\n')\n super(self.__class__, self).__init__(args, seed, filename)\n" ]
[ [ "tensorflow.python.keras.preprocessing.sequence.pad_sequences" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.4", "2.2", "2.3", "2.4", "1.5", "1.7", "2.5", "2.6" ] } ]
sanchitvohra/EGVSR
[ "3c0b478179f772d7fe7521655008a2d79a6b6185", "3c0b478179f772d7fe7521655008a2d79a6b6185" ]
[ "codes/torch2onnx.py", "codes/official_metrics/LPIPSmodels/pretrained_networks.py" ]
[ "import os\nimport os.path as osp\nimport argparse\nimport yaml\nimport time\nimport torch\n\nfrom data import create_dataloader, prepare_data\nfrom models import define_model\nfrom models.networks import define_generator\nfrom utils import base_utils, data_utils\nfrom metrics.model_summary import register, profile_model\n\ndef test(opt):\n # logging\n logger = base_utils.get_logger('base')\n if opt['verbose']:\n logger.info('{} Configurations {}'.format('=' * 20, '=' * 20))\n base_utils.print_options(opt, logger)\n\n # infer and evaluate performance for each model\n for load_path in opt['model']['generator']['load_path_lst']:\n # setup model index\n model_idx = osp.splitext(osp.split(load_path)[-1])[0]\n\n # log\n logger.info('=' * 40)\n logger.info('Testing model: {}'.format(model_idx))\n logger.info('=' * 40)\n\n # create model\n opt['model']['generator']['load_path'] = load_path\n model = define_model(opt)\n\n # for each test dataset\n for dataset_idx in sorted(opt['dataset'].keys()):\n # use dataset with prefix `test`\n if not dataset_idx.startswith('test'):\n continue\n\n ds_name = opt['dataset'][dataset_idx]['name']\n logger.info('Testing on {}: {}'.format(dataset_idx, ds_name))\n\n # create data loader\n test_loader = create_dataloader(opt, dataset_idx=dataset_idx)\n\n # infer and store results for each sequence\n for i, data in enumerate(test_loader):\n\n # fetch data\n lr_data = data['lr'][0]\n seq_idx = data['seq_idx'][0]\n frm_idx = [frm_idx[0] for frm_idx in data['frm_idx']]\n\n # infer\n hr_seq = model.infer(lr_data) # thwc|rgb|uint8\n\n # save results (optional)\n if opt['test']['save_res']:\n res_dir = osp.join(\n opt['test']['res_dir'], ds_name, model_idx)\n res_seq_dir = osp.join(res_dir, seq_idx)\n data_utils.save_sequence(\n res_seq_dir, hr_seq, frm_idx, to_bgr=True)\n\n logger.info('-' * 40)\n\n # logging\n logger.info('Finish testing')\n logger.info('=' * 40)\n\n\nif __name__ == '__main__':\n # ----------------- parse arguments ----------------- #\n parser = argparse.ArgumentParser()\n parser.add_argument('--exp_dir', type=str, default=\"../../experiments_BI/TecoGAN/001\")\n parser.add_argument('--mode', type=str, default=\"test\")\n parser.add_argument('--model', type=str, default=\"TecoGAN\")\n parser.add_argument('--opt', type=str, default=\"test_onnx.yml\")\n parser.add_argument('--gpu_id', type=int, default=0)\n parser.add_argument('--lr_size', type=str, default='3x960x540')\n parser.add_argument('--test_speed', action='store_true')\n args = parser.parse_args()\n\n # ----------------- get options ----------------- #\n print(args.exp_dir)\n with open(osp.join(args.exp_dir, args.opt), 'r') as f:\n opt = yaml.load(f.read(), Loader=yaml.FullLoader)\n\n # ----------------- general configs ----------------- #\n # experiment dir\n opt['exp_dir'] = args.exp_dir\n # random seed\n base_utils.setup_random_seed(opt['manual_seed'])\n # logger\n base_utils.setup_logger('base')\n opt['verbose'] = opt.get('verbose', False)\n\n # device\n if args.gpu_id >= 0:\n os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)\n if torch.cuda.is_available():\n # TODO: torch.backends.cudnn.benchmark setting\n # torch.backends.cudnn.deterministic = True\n # torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.benchmark = True\n opt['device'] = 'cuda'\n else:\n opt['device'] = 'cpu'\n else:\n opt['device'] = 'cpu'\n\n # ----------------- test ----------------- #\n # basic configs\n scale = opt['scale']\n device = torch.device(opt['device'])\n\n # create model\n net_G = 
define_generator(opt).to(device)\n\n from models.networks.tecogan_nets import FNet, SRNet\n\n fnet = FNet(in_nc=opt['model']['generator']['in_nc']).to(device)\n srnet = SRNet(in_nc=opt['model']['generator']['in_nc'], out_nc=3, nf=64, nb=10, upsample_func=None, scale=4).to(device)\n\n # get dummy input\n lr_size = tuple(map(int, args.lr_size.split('x')))\n dummy_input_dict = net_G.generate_dummy_input(lr_size)\n for key in dummy_input_dict.keys():\n dummy_input_dict[key] = dummy_input_dict[key].to(device)\n\n lr_curr = dummy_input_dict['lr_curr']\n lr_prev = dummy_input_dict['lr_prev']\n hr_prev = dummy_input_dict['hr_prev']\n hr_prev_warp = torch.rand(1, 3*16, 960, 540, dtype=torch.float32).to(device)\n\n # test running speed\n n_test = 30\n tot_time = 0\n\n fnet.eval()\n\n for i in range(n_test):\n print('run num:', i)\n start_time = time.time()\n with torch.no_grad():\n try:\n # rst = net_G(**dummy_input_dict)\n # rst = fnet(lr_curr, lr_prev)\n rst = srnet(lr_curr, hr_prev_warp)\n except RuntimeError as e:\n if 'out of memory' in str(e):\n print('| WARNING: ran out of memory')\n if hasattr(torch.cuda, 'empty_cache'):\n torch.cuda.empty_cache()\n else:\n raise e\n\n end_time = time.time()\n tot_time += end_time - start_time\n\n\n print('Speed (FPS): {:.3f} (averaged for {} runs)'.format(n_test / tot_time, n_test))\n print('-' * 40)\n\n # torch to onnx\n # input_fnet = (lr_curr, lr_prev)\n # input_srnet = (lr_curr, hr_prev_warp)\n # torch.onnx.export(fnet, input_fnet, \"fnet.onnx\", verbose=True, opset_version=11)\n\n", "from collections import namedtuple\nimport torch\nfrom torchvision import models\n# from IPython import embed\n\nclass squeezenet(torch.nn.Module):\n def __init__(self, requires_grad=False, pretrained=True):\n super(squeezenet, self).__init__()\n pretrained_features = models.squeezenet1_1(pretrained=pretrained).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n self.slice6 = torch.nn.Sequential()\n self.slice7 = torch.nn.Sequential()\n self.N_slices = 7\n for x in range(2):\n self.slice1.add_module(str(x), pretrained_features[x])\n for x in range(2,5):\n self.slice2.add_module(str(x), pretrained_features[x])\n for x in range(5, 8):\n self.slice3.add_module(str(x), pretrained_features[x])\n for x in range(8, 10):\n self.slice4.add_module(str(x), pretrained_features[x])\n for x in range(10, 11):\n self.slice5.add_module(str(x), pretrained_features[x])\n for x in range(11, 12):\n self.slice6.add_module(str(x), pretrained_features[x])\n for x in range(12, 13):\n self.slice7.add_module(str(x), pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h = self.slice1(X)\n h_relu1 = h\n h = self.slice2(h)\n h_relu2 = h\n h = self.slice3(h)\n h_relu3 = h\n h = self.slice4(h)\n h_relu4 = h\n h = self.slice5(h)\n h_relu5 = h\n h = self.slice6(h)\n h_relu6 = h\n h = self.slice7(h)\n h_relu7 = h\n vgg_outputs = namedtuple(\"SqueezeOutputs\", ['relu1','relu2','relu3','relu4','relu5','relu6','relu7'])\n out = vgg_outputs(h_relu1,h_relu2,h_relu3,h_relu4,h_relu5,h_relu6,h_relu7)\n\n return out\n\n\nclass alexnet(torch.nn.Module):\n def __init__(self, requires_grad=False, pretrained=True):\n super(alexnet, self).__init__()\n alexnet_pretrained_features = models.alexnet(pretrained=pretrained).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = 
torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n self.N_slices = 5\n for x in range(2):\n self.slice1.add_module(str(x), alexnet_pretrained_features[x])\n for x in range(2, 5):\n self.slice2.add_module(str(x), alexnet_pretrained_features[x])\n for x in range(5, 8):\n self.slice3.add_module(str(x), alexnet_pretrained_features[x])\n for x in range(8, 10):\n self.slice4.add_module(str(x), alexnet_pretrained_features[x])\n for x in range(10, 12):\n self.slice5.add_module(str(x), alexnet_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h = self.slice1(X)\n h_relu1 = h\n h = self.slice2(h)\n h_relu2 = h\n h = self.slice3(h)\n h_relu3 = h\n h = self.slice4(h)\n h_relu4 = h\n h = self.slice5(h)\n h_relu5 = h\n alexnet_outputs = namedtuple(\"AlexnetOutputs\", ['relu1', 'relu2', 'relu3', 'relu4', 'relu5'])\n out = alexnet_outputs(h_relu1, h_relu2, h_relu3, h_relu4, h_relu5)\n\n return out\n\nclass vgg16(torch.nn.Module):\n def __init__(self, requires_grad=False, pretrained=True):\n super(vgg16, self).__init__()\n vgg_pretrained_features = models.vgg16(pretrained=pretrained).features\n self.slice1 = torch.nn.Sequential()\n self.slice2 = torch.nn.Sequential()\n self.slice3 = torch.nn.Sequential()\n self.slice4 = torch.nn.Sequential()\n self.slice5 = torch.nn.Sequential()\n self.N_slices = 5\n for x in range(4):\n self.slice1.add_module(str(x), vgg_pretrained_features[x])\n for x in range(4, 9):\n self.slice2.add_module(str(x), vgg_pretrained_features[x])\n for x in range(9, 16):\n self.slice3.add_module(str(x), vgg_pretrained_features[x])\n for x in range(16, 23):\n self.slice4.add_module(str(x), vgg_pretrained_features[x])\n for x in range(23, 30):\n self.slice5.add_module(str(x), vgg_pretrained_features[x])\n if not requires_grad:\n for param in self.parameters():\n param.requires_grad = False\n\n def forward(self, X):\n h = self.slice1(X)\n h_relu1_2 = h\n h = self.slice2(h)\n h_relu2_2 = h\n h = self.slice3(h)\n h_relu3_3 = h\n h = self.slice4(h)\n h_relu4_3 = h\n h = self.slice5(h)\n h_relu5_3 = h\n vgg_outputs = namedtuple(\"VggOutputs\", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])\n out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)\n\n return out\n\n\n\nclass resnet(torch.nn.Module):\n def __init__(self, requires_grad=False, pretrained=True, num=18):\n super(resnet, self).__init__()\n if(num==18):\n self.net = models.resnet18(pretrained=pretrained)\n elif(num==34):\n self.net = models.resnet34(pretrained=pretrained)\n elif(num==50):\n self.net = models.resnet50(pretrained=pretrained)\n elif(num==101):\n self.net = models.resnet101(pretrained=pretrained)\n elif(num==152):\n self.net = models.resnet152(pretrained=pretrained)\n self.N_slices = 5\n\n self.conv1 = self.net.conv1\n self.bn1 = self.net.bn1\n self.relu = self.net.relu\n self.maxpool = self.net.maxpool\n self.layer1 = self.net.layer1\n self.layer2 = self.net.layer2\n self.layer3 = self.net.layer3\n self.layer4 = self.net.layer4\n\n def forward(self, X):\n h = self.conv1(X)\n h = self.bn1(h)\n h = self.relu(h)\n h_relu1 = h\n h = self.maxpool(h)\n h = self.layer1(h)\n h_conv2 = h\n h = self.layer2(h)\n h_conv3 = h\n h = self.layer3(h)\n h_conv4 = h\n h = self.layer4(h)\n h_conv5 = h\n\n outputs = namedtuple(\"Outputs\", ['relu1','conv2','conv3','conv4','conv5'])\n out = outputs(h_relu1, h_conv2, h_conv3, h_conv4, h_conv5)\n\n 
return out\n" ]
[ [ "torch.cuda.empty_cache", "torch.no_grad", "torch.rand", "torch.cuda.is_available", "torch.device" ], [ "torch.nn.Sequential" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dariaomelkina/UCU-Semester-Homework
[ "c980d467e9eba3ab64c4db36c66c20ac92c7ba00" ]
[ "examples/libs_and_modules_usage_example.py" ]
[ "import json\nimport dash\nimport urllib.request\nimport urllib.parse\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas\nimport plotly.express as px\n\nBASE_URL = \"https://api.nasa.gov/insight_weather/?api_key=DEMO_KEY&feedtype=json&ver=1.0\"\n\n\ndef get_data_from_URL(base_url):\n with urllib.request.urlopen(base_url) as response:\n data = response.read()\n data = data.decode(\"utf-8\")\n data = json.loads(data)\n return data\n\n\nready_data = get_data_from_URL(BASE_URL)\nlst = []\nfor key in ready_data.keys():\n if key != \"sol_keys\" or key != \"validity_checks\":\n try:\n df = pandas.DataFrame(ready_data[key])\n lst.append(df[\"AT\"][\"av\"])\n except KeyError:\n break\n\ninf_series = pandas.DataFrame(list(zip(lst, ready_data['sol_keys'])), columns=[\"average temperature\", \"day\"])\n\nfig = px.bar(inf_series, x='day', y='average temperature', text='average temperature',\n hover_data=['average temperature', 'day'], color='average temperature',\n labels={'pop': 'temperature'}, height=400)\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ncolors = {\n 'text': '#91165a'\n}\n\napp.layout = html.Div(children=[\n html.H1(\n children='Dash Experiment',\n style={\n 'textAlign': 'center',\n 'color': colors['text']\n }\n ),\n\n html.Div(children='There are average temperatures on Mars by days.', style={\n 'textAlign': 'center',\n 'color': colors['text']\n }),\n\n dcc.Graph(\n id='example-graph-2',\n figure=fig,\n )\n])\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
tpinhoda/Graph-Based_Spatial_Cross_Validation
[ "19300a715d3d03580232926bbc1f6ea8800b23e3" ]
[ "src/paralleling/train_predict/cross_validation.py" ]
[ "import contextlib\nimport os\nimport sys\nimport pandas as pd\nimport geopandas as gpd\nfrom weka.core import jvm\nfrom src import utils\nfrom src.pipeline import Pipeline\nfrom src.visualization.performance import VizMetrics\nfrom src.visualization.dependence import VizDependence\n\n# Set pipeline switchers\nSWITCHERS = {\n \"scv\": False,\n \"fs\": False,\n \"train\": True,\n \"predict\": True,\n \"evaluate\": False,\n}\n\nml_methods = [\n \"KNN\",\n \"OLS\",\n \"Lasso\",\n \"Ridge\",\n \"ElasticNet\",\n \"DT\",\n \"LGBM\",\n \"RF\",\n \"MLP\",\n \"SVM\",\n]\n\n\ndef main(root_path, dataset, fs_method, index_col, index_fold, target_col, ml_method):\n \"\"\"Runs main script\"\"\"\n utils.initialize_coloredlog()\n utils.initialize_rich_tracerback()\n utils.initialize_logging()\n\n data_path = os.path.join(root_path, dataset, \"data.csv\")\n # Load data\n data = pd.read_csv(data_path, index_col=index_col, low_memory=False)\n with contextlib.suppress(KeyError):\n data.drop(columns=[\"[GEO]_LATITUDE\", \"[GEO]_LONGITUDE\"], inplace=True)\n # Run pipeline\n CrossValidation = Pipeline(\n root_path=os.path.join(root_path, dataset),\n data=data,\n meshblocks=None,\n index_col=index_col,\n fold_col=index_fold,\n target_col=target_col,\n scv_method=\"CrossValidation\",\n fs_method=fs_method,\n ml_method=ml_method,\n switchers=SWITCHERS,\n )\n\n print(\n f\"Running the CrossValidation SCV approach for dataset: {dataset} ML Method = {ml_method}\"\n )\n CrossValidation.run()\n\n\nif __name__ == \"__main__\":\n root_path = sys.argv[1]\n dataset = sys.argv[2]\n fs_method = sys.argv[3]\n index_col = sys.argv[4]\n fold_col = sys.argv[5]\n target_col = sys.argv[6]\n ml_method = sys.argv[7]\n print(dataset, fs_method, index_col, fold_col, target_col)\n main(root_path, dataset, fs_method, index_col, fold_col, target_col, ml_method)\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
jingliinpurdue/Fast-and-Robust-UAV-to-UAV-Detection-and-Tracking
[ "317e85a03f5c374ef8ec53b543208ec36655fa07" ]
[ "util/kalman2d.py" ]
[ "# Opencv 2---Version\n# -*- coding: utf-8 -*-\n'''\n kalman2d - 2D Kalman filter using OpenCV\n \n Based on http://jayrambhia.wordpress.com/2012/07/26/kalman-filter/\n \n Copyright (C) 2014 Simon D. Levy\n \n This code is free software: you can redistribute it and/or modify\n it under the terms of the GNU Lesser General Public License as\n published by the Free Software Foundation, either version 3 of the\n License, or (at your option) any later version.\n This code is distributed in the hope that it will be useful,\n \n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n \n You should have received a copy of the GNU Lesser General Public License\n along with this code. If not, see <http://www.gnu.org/licenses/>.\n '''\n\n#from cv2 import cv\nimport cv2\nimport numpy as np\n\nclass Kalman2D(object):\n '''\n A class for 2D Kalman filtering\n '''\n \n def __init__(self, processNoiseCovariance=1e-4, measurementNoiseCovariance=1e-1, errorCovariancePost=0.1):\n #def __init__(self,processNoiseCovariance=1e-1, measurementNoiseCovariance=1e1, errorCovariancePost=1e4):\n '''\n Constructs a new Kalman2D object.\n For explanation of the error covariances see\n http://en.wikipedia.org/wiki/Kalman_filter\n '''\n # state space:location--2d,speed--2d\n #self.kalman = cv.CreateKalman(4, 2, 0)\n self.kalman = cv2.KalmanFilter(4, 2, 0)\n self.kalman_measurement = np.array([[1.],[1.]],np.float32)\n \n self.kalman.transitionMatrix = np.array([[1.,0., 1.,0.], [0., 1., 0., 1.], [0., 0., 1., 0.], [0., 0., 0., 1.]],np.float32)\n \n\n \n self.kalman.measurementMatrix = np.array([[1.,0.,0.,0.],[0.,1.,0.,0.]],np.float32)\n \n self.kalman.processNoiseCov = processNoiseCovariance * np.array([[1.,0.,0.,0.],[0.,1.,0.,0.],[0.,0.,1.,0.],[0.,0.,0.,1.]],np.float32)\n self.kalman.measurementNoiseCov = np.array([[1.,0.],[0.,1.]],np.float32) * measurementNoiseCovariance\n self.kalman.errorCovPost = np.array([[1.,0., 0, 0],[0.,1., 0, 0],[0.,0, 1, 0],[0.,0, 0, 1]],np.float32) * errorCovariancePost\n #cv.SetIdentity(self.kalman.measurement_matrix)\n #Initialize identity matrix\n #cv.SetIdentity(self.kalman.process_noise_cov, cv.RealScalar(processNoiseCovariance))\n #cv.SetIdentity(self.kalman.measurement_noise_cov, cv.RealScalar(measurementNoiseCovariance))\n #cv.SetIdentity(self.kalman.error_cov_post, cv.RealScalar(errorCovariancePost))\n \n self.predicted = np.array((2,1), np.float32)\n self.corrected = np.zeros((2,1), np.float32)\n\n def update(self, x, y):\n '''\n Updates the filter with a new X,Y measurement\n '''\n self.kalman_measurement = np.array([[np.float32(x)],[np.float32(y)]])\n #self.kalman_measurement[0, 0] = x\n #self.kalman_measurement[1, 0] = y\n #print self.kalman.predict()\n self.predicted = self.kalman.predict()\n self.corrected = self.kalman.correct(self.kalman_measurement)\n #self.corrected = cv.KalmanCorrect(self.kalman, self.kalman_measurement)\n\n def getEstimate(self):\n '''\n Returns the current X,Y estimate.\n '''\n \n return self.corrected[0,0], self.corrected[1,0]\n \n def getPrediction(self):\n '''\n Returns the current X,Y prediction.\n '''\n \n return self.predicted[0,0], self.predicted[1,0]\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.float32" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lzamparo/SeqDemote
[ "3eaf18e88c9dc6a3d1a69444ecdba9f9b5d9682a", "3eaf18e88c9dc6a3d1a69444ecdba9f9b5d9682a", "3eaf18e88c9dc6a3d1a69444ecdba9f9b5d9682a" ]
[ "tests/test_train_utils.py", "src/pytorch_visualize.py", "src/utils/torch_model_construction_utils.py" ]
[ "import os \nimport numpy as np\nfrom nose.tools import eq_, ok_\n\nimport torch\nimport utils.train_utils as tr_utils\nimport utils.torch_model_construction_utils as tmu\n\ndef flip_random(data, num_labels):\n ''' return a column of 0,1 labels with num_labels flipped '''\n length = data.shape[0]\n flip_positions = np.random.randint(0,length,num_labels)\n for position in flip_positions:\n if data[position] == 0:\n data[position] = 1\n else:\n data[position] = 0\n return data\n \n\ndef make_fuzzy_predictions(preds, eps = 0.025, shape=0.1):\n ''' Add noise to 0-1 array to simulate predictions '''\n \n zeros = preds[preds == 0]\n ones = preds[preds == 1]\n \n zero_noise = np.random.gamma(eps, shape, size=zeros.shape)\n ones_noise = -1.0 * np.random.gamma(eps, shape, size=ones.shape)\n \n noisy_zeros = zeros + zero_noise\n noisy_ones = ones + ones_noise\n \n preds[preds == 0] = noisy_zeros\n preds[preds == 1] = noisy_ones\n\n assert(np.alltrue(preds > 0))\n assert(np.alltrue(preds <= 1))\n return preds\n\n\ndef make_classification_labels_and_preds(shape=(128,164), p=0.1, \n flips=10, noisy=False, \n eps=0.025, g_shape=0.1,\n as_list=False):\n ''' fixture generator for mt_aupr / mt_auroc \n returns labels, y_hat \n \n as_list := return y_hat fixture as a list of one-dimensional arrays '''\n \n labels = np.random.binomial(1,p,size=shape) \n preds = np.array(labels.copy(), dtype=np.float)\n \n for col in preds.transpose():\n col = flip_random(col, flips)\n \n if noisy:\n preds = make_fuzzy_predictions(preds, eps, g_shape)\n \n if as_list:\n preds_list = [preds[:,i] for i in range(preds.shape[1])]\n return labels, preds_list\n \n return labels, preds\n\n\n\ndef make_presigmoid_activations(preds, confident=True, to_tensor=False):\n ''' fixture generator for pre-sigmoid activations from \n network output. 
Makes more confident predictions or less\n confident predictions'''\n \n extended_activations = np.zeros_like(preds)\n if confident:\n noise = np.random.gamma(5, 1, size=extended_activations.shape)\n else:\n noise = np.random.gamma(1,0.5, size=extended_activations.shape)\n \n # want to iterate elementwise here, maybe flatten / it / reshape?\n for e, p, n in zip(np.nditer(extended_activations, op_flags=[['readwrite']]), preds.flat, noise.flat):\n if p > 0.5:\n e += n\n else:\n e -= n\n \n if to_tensor:\n return torch.tensor(extended_activations)\n \n return extended_activations \n\ndef test_focal_loss():\n ''' make sure focal loss increases for uncertain predictions '''\n \n ### If I need to compare the weights, losses pre-activations for each fixture \n ### across all tasks, set reduce=False\n \n labels, preds = make_classification_labels_and_preds(shape=(4,4), flips=1)\n focal_loss = tmu.FocalLoss(reduce=True) \n \n # generate certain predictions, calculate focal loss\n certain_activations = make_presigmoid_activations(preds, confident=True, to_tensor=True)\n certain_loss = tr_utils.per_task_loss(certain_activations, torch.tensor(labels, dtype=torch.double), focal_loss, do_sum=False)\n \n # generate less-certain predictions, calculate focal loss\n uncertain_activations = make_presigmoid_activations(preds, confident=False, to_tensor=True) \n uncertain_loss = tr_utils.per_task_loss(uncertain_activations, torch.tensor(labels, dtype=torch.double), focal_loss, do_sum=False)\n \n # Q: should less-certain losses have much greater loss?\n # A: depends on the level of certainty (i.e p_t) and gamma.\n ok_(sum(uncertain_loss) < sum(certain_loss))\n\ndef test_st_accuracy():\n ''' make sure ST accuracy works '''\n test_labels, test_preds = make_classification_labels_and_preds()\n test_labels = test_labels[:,0]\n test_preds = test_preds[:,0]\n test_accuracy = tr_utils.st_accuracy(test_labels, test_preds)\n ok_(0.5 <= test_accuracy <= 1.0)\n\ndef test_mt_accuracy():\n ''' make sure MT accuracy works '''\n test_labels, test_preds = make_classification_labels_and_preds()\n test_accuracy = tr_utils.mt_accuracy(test_labels, test_preds)\n ok_(0.5 <= test_accuracy <= 1.0)\n \ndef test_mt_precision():\n ''' make sure MT precision works '''\n test_labels, test_preds = make_classification_labels_and_preds()\n test_precision = tr_utils.mt_avg_precision(test_labels, test_preds)\n ok_(0.0 <= test_precision <= 1.0) \n \ndef test_noisy_mt_precision():\n ''' make sure MT precision works '''\n test_labels, test_preds = make_classification_labels_and_preds(noisy=True)\n test_precision = tr_utils.mt_avg_precision(test_labels, test_preds)\n ok_(0.0 <= test_precision <= 1.0) \n \ndef test_mt_precision_at_recall():\n ''' make sure MT precision at recall works '''\n test_labels, test_preds = make_classification_labels_and_preds(noisy=True)\n test_precision = tr_utils.mt_precision_at_recall(test_labels, test_preds)\n ok_(0.0 <= test_precision <= 1.0)\n \ndef test_mt_precision_at_recall_list():\n ''' make sure MT precision at recall works '''\n test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)\n test_precision = tr_utils.mt_precision_at_recall(test_labels, test_preds)\n ok_(0.0 <= test_precision <= 1.0)\n \ndef test_mt_f1():\n ''' make sure MT f1 works '''\n test_labels, test_preds = make_classification_labels_and_preds(noisy=True)\n test_f1 = tr_utils.mt_avg_f1_score(test_labels, test_preds)\n ok_(0.0 <= test_f1 <= 1.0)\n \ndef test_mt_f1_list():\n ''' make sure MT f1 with predictions as 
list works '''\n test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)\n test_f1 = tr_utils.mt_avg_f1_score(test_labels, test_preds)\n ok_(0.0 <= test_f1 <= 1.0)\n\ndef test_mt_mcc():\n ''' make sure MT MCC works '''\n test_labels, test_preds = make_classification_labels_and_preds(noisy=True)\n test_mcc = tr_utils.mt_avg_mcc(test_labels, test_preds)\n ok_(-1.0 <= test_mcc <= 1.0)\n \ndef test_mt_mcc_list():\n ''' make sure MT MCC works '''\n test_labels, test_preds = make_classification_labels_and_preds(noisy=True, as_list=True)\n test_mcc = tr_utils.mt_avg_mcc(test_labels, test_preds)\n ok_(-1.0 <= test_mcc <= 1.0) \n \n", "import argparse\nimport os\nimport torch\nimport importlib.util\n\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom utils import train_utils\n\nfrom tensorboardX import SummaryWriter\nfrom torchviz import make_dot, make_dot_from_trace\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"config\", help='path to config file')\nparser.add_argument(\"--logdir\", help='write tensorboard logs here', \n default='~/projects/SeqDemote/results/tb_logs')\nargs = parser.parse_args()\n\n### Load up a model to visualize it with either tensorboardX or torchviz\nmodel_config = args.config\nmodel_path_name = os.path.join(os.path.expanduser(os.getcwd()),'models',model_config)\nspec = importlib.util.spec_from_file_location(model_config, model_path_name)\nmodel_module = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(model_module)\n\nprint(\"...Build model, load model state from save state if provided\")\nmodel = model_module.net\n\nprint(\"...Getting shape of dummy data\", flush=True)\nif hasattr(model_module, 'batch_size'):\n batch_size = model_module.batch_size\n embedding_dim = model_module.embedding_dim_len\n num_peaks = model_module.embedded_seq_len // embedding_dim\nelse:\n batch_size = 128\n\n# generate dummy data, pass through the model\nx = torch.randn((batch_size, 1, num_peaks, embedding_dim))\ny = model(x)\n\n# try to draw the model\n# ... using Tensorbard logging\n#writer = SummaryWriter(os.path.expanduser(args.logdir))\n#writer.add_graph(model, x)\n\n# ... using pytorchviz make_dot\ng = make_dot(y, params=dict(list(model.named_parameters()) + [('x', x)]))\ng.save(filename=\"model_fig.dot\", directory=os.path.expanduser(args.logdir))\n\n# ... using pytorchviz make_dot from trace \nwith torch.onnx.set_training(model, False):\n trace, _ = torch.jit.get_trace_graph(model, args=(x,))\ng = make_dot_from_trace(trace)\ng.save(filename=\"model_fig_jit.dot\", directory=os.path.expanduser(args.logdir))\n\n#x = torch.randn(1, 3, 227, 227).requires_grad_(True)\n#y = model(x)\n#make_dot(y, params=dict(list(model.named_parameters()) + [('x', x)]))", "import torch\nimport torch.nn as nn\n\n\n### FocalLoss (cf. arxiv:) to re-weight those w/ more uncertainty\nclass FocalLoss(nn.Module):\n \n def __init__(self, reduce=True, gamma=1.5, alpha=0.7):\n super(FocalLoss, self).__init__()\n self.reduce = reduce\n self.gamma = gamma\n self.alpha = alpha\n \n def _get_weights(self, x, t):\n '''\n Helper to get the weights for focal loss calculation\n '''\n p = nn.functional.sigmoid(x)\n p_t = p*t + (1 - p)*(1 - t)\n alpha_t = self.alpha * t + (1 - self.alpha)*(1 - t)\n w = alpha_t * (1 - p_t).pow(self.gamma)\n return w\n \n def focal_loss(self, x, t):\n '''\n Focal Loss cf. 
arXiv:1708.02002\n \n Args:\n x: (tensor) output from last layer of network\n t: (tensor) targets in [0,1]\n alpha: (float) class imbalance correction weight \\in (0,1)\n gamma: (float) amplification factor for uncertain classification\n \n Return:\n (tensor) focal loss.\n '''\n weights = self._get_weights(x, t)\n return nn.functional.binary_cross_entropy_with_logits(x, t, weights, size_average=False, reduce=self.reduce)\n \n def forward(self, input, target):\n return self.focal_loss(input, target)\n\n\n### Functions that allow for re-initialization of model and\n### optimizer to tune hyperparameters \n\n\ndef init_weights(m, gain=nn.init.calculate_gain('relu')):\n ''' Recursively initalizes the weights of a network. '''\n \n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform_(m.weight)\n m.bias.data.fill_(0.01)\n \n if isinstance(m, nn.Conv2d):\n torch.nn.init.orthogonal_(m.weight, gain)\n m.bias.data.fill_(0.1)\n \n\ndef reinitialize_model(model, num_factors=19):\n ''' Initialize and return a model '''\n net = model(num_factors=num_factors)\n net.apply(init_weights)\n return net\n\ndef get_model_param_lists(net):\n ''' Partition the model parameters into biases, weights, and \n weights intended to be sparse '''\n biases, weights, sparse_weights = [], [], []\n for name, p in net.named_parameters():\n if 'bias' in name:\n biases += [p]\n \n elif 'sparse' in name:\n sparse_weights += [p]\n \n else:\n weights += [p] \n \n return biases, weights, sparse_weights\n\n\ndef get_sparse_weights_penalty(net, sparse_lambda=1e-6, cuda=True):\n ''' Return a list of additional sparsity penalties on those layers\n identfied in the model as sparse \n '''\n sparse_penalties = []\n for name, p in net.named_parameters():\n if 'weight_v' in name:\n if cuda:\n p = p.cuda()\n L1_loss = sparse_lambda * (torch.abs(p)).sum()\n sparse_penalties.append(L1_loss) \n return sparse_penalties\n\n\ndef orthogonal_filter_penalty(net, orth_lambda=1e-6, cuda=True):\n ''' Return a list of additional decorrelative penalty on the conv filters '''\n \n for name, p in net.named_parameters():\n if 'orth' in name and 'weight_v' in name:\n p_flattened = p.view(p.size(0),-1)\n WWt = torch.mm(p_flattened, torch.transpose(p_flattened,0,1))\n eye = torch.Tensor(torch.eye(p_flattened.size(0)))\n if cuda:\n eye = eye.cuda()\n WWt = WWt.cuda()\n WWt -= eye\n orth_loss = orth_lambda * WWt.sum()\n return [orth_loss]\n\ndef initialize_optimizer(weights, biases, sparse_weights, hyperparams_dict, optimizer=torch.optim.Adam):\n ''' A standard model to initialize the list of dictionaries for\n paramterizing an optimizer '''\n \n weights_dict = {'params': weights}\n biases_dict = {'params': biases}\n sparse_dict = {'params': sparse_weights}\n \n try:\n weight_lambda = hyperparams_dict['weight_lambda']\n weights_dict['weight_decay'] = weight_lambda\n except KeyError:\n pass # no additional WD for weights\n \n try:\n bias_lambda = hyperparams_dict['bias_lambda']\n biases_dict['weight_decay'] = bias_lambda\n except KeyError:\n pass # no additional WD for biases\n\n try:\n sparse_lambda = hyperparams_dict['sparse_lambda']\n sparse_dict['weight_decay'] = sparse_lambda\n except KeyError:\n pass # no additional sparsity\n \n optimizer_param_dicts = [weights_dict, biases_dict, sparse_dict]\n return optimizer, optimizer_param_dicts\n\n\n \n\n \n\n\n\n\n" ]
[ [ "numpy.nditer", "torch.tensor", "numpy.alltrue", "numpy.zeros_like", "numpy.random.gamma", "numpy.random.binomial", "numpy.random.randint" ], [ "torch.randn", "torch.jit.get_trace_graph", "torch.onnx.set_training" ], [ "torch.nn.init.calculate_gain", "torch.abs", "torch.transpose", "torch.nn.functional.binary_cross_entropy_with_logits", "torch.nn.functional.sigmoid", "torch.nn.init.orthogonal_", "torch.nn.init.xavier_uniform_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
albpurpura/PLTR
[ "e21d3eb24fb0d269abd68ba23677501c30bb08eb" ]
[ "main_dasalc.py" ]
[ "import argparse\nimport logging\nimport os\nimport time\nimport uuid\n\nimport numpy as np\nimport pyltr\nimport tensorflow as tf\n\nfrom evaluation import compute_mean_ndcg, compute_perf_metrics, create_trec_eval_format_run_qrels\nfrom lambdamart import compute_lambdamart_preds\n# from model import ReRanker\nfrom dasalc_model import ReRanker\n\nfrom simulate_unsupervised_rj import compare_artif_rj_with_real_ones, sample_labels\nfrom utils import load_model, pad_list, save_model\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\n\ndef add_arguments(parser):\n \"\"\"Build ArgumentParser.\"\"\"\n parser.register(\"type\", \"bool\", lambda v: v.lower() == \"true\")\n parser.add_argument(\"--coll_name\", type=str, default='MQ2007', help=\"Collection name\")\n parser.add_argument(\"--data_folder\", type=str, default='../LETOR_data/MQ2007/', help=\"Data folder.\")\n parser.add_argument(\"--simulate_labels\", type=str, default=False,\n help=\"Whether to train with simulated labels or not.\")\n parser.add_argument(\"--expand_training_data\", type=str, default=False,\n help=\"Whether to expand training data or not.\")\n parser.add_argument(\"--det_model\", type=str, default=True, help=\"Whether to use probabilistic layers or not.\")\n parser.add_argument(\"--rerank_lambdamart\", type=str, default=False,\n help=\"Whether to rerank lambdamart preds or from scratch.\")\n parser.add_argument(\"--lambdamart_preds_path\", type=str, default='../LETOR_data/MQ2007/lambdamart_runs',\n help=\"LM data folder.\")\n # model parameters\n parser.add_argument(\"--seed\", type=float, default=0, help=\"The random seed to use.\")\n parser.add_argument(\"--n_binomial_samples\", type=float, default=32,\n help=\"The number of binomial samples to simulate.\")\n # parser.add_argument(\"--loss\", type=str, default='Hinge', help=\"The loss to use to train the model.\")\n # parser.add_argument(\"--loss\", type=str, default='KL_B', help=\"The loss to use to train the model.\")\n # parser.add_argument(\"--loss\", type=str, default='KL_G', help=\"The loss to use to train the model.\")\n # parser.add_argument(\"--loss\", type=str, default='KL_G_H', help=\"The loss to use to train the model.\")\n # parser.add_argument(\"--loss\", type=str, default='KL_B_H', help=\"The loss to use to train the model.\")\n # parser.add_argument(\"--loss\", type=str, default='ApproxNDCG', help=\"The loss to use to train the model.\")\n parser.add_argument(\"--loss\", type=str, default='ApproxNDCG_G', help=\"The loss to use to train the model.\")\n # parser.add_argument(\"--loss\", type=str, default='MSE', help=\"The loss to use to train the model.\")\n parser.add_argument(\"--norm_labels\", type=bool, default=False,\n help=\"Whether to normalize within [0,1] the relevance labels.\")\n parser.add_argument(\"--num_features\", type=int, default=46, help=\"Number of features per document.\")\n parser.add_argument(\"--num_epochs\", type=int, default=100, help=\"The number of epochs for training.\")\n parser.add_argument(\"--n_heads\", type=int, default=2, help=\"Num heads.\")\n parser.add_argument(\"--batch_size\", type=int, default=4, help=\"The batch size for training.\") # MQ2007\n # parser.add_argument(\"--batch_size\", type=int, default=2, help=\"The batch size for training.\") # MQ2008\n parser.add_argument(\"--list_size_test\", type=int, default=147, help=\"List size.\") # MQ2007\n # parser.add_argument(\"--list_size_test\", type=int, default=120, help=\"List size.\") # MQ2008\n # parser.add_argument(\"--list_size_train\", type=int, default=147, 
help=\"List size.\")\n parser.add_argument(\"--learning_rate\", type=float, default=1e-3, help=\"Learning rate for optimizer.\") # MQ2008 and MQ2007\n parser.add_argument(\"--model_ckpt_path\", type=str, default='./output/chkpts/',\n help=\"Output path for checkpoint saving.\")\n\n\ndef remove_queries_without_rel_docs(rj, docs, rl_lengths, dids):\n indices_to_remove = []\n for i in range(len(rj)):\n if max(rj[i]) == 0:\n indices_to_remove.append(i)\n rj = [rj[i] for i in range(len(rj)) if i not in indices_to_remove]\n docs = [docs[i] for i in range(len(docs)) if i not in indices_to_remove]\n rl_lengths = [rl_lengths[i] for i in range(len(rl_lengths)) if i not in indices_to_remove]\n dids = [dids[i] for i in range(len(dids)) if i not in indices_to_remove]\n return rj, docs, rl_lengths, dids\n\n\ndef group_docs_with_lambdamart_preds(preds, qids, docs, labels, max_list_size):\n grouped = {}\n for i in range(len(qids)):\n if qids[i] in grouped.keys():\n grouped[qids[i]].append((preds[i], docs[i], labels[i]))\n else:\n grouped[qids[i]] = [(preds[i], docs[i], labels[i])]\n grouped_docs = []\n grouped_labels = []\n rl_lengths = []\n for group in grouped.values():\n g = np.array(group)\n lmp = g[:, 0]\n indices = np.argsort(-lmp)\n\n ranked_list = list(g[:, 1][indices])\n ranked_labels = list(g[:, 2][indices])\n\n while len(ranked_list) < max_list_size:\n ranked_list.append(np.zeros(FLAGS.num_features))\n ranked_labels.append(0.0)\n ranked_list = ranked_list[:max_list_size]\n ranked_labels = ranked_labels[:max_list_size]\n\n grouped_docs.append(ranked_list)\n grouped_labels.append(ranked_labels)\n rl_lengths.append(min(max_list_size, len(lmp)))\n return grouped_docs, grouped_labels, rl_lengths\n\n\ndef read_data(data_folder, fold_f):\n # data_fpath = './data_proc/{}_{}_listSize={}_rerank_lambdamart={}.hkl'.format(FLAGS.coll_name, fold_f,\n # FLAGS.list_size_test,\n # FLAGS.rerank_lambdamart)\n # if not os.path.isfile(data_fpath) or not FLAGS.load_proc_data:\n\n training_file_path = os.path.join(os.path.join(data_folder, fold_f), 'train.txt')\n valid_file_path = os.path.join(os.path.join(data_folder, fold_f), 'vali.txt')\n test_file_path = os.path.join(os.path.join(data_folder, fold_f), 'test.txt')\n\n docs_train, lab_train, qids_train, _ = pyltr.data.letor.read_dataset(open(training_file_path))\n docs_val, lab_val, qids_val, _ = pyltr.data.letor.read_dataset(open(valid_file_path))\n docs_test, lab_test, qids_test, _ = pyltr.data.letor.read_dataset(open(test_file_path))\n\n dids_train = ['fake_did_{}'.format(i) for i in range(len(docs_train))]\n dids_test = ['fake_did_{}'.format(i) for i in range(len(docs_test))]\n dids_val = ['fake_did_{}'.format(i) for i in range(len(docs_val))]\n\n max_l = np.max(lab_train)\n print('max label: {}'.format(max_l))\n lab_train = np.array(lab_train) / max_l\n lab_val = np.array(lab_val) / max_l\n lab_test = np.array(lab_test) / max_l\n\n assert 0 <= max(lab_test) <= 1\n assert 0 <= max(lab_train) <= 1\n assert 0 <= max(lab_val) <= 1\n\n # without lambdamart\n ranking_lists_train, all_labels_train, rl_lengths_train, resp_qids_train, resp_dids_train = \\\n group_data_in_ranking_lists(docs_train, lab_train, qids_train, dids_train, FLAGS.list_size_test)\n ranking_lists_val, all_labels_val, rl_lengths_val, resp_qids_val, resp_dids_val = \\\n group_data_in_ranking_lists(docs_val, lab_val, qids_val, dids_val, FLAGS.list_size_test)\n ranking_lists_test, all_labels_test, rl_lengths_test, resp_qids_test, resp_dids_test = \\\n group_data_in_ranking_lists(docs_test, lab_test, 
qids_test, dids_test, FLAGS.list_size_test)\n\n # if FLAGS.load_proc_data:\n # print('dumping data')\n # save_model(((ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train),\n # (ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test),\n # (ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val),\n # (np.array(lab_val, dtype=np.float32), np.array(lab_test, dtype=np.float32),\n # np.array(qids_val, dtype=np.float32), np.array(qids_test, dtype=np.float32))),\n # data_fpath)\n # else:\n # print('loading data')\n # (ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train), \\\n # (ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test), \\\n # (ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val), \\\n # (lab_val, lab_test, qids_val, qids_test) = load_model(data_fpath)\n\n all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(\n all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)\n if FLAGS.simulate_labels:\n # artif_labels = compute_simulated_labels(ranking_lists_train, rl_lengths_train, all_labels_train)\n artif_labels = sample_labels(all_labels_train, rl_lengths_train, FLAGS.n_binomial_samples)\n compare_artif_rj_with_real_ones(artif_labels, all_labels_train, rl_lengths_train)\n all_labels_train = artif_labels\n\n all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(\n all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)\n\n # avg_n_rel_docs = np.mean([np.sum([1 for rj in rl if rj > 0]) for rl in all_labels_train])\n # print('avg number of relevant documents per ranked list in training data: {}'.format(avg_n_rel_docs))\n\n if FLAGS.expand_training_data:\n ranking_lists_train, all_labels_train, rl_lengths_train = augment_training_data(ranking_lists_train,\n all_labels_train,\n rl_lengths_train)\n all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train = remove_queries_without_rel_docs(\n all_labels_train, ranking_lists_train, rl_lengths_train, resp_dids_train)\n else:\n FLAGS.list_size_train = FLAGS.list_size_test\n\n return ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train, \\\n ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val, \\\n ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test, \\\n lab_val, lab_test, qids_val, qids_test\n\n\ndef augment_training_data(training_docs, training_rj, rl_lengths):\n training_rj = np.array(training_rj)\n rl_lengths = np.array(rl_lengths)\n n_samples_per_rl = 5\n new_ranked_lists = []\n new_rj = []\n new_lengths = []\n for i in range(len(training_docs)):\n docs_to_sample = np.array(training_docs[i][:rl_lengths[i]])\n for _ in range(n_samples_per_rl):\n sel_indices = np.random.choice([idx for idx in range(len(docs_to_sample))], size=FLAGS.list_size_train,\n replace=True)\n new_ranked_lists.append(docs_to_sample[sel_indices])\n new_rj.append(training_rj[i][sel_indices])\n new_lengths.append(FLAGS.list_size_train)\n return new_ranked_lists, new_rj, new_lengths\n\n\ndef load_lambdaMART_preds(fold_f, lambdamart_preds_path):\n \"\"\"\n Fold\tTraining.txt\tValidation.txt Test.txt\n Fold1\tS1, S2, S3\tS4\t\tS5\n Fold2\tS2, S3, S4\tS5\t\tS1\n Fold3\tS3, S4, S5\tS1\t\tS2\n Fold4\tS4, S5, S1\tS2\t\tS3\n Fold5\tS5, S1, S2\tS3\t\tS4\n \"\"\"\n test_preds_path = 
os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + fold_f + '.hkl')\n if not os.path.isfile(test_preds_path):\n compute_lambdamart_preds(FLAGS)\n test_preds = load_model(test_preds_path)\n training_folds = []\n validation_folds = []\n if fold_f == 'Fold1':\n training_folds = ['Fold2', 'Fold3', 'Fold4']\n validation_folds = ['Fold5']\n elif fold_f == 'Fold2':\n training_folds = ['Fold3', 'Fold4', 'Fold5']\n validation_folds = ['Fold1']\n elif fold_f == 'Fold3':\n training_folds = ['Fold4', 'Fold5', 'Fold1']\n validation_folds = ['Fold2']\n elif fold_f == 'Fold4':\n training_folds = ['Fold5', 'Fold1', 'Fold2']\n validation_folds = ['Fold3']\n elif fold_f == 'Fold5':\n training_folds = ['Fold1', 'Fold2', 'Fold3']\n validation_folds = ['Fold4']\n\n training_preds = []\n for ff in training_folds:\n tmp_model_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + ff + '.hkl')\n training_preds.extend(load_model(tmp_model_path))\n val_preds_path = os.path.join(lambdamart_preds_path, FLAGS.coll_name + '_lightgbm_' + validation_folds[0] + '.hkl')\n val_preds = load_model(val_preds_path)\n return training_preds, test_preds, val_preds\n\n\ndef group_data_in_ranking_lists(vectors, labels, qids, dids, list_size):\n assert len(qids) == len(labels)\n assert len(qids) == len(vectors)\n data_indices_grouped_by_qid = {}\n for i in range(len(qids)):\n curr_qid = qids[i]\n if curr_qid not in data_indices_grouped_by_qid.keys():\n data_indices_grouped_by_qid[curr_qid] = [i]\n else:\n data_indices_grouped_by_qid[curr_qid].append(i)\n\n print('mean ranking list length: %2.4f' % np.mean([len(item) for item in data_indices_grouped_by_qid.values()]))\n print('max ranking list length: %2.4f' % np.max([len(item) for item in data_indices_grouped_by_qid.values()]))\n print('min ranking list length: %2.4f' % np.min([len(item) for item in data_indices_grouped_by_qid.values()]))\n\n ranking_lists = []\n all_labels = []\n rl_lengths = []\n resp_qids = []\n all_dids = []\n for qid, indices_group in data_indices_grouped_by_qid.items():\n curr_dids = [dids[i] for i in indices_group]\n vecs = [vectors[i] for i in indices_group]\n curr_labels = [labels[i] for i in indices_group]\n original_rl_len = len(curr_labels)\n # pad ranking lists now\n vecs = pad_list(vecs, list_size)\n curr_labels = curr_labels[0: min(list_size, len(curr_labels))]\n curr_labels = curr_labels + [0.0] * (list_size - len(curr_labels))\n resp_qids.append(qid)\n curr_dids = curr_dids[0: min(list_size, len(curr_dids))]\n curr_dids.extend('padding_did_{}'.format(i) for i in range(list_size - len(curr_dids)))\n\n # append to output values\n all_labels.append(curr_labels)\n ranking_lists.append(vecs)\n all_dids.append(curr_dids)\n rl_lengths.append(min(list_size, original_rl_len))\n\n return ranking_lists, all_labels, rl_lengths, resp_qids, all_dids\n\n\ndef group_rj_in_ranking_lists_no_pad_trim(qids, labs):\n ranking_lists = {}\n for i in range(len(qids)):\n qid = qids[i]\n label = labs[i]\n if qid in ranking_lists.keys():\n ranking_lists[qid].append(label)\n else:\n ranking_lists[qid] = [label]\n doc_scores = []\n doc_rj = []\n for k, ranking_list in ranking_lists.items():\n curr_scores = []\n curr_rj = []\n for i in range(len(ranking_list)):\n curr_rj.append(ranking_list[i])\n doc_scores.append(curr_scores)\n doc_rj.append(curr_rj)\n return doc_rj\n\n\ndef compute_ranking_lists_rl_length_masks(rl_lengths, list_size):\n rl_masks = []\n for i in range(len(rl_lengths)):\n curr_v = np.zeros(list_size)\n for j in 
range(min(len(curr_v), rl_lengths[i])):\n curr_v[j] = 1\n rl_masks.append(curr_v)\n return rl_masks\n\n\ndef remove_training_rl_without_rel_docs(train_rj, train_docs, rl_lengths_train):\n indices_to_remove_train = []\n for i in range(len(train_rj)):\n if max(train_rj[i]) == 0:\n indices_to_remove_train.append(i)\n\n train_rj = [train_rj[i] for i in range(len(train_rj)) if i not in indices_to_remove_train]\n train_docs = [train_docs[i] for i in range(len(train_docs)) if i not in indices_to_remove_train]\n rl_lengths_train = [rl_lengths_train[i] for i in range(len(rl_lengths_train)) if i not in indices_to_remove_train]\n return train_rj, train_docs, rl_lengths_train\n\n\ndef test_model(sess, model, model_path, test_rj, test_docs, rl_lengths, qids_test, labels_test_non_grouped,\n silent=False):\n rl_test_masks = compute_ranking_lists_rl_length_masks(rl_lengths, FLAGS.list_size_test)\n\n # initialize graph and session\n # tf.reset_default_graph()\n # sess_config = tf.ConfigProto()\n # sess_config.gpu_options.allow_growth = True\n # sess = tf.Session(config=sess_config, graph=tf.get_default_graph())\n\n # initialize model\n # model = ReRanker(FLAGS.seed, FLAGS.learning_rate, det_model=FLAGS.det_model, n_heads=FLAGS.n_heads,\n # num_features=FLAGS.num_features, n=FLAGS.n_binomial_samples,\n # loss_fn=FLAGS.loss, list_size=FLAGS.list_size_train, max_label_value=4,\n # norm_labels=FLAGS.norm_labels)\n tf.set_random_seed(FLAGS.seed)\n # sess.run(model.init_op)\n model.saver.restore(sess, model_path)\n sess.graph.finalize()\n # compute_predictions\n msamples = 50\n if FLAGS.det_model:\n msamples = 1\n all_preds = np.zeros(shape=(msamples, len(test_docs), FLAGS.list_size_test))\n for k in range(msamples):\n scores = sess.run(model.logits,\n {model.training: False, model.input_docs: test_docs, model.rl_lengths_mask: rl_test_masks})\n if FLAGS.loss == 'ML':\n all_preds[k] = np.argmax(scores, axis=-1)\n else:\n all_preds[k] = scores\n\n avg_preds = np.mean(all_preds, axis=0)\n var_preds = np.var(all_preds, axis=0)\n\n for i in range(len(rl_test_masks)):\n for j in range(len(rl_test_masks[i])):\n if rl_test_masks[i][j] == 0:\n rl_test_masks[i][j] = 0 # -np.inf\n else:\n rl_test_masks[i][j] = 0\n avg_preds = rl_test_masks + avg_preds\n var_preds = rl_test_masks + var_preds\n\n grouped_rj = group_rj_in_ranking_lists_no_pad_trim(qids_test, labels_test_non_grouped)\n ideal_rel_j_lists = [np.array(rl)[np.argsort(-np.array(rl))] for rl in grouped_rj]\n ndcg_1, base_1 = compute_mean_ndcg(avg_preds, test_rj, ideal_rel_j_lists, 1)\n\n return avg_preds, ndcg_1, var_preds, compute_perf_metrics(avg_preds, test_rj, ideal_rel_j_lists, silent, rl_lengths)\n\n\ndef get_batches(all_docs, all_labels, rl_lengths_mask):\n db = []\n rb = []\n lb = []\n for i in range(len(all_docs)):\n db.append(all_docs[i])\n rb.append(all_labels[i])\n lb.append(rl_lengths_mask[i])\n if len(db) == FLAGS.batch_size:\n yield db, rb, lb\n db = []\n rb = []\n lb = []\n if len(db) > 0:\n yield db, rb, lb\n\n\ndef train_model(sess, model, train_docs, train_rj, rl_train_masks, test_rj, test_docs, rl_lengths_test,\n labels_test_non_grouped, qids_test, model_suffix):\n ckpt_paths = []\n perfs = []\n max_patience = 20\n patience = 20\n ploss = None\n early_stopping = False\n for epoch in range(1, FLAGS.num_epochs + 1):\n if early_stopping:\n break\n print('*** EPOCH: %d/%d' % (epoch, FLAGS.num_epochs))\n start = time.time()\n for db, rjb, lenb in get_batches(train_docs, train_rj, rl_train_masks):\n _, step, loss = sess.run(\n [model.train_op, 
model.global_step, model.loss],\n feed_dict={model.input_docs: db,\n model.relevance_judgments: rjb,\n model.rl_lengths_mask: lenb,\n model.training: True})\n if ploss is None:\n ploss = loss\n else:\n if loss >= ploss:\n patience -= 1\n if patience == 0:\n early_stopping = True\n print('early stopping')\n break\n else:\n patience = max_patience\n if step % 50 == 0:\n end = time.time()\n print('step: %d, loss: %2.6f, time: %2.3fs' % (step, loss, (end - start)))\n step = sess.run(model.global_step)\n # save_path = model.saver.save(sess, os.path.join(FLAGS.model_ckpt_path, 'ckpt_' + model_suffix),\n # global_step=step)\n for _ in range(100):\n try:\n save_path = model.saver.save(sess, os.path.join(FLAGS.model_ckpt_path, 'ckpt_' + model_suffix),\n global_step=step)\n except:\n print('exception, retrying')\n continue\n break\n\n print(\"Model saved in path: %s\" % save_path)\n preds, ndcg_1, var_preds, _ = test_model(sess, model, save_path, test_rj, test_docs, rl_lengths_test, qids_test,\n labels_test_non_grouped, silent=False)\n\n perfs.append(ndcg_1)\n ckpt_paths.append(save_path)\n\n return ckpt_paths, perfs\n\n\ndef train_eval_model(train_rj, train_docs, test_rj, test_docs, rl_lengths_train, rl_lengths_test,\n labels_test_non_grouped, qids_test, model_suffix=str(uuid.uuid4())):\n rl_train_masks = compute_ranking_lists_rl_length_masks(rl_lengths_train, FLAGS.list_size_train)\n print('max ranking list length in training data: %d' % max(rl_lengths_train))\n print('max ranking list length in test data: %d' % max(rl_lengths_test))\n\n # initialize graph and session\n tf.reset_default_graph()\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n sess = tf.Session(config=sess_config, graph=tf.get_default_graph())\n\n # initialize model\n model = ReRanker(FLAGS.seed, FLAGS.learning_rate, coll_name=FLAGS.coll_name, det_model=FLAGS.det_model,\n n_heads=FLAGS.n_heads,\n num_features=FLAGS.num_features, n=FLAGS.n_binomial_samples,\n loss_fn=FLAGS.loss, list_size=FLAGS.list_size_train, max_label_value=4,\n norm_labels=FLAGS.norm_labels)\n tf.set_random_seed(FLAGS.seed)\n sess.run(model.init_op)\n sess.graph.finalize()\n start_training = time.time()\n ckpt_paths, perfs = train_model(sess, model, train_docs, train_rj, rl_train_masks, test_rj, test_docs,\n rl_lengths_test, labels_test_non_grouped, qids_test, model_suffix)\n print('Model trained in: %2.4fs' % (time.time() - start_training))\n\n # load and evaluate best model\n best_model_path = ckpt_paths[np.argmax(perfs)]\n print('Best ckpt model path: %s' % best_model_path)\n return best_model_path, sess, model\n\n\ndef run():\n fold_folders = ['Fold1', 'Fold2', 'Fold3', 'Fold4', 'Fold5']\n # fold_folders = ['Fold1']\n all_preds = []\n all_rjs = []\n all_qids_test = []\n all_qids_test_non_g = []\n all_dids_test = []\n all_lab_test_non_grouped = []\n all_rl_lengths = []\n perfs_across_folds = {}\n for fold_f in fold_folders:\n ranking_lists_train, all_labels_train, rl_lengths_train, resp_dids_train, \\\n ranking_lists_val, all_labels_val, rl_lengths_val, resp_dids_val, resp_qids_val, \\\n ranking_lists_test, all_labels_test, rl_lengths_test, resp_dids_test, resp_qids_test, \\\n lab_val_non_grouped, lab_test_non_grouped, qids_val, qids_test = read_data(data_folder=FLAGS.data_folder,\n fold_f=fold_f)\n # print(qids_test)\n best_model_path, sess, model = train_eval_model(all_labels_train, ranking_lists_train, all_labels_val,\n ranking_lists_val,\n rl_lengths_train, rl_lengths_val, lab_val_non_grouped, qids_val)\n avg_preds, ndcg_1, 
var_preds, all_perf = test_model(sess, model, best_model_path, all_labels_test,\n ranking_lists_test, rl_lengths_test, qids_test,\n lab_test_non_grouped)\n all_preds.extend(avg_preds)\n all_rjs.extend(all_labels_test)\n all_qids_test.extend(resp_qids_test)\n all_qids_test_non_g.extend(qids_test)\n all_dids_test.extend(resp_dids_test)\n all_lab_test_non_grouped.extend(lab_test_non_grouped)\n all_rl_lengths.extend(rl_lengths_test)\n\n for k, v in all_perf.items():\n if k in perfs_across_folds.keys():\n perfs_across_folds[k].append(v)\n else:\n perfs_across_folds[k] = [v]\n\n for k, v in perfs_across_folds.items():\n print('{}: {}'.format(k, np.mean(v)))\n # save_model((all_preds, all_rjs, all_qids_test, all_dids_test, all_qids_test_non_g, all_lab_test_non_grouped),\n # './output/final_preds_data_{}_{}_{}.hkl'.format(FLAGS.coll_name, FLAGS.loss, FLAGS.simulate_labels))\n grouped_rj = group_rj_in_ranking_lists_no_pad_trim(all_qids_test_non_g, all_lab_test_non_grouped)\n ideal_rel_j_lists = [np.array(rl)[np.argsort(-np.array(rl))] for rl in grouped_rj]\n all_rjs = np.array(all_rjs) * int(1.0 / sorted(set(all_lab_test_non_grouped))[1])\n ideal_rel_j_lists = np.array(ideal_rel_j_lists) * int(1.0 / sorted(set(all_lab_test_non_grouped))[1])\n print('\\nFINAL PERF AVGD ACROSS FOLDS:')\n # import pdb\n # pdb.set_trace()\n compute_perf_metrics(all_preds, all_rjs, ideal_rel_j_lists, False, all_rl_lengths, max_rj=2.0)\n create_trec_eval_format_run_qrels(all_preds, all_dids_test, all_qids_test, all_rjs,\n 'DASALC_{}_loss={}_simulate_labels={}_det_model={}'.format(FLAGS.coll_name,\n FLAGS.loss,\n FLAGS.simulate_labels,\n FLAGS.det_model),\n './output')\n\n return\n\n\ndef create_trec_format_run(qids, dids, preds, ofpath):\n out = open(ofpath, 'w')\n for ranked_list_idx in range(len(preds)):\n sorted_indices = np.argsort(preds[ranked_list_idx])\n for item_idx in sorted_indices:\n run_line = '{} Q0 {} {} {} {}\\n'.format(qids[ranked_list_idx], dids[ranked_list_idx][item_idx],\n item_idx + 1, preds[ranked_list_idx][item_idx], 'PFusion')\n out.write(run_line)\n out.close()\n\n\ndef flatten_stuff_provide_fake_qids(all_preds, all_rjs):\n preds = []\n labels = []\n qids = []\n for i in range(len(all_preds)):\n preds.extend(all_preds[i])\n labels.extend(all_rjs[i])\n qids.extend([i] * len(all_preds[i]))\n return np.array(preds), np.array(labels), np.array(qids)\n\n\nif __name__ == '__main__':\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n logging.getLogger(\"tensorflow\").setLevel(logging.CRITICAL)\n\n arg_parser = argparse.ArgumentParser()\n add_arguments(arg_parser)\n FLAGS, unparsed = arg_parser.parse_known_args()\n for arg in vars(FLAGS):\n print(arg, \":\", getattr(FLAGS, arg))\n\n if not os.path.exists(FLAGS.model_ckpt_path):\n os.makedirs(FLAGS.model_ckpt_path)\n np.random.seed(FLAGS.seed)\n tf.random.set_random_seed(FLAGS.seed)\n run()\n print(FLAGS.loss)\n print('DONE')\n" ]
[ [ "tensorflow.get_default_graph", "numpy.random.seed", "numpy.var", "tensorflow.ConfigProto", "numpy.max", "tensorflow.reset_default_graph", "numpy.mean", "numpy.argmax", "tensorflow.set_random_seed", "numpy.argsort", "numpy.array", "numpy.zeros", "tensorflow.random.set_random_seed" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kokeshing/espnet
[ "9e2bfc5cdecbb8846f5c6cb26d22010b06e98c40" ]
[ "espnet/tts/pytorch_backend/tts.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright 2018 Nagoya University (Tomoki Hayashi)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\n\"\"\"E2E-TTS training / decoding functions.\"\"\"\n\nimport copy\nimport json\nimport logging\nimport math\nimport os\nimport time\n\nimport chainer\nimport kaldiio\nimport numpy as np\nimport torch\n\nfrom chainer import training\nfrom chainer.training import extensions\n\nfrom espnet.asr.asr_utils import get_model_conf\nfrom espnet.asr.asr_utils import snapshot_object\nfrom espnet.asr.asr_utils import torch_load\nfrom espnet.asr.asr_utils import torch_resume\nfrom espnet.asr.asr_utils import torch_snapshot\nfrom espnet.asr.pytorch_backend.asr_init import load_trained_modules\nfrom espnet.nets.pytorch_backend.nets_utils import pad_list\nfrom espnet.nets.tts_interface import TTSInterface\nfrom espnet.utils.dataset import ChainerDataLoader\nfrom espnet.utils.dataset import TransformDataset\nfrom espnet.utils.dynamic_import import dynamic_import\nfrom espnet.utils.io_utils import LoadInputsAndTargets\nfrom espnet.utils.training.batchfy import make_batchset\nfrom espnet.utils.training.evaluator import BaseEvaluator\n\nfrom espnet.utils.deterministic_utils import set_deterministic_pytorch\nfrom espnet.utils.training.train_utils import check_early_stop\nfrom espnet.utils.training.train_utils import set_early_stop\n\nfrom espnet.utils.training.iterators import ShufflingEnabler\n\nimport matplotlib\n\nfrom espnet.utils.training.tensorboard_logger import TensorboardLogger\nfrom tensorboardX import SummaryWriter\n\nmatplotlib.use('Agg')\n\n\nclass CustomEvaluator(BaseEvaluator):\n \"\"\"Custom evaluator.\"\"\"\n\n def __init__(self, model, iterator, target, device):\n \"\"\"Initilize module.\n\n Args:\n model (torch.nn.Module): Pytorch model instance.\n iterator (chainer.dataset.Iterator): Iterator for validation.\n target (chainer.Chain): Dummy chain instance.\n device (torch.device): The device to be used in evaluation.\n\n \"\"\"\n super(CustomEvaluator, self).__init__(iterator, target)\n self.model = model\n self.device = device\n\n # The core part of the update routine can be customized by overriding.\n def evaluate(self):\n \"\"\"Evaluate over validation iterator.\"\"\"\n iterator = self._iterators['main']\n\n if self.eval_hook:\n self.eval_hook(self)\n\n if hasattr(iterator, 'reset'):\n iterator.reset()\n it = iterator\n else:\n it = copy.copy(iterator)\n\n summary = chainer.reporter.DictSummary()\n\n self.model.eval()\n with torch.no_grad():\n for batch in it:\n if isinstance(batch, tuple):\n x = tuple(arr.to(self.device) for arr in batch)\n else:\n x = batch\n for key in x.keys():\n x[key] = x[key].to(self.device)\n observation = {}\n with chainer.reporter.report_scope(observation):\n # convert to torch tensor\n if isinstance(x, tuple):\n self.model(*x)\n else:\n self.model(**x)\n summary.add(observation)\n self.model.train()\n\n return summary.compute_mean()\n\n\nclass CustomUpdater(training.StandardUpdater):\n \"\"\"Custom updater.\"\"\"\n\n def __init__(self, model, grad_clip, iterator, optimizer, device, accum_grad=1):\n \"\"\"Initilize module.\n\n Args:\n model (torch.nn.Module) model: Pytorch model instance.\n grad_clip (float) grad_clip : The gradient clipping value.\n iterator (chainer.dataset.Iterator): Iterator for training.\n optimizer (torch.optim.Optimizer) : Pytorch optimizer instance.\n device (torch.device): The device to be used in training.\n\n \"\"\"\n super(CustomUpdater, self).__init__(iterator, 
optimizer)\n self.model = model\n self.grad_clip = grad_clip\n self.device = device\n self.clip_grad_norm = torch.nn.utils.clip_grad_norm_\n self.accum_grad = accum_grad\n self.forward_count = 0\n\n # The core part of the update routine can be customized by overriding.\n def update_core(self):\n \"\"\"Update model one step.\"\"\"\n # When we pass one iterator and optimizer to StandardUpdater.__init__,\n # they are automatically named 'main'.\n train_iter = self.get_iterator('main')\n optimizer = self.get_optimizer('main')\n\n # Get the next batch (a list of json files)\n batch = train_iter.next()\n if isinstance(batch, tuple):\n x = tuple(arr.to(self.device) for arr in batch)\n else:\n x = batch\n for key in x.keys():\n x[key] = x[key].to(self.device)\n\n # compute loss and gradient\n if isinstance(x, tuple):\n loss = self.model(*x).mean() / self.accum_grad\n else:\n loss = self.model(**x).mean() / self.accum_grad\n loss.backward()\n\n # update parameters\n self.forward_count += 1\n if self.forward_count != self.accum_grad:\n return\n self.forward_count = 0\n\n # compute the gradient norm to check if it is normal or not\n grad_norm = self.clip_grad_norm(self.model.parameters(), self.grad_clip)\n logging.debug('grad norm={}'.format(grad_norm))\n if math.isnan(grad_norm):\n logging.warning('grad norm is nan. Do not update model.')\n else:\n optimizer.step()\n optimizer.zero_grad()\n\n def update(self):\n \"\"\"Run update function.\"\"\"\n self.update_core()\n if self.forward_count == 0:\n self.iteration += 1\n\n\nclass CustomConverter(object):\n \"\"\"Custom converter.\"\"\"\n\n def __init__(self):\n \"\"\"Initilize module.\"\"\"\n # NOTE: keep as class for future development\n pass\n\n def __call__(self, batch, device=torch.device('cpu')):\n \"\"\"Convert a given batch.\n\n Args:\n batch (list): List of ndarrays.\n device (torch.device): The device to be send.\n\n Returns:\n dict: Dict of converted tensors.\n\n Examples:\n >>> batch = [([np.arange(5), np.arange(3)],\n [np.random.randn(8, 2), np.random.randn(4, 2)],\n None, None)]\n >>> conveter = CustomConverter()\n >>> conveter(batch, torch.device(\"cpu\"))\n {'xs': tensor([[0, 1, 2, 3, 4],\n [0, 1, 2, 0, 0]]),\n 'ilens': tensor([5, 3]),\n 'ys': tensor([[[-0.4197, -1.1157],\n [-1.5837, -0.4299],\n [-2.0491, 0.9215],\n [-2.4326, 0.8891],\n [ 1.2323, 1.7388],\n [-0.3228, 0.6656],\n [-0.6025, 1.3693],\n [-1.0778, 1.3447]],\n [[ 0.1768, -0.3119],\n [ 0.4386, 2.5354],\n [-1.2181, -0.5918],\n [-0.6858, -0.8843],\n [ 0.0000, 0.0000],\n [ 0.0000, 0.0000],\n [ 0.0000, 0.0000],\n [ 0.0000, 0.0000]]]),\n 'labels': tensor([[0., 0., 0., 0., 0., 0., 0., 1.],\n [0., 0., 0., 1., 1., 1., 1., 1.]]),\n 'olens': tensor([8, 4])}\n\n \"\"\"\n # batch should be located in list\n assert len(batch) == 1\n xs, ys, spembs, extras = batch[0]\n\n # get list of lengths (must be tensor for DataParallel)\n ilens = torch.from_numpy(np.array([x.shape[0] for x in xs])).long().to(device)\n olens = torch.from_numpy(np.array([y.shape[0] for y in ys])).long().to(device)\n\n # perform padding and conversion to tensor\n xs = pad_list([torch.from_numpy(x).long() for x in xs], 0).to(device)\n ys = pad_list([torch.from_numpy(y).float() for y in ys], 0).to(device)\n\n # make labels for stop prediction\n labels = ys.new_zeros(ys.size(0), ys.size(1))\n for i, l in enumerate(olens):\n labels[i, l - 1:] = 1.0\n\n # prepare dict\n new_batch = {\n \"xs\": xs,\n \"ilens\": ilens,\n \"ys\": ys,\n \"labels\": labels,\n \"olens\": olens,\n }\n\n # load speaker embedding\n if spembs is not 
None:\n spembs = torch.from_numpy(np.array(spembs)).float()\n new_batch[\"spembs\"] = spembs.to(device)\n\n # load second target\n if extras is not None:\n extras = pad_list([torch.from_numpy(extra).float() for extra in extras], 0)\n new_batch[\"extras\"] = extras.to(device)\n\n return new_batch\n\n\ndef train(args):\n \"\"\"Train E2E-TTS model.\"\"\"\n set_deterministic_pytorch(args)\n\n # check cuda availability\n if not torch.cuda.is_available():\n logging.warning('cuda is not available')\n\n # get input and output dimension info\n with open(args.valid_json, 'rb') as f:\n valid_json = json.load(f)['utts']\n utts = list(valid_json.keys())\n\n # reverse input and output dimension\n idim = int(valid_json[utts[0]]['output'][0]['shape'][1])\n odim = int(valid_json[utts[0]]['input'][0]['shape'][1])\n logging.info('#input dims : ' + str(idim))\n logging.info('#output dims: ' + str(odim))\n\n # get extra input and output dimenstion\n if args.use_speaker_embedding:\n args.spk_embed_dim = int(valid_json[utts[0]]['input'][1]['shape'][0])\n else:\n args.spk_embed_dim = None\n if args.use_second_target:\n args.spc_dim = int(valid_json[utts[0]]['input'][1]['shape'][1])\n else:\n args.spc_dim = None\n\n # write model config\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n model_conf = args.outdir + '/model.json'\n with open(model_conf, 'wb') as f:\n logging.info('writing a model config file to' + model_conf)\n f.write(json.dumps((idim, odim, vars(args)),\n indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))\n for key in sorted(vars(args).keys()):\n logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))\n\n # specify model architecture\n if args.enc_init is not None or args.dec_init is not None:\n model = load_trained_modules(idim, odim, args, TTSInterface)\n else:\n model_class = dynamic_import(args.model_module)\n model = model_class(idim, odim, args)\n assert isinstance(model, TTSInterface)\n logging.info(model)\n reporter = model.reporter\n\n # check the use of multi-gpu\n if args.ngpu > 1:\n model = torch.nn.DataParallel(model, device_ids=list(range(args.ngpu)))\n if args.batch_size != 0:\n logging.warning('batch size is automatically increased (%d -> %d)' % (\n args.batch_size, args.batch_size * args.ngpu))\n args.batch_size *= args.ngpu\n\n # set torch device\n device = torch.device(\"cuda\" if args.ngpu > 0 else \"cpu\")\n model = model.to(device)\n\n # Setup an optimizer\n if args.opt == 'adam':\n optimizer = torch.optim.Adam(\n model.parameters(), args.lr, eps=args.eps,\n weight_decay=args.weight_decay)\n elif args.opt == 'noam':\n from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt\n optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)\n else:\n raise NotImplementedError(\"unknown optimizer: \" + args.opt)\n\n # FIXME: TOO DIRTY HACK\n setattr(optimizer, 'target', reporter)\n setattr(optimizer, 'serialize', lambda s: reporter.serialize(s))\n\n # read json data\n with open(args.train_json, 'rb') as f:\n train_json = json.load(f)['utts']\n with open(args.valid_json, 'rb') as f:\n valid_json = json.load(f)['utts']\n\n use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0\n if use_sortagrad:\n args.batch_sort_key = \"input\"\n # make minibatch list (variable length)\n train_batchset = make_batchset(train_json, args.batch_size,\n args.maxlen_in, args.maxlen_out, args.minibatches,\n batch_sort_key=args.batch_sort_key,\n min_batch_size=args.ngpu if args.ngpu > 1 else 1,\n 
shortest_first=use_sortagrad,\n count=args.batch_count,\n batch_bins=args.batch_bins,\n batch_frames_in=args.batch_frames_in,\n batch_frames_out=args.batch_frames_out,\n batch_frames_inout=args.batch_frames_inout,\n swap_io=True, iaxis=0, oaxis=0)\n valid_batchset = make_batchset(valid_json, args.batch_size,\n args.maxlen_in, args.maxlen_out, args.minibatches,\n batch_sort_key=args.batch_sort_key,\n min_batch_size=args.ngpu if args.ngpu > 1 else 1,\n count=args.batch_count,\n batch_bins=args.batch_bins,\n batch_frames_in=args.batch_frames_in,\n batch_frames_out=args.batch_frames_out,\n batch_frames_inout=args.batch_frames_inout,\n swap_io=True, iaxis=0, oaxis=0)\n\n load_tr = LoadInputsAndTargets(\n mode='tts',\n use_speaker_embedding=args.use_speaker_embedding,\n use_second_target=args.use_second_target,\n preprocess_conf=args.preprocess_conf,\n preprocess_args={'train': True}, # Switch the mode of preprocessing\n keep_all_data_on_mem=args.keep_all_data_on_mem,\n )\n\n load_cv = LoadInputsAndTargets(\n mode='tts',\n use_speaker_embedding=args.use_speaker_embedding,\n use_second_target=args.use_second_target,\n preprocess_conf=args.preprocess_conf,\n preprocess_args={'train': False}, # Switch the mode of preprocessing\n keep_all_data_on_mem=args.keep_all_data_on_mem,\n )\n\n converter = CustomConverter()\n # hack to make batchsize argument as 1\n # actual bathsize is included in a list\n train_iter = {'main': ChainerDataLoader(\n dataset=TransformDataset(train_batchset, lambda data: converter([load_tr(data)])),\n batch_size=1, num_workers=args.num_iter_processes,\n shuffle=not use_sortagrad, collate_fn=lambda x: x[0])}\n valid_iter = {'main': ChainerDataLoader(\n dataset=TransformDataset(valid_batchset, lambda data: converter([load_cv(data)])),\n batch_size=1, shuffle=False, collate_fn=lambda x: x[0],\n num_workers=args.num_iter_processes)}\n\n # Set up a trainer\n updater = CustomUpdater(model, args.grad_clip, train_iter, optimizer, device, args.accum_grad)\n trainer = training.Trainer(updater, (args.epochs, 'epoch'), out=args.outdir)\n\n # Resume from a snapshot\n if args.resume:\n logging.info('resumed from %s' % args.resume)\n torch_resume(args.resume, trainer)\n\n # set intervals\n eval_interval = (args.eval_interval_epochs, 'epoch')\n save_interval = (args.save_interval_epochs, 'epoch')\n report_interval = (args.report_interval_iters, 'iteration')\n\n # Evaluate the model with the test dataset for each epoch\n trainer.extend(CustomEvaluator(\n model, valid_iter, reporter, device), trigger=eval_interval)\n\n # Save snapshot for each epoch\n trainer.extend(torch_snapshot(), trigger=save_interval)\n\n # Save best models\n trainer.extend(snapshot_object(model, 'model.loss.best'),\n trigger=training.triggers.MinValueTrigger(\n 'validation/main/loss', trigger=eval_interval))\n\n # Save attention figure for each epoch\n if args.num_save_attention > 0:\n data = sorted(list(valid_json.items())[:args.num_save_attention],\n key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)\n if hasattr(model, \"module\"):\n att_vis_fn = model.module.calculate_all_attentions\n plot_class = model.module.attention_plot_class\n else:\n att_vis_fn = model.calculate_all_attentions\n plot_class = model.attention_plot_class\n att_reporter = plot_class(\n att_vis_fn, data, args.outdir + '/att_ws',\n converter=converter,\n transform=load_cv,\n device=device, reverse=True)\n trainer.extend(att_reporter, trigger=eval_interval)\n else:\n att_reporter = None\n\n # Make a plot for training and validation values\n 
if hasattr(model, \"module\"):\n base_plot_keys = model.module.base_plot_keys\n else:\n base_plot_keys = model.base_plot_keys\n plot_keys = []\n for key in base_plot_keys:\n plot_key = ['main/' + key, 'validation/main/' + key]\n trainer.extend(extensions.PlotReport(\n plot_key, 'epoch', file_name=key + '.png'), trigger=eval_interval)\n plot_keys += plot_key\n trainer.extend(extensions.PlotReport(\n plot_keys, 'epoch', file_name='all_loss.png'), trigger=eval_interval)\n\n # Write a log of evaluation statistics for each epoch\n trainer.extend(extensions.LogReport(trigger=report_interval))\n report_keys = ['epoch', 'iteration', 'elapsed_time'] + plot_keys\n trainer.extend(extensions.PrintReport(report_keys), trigger=report_interval)\n trainer.extend(extensions.ProgressBar(), trigger=report_interval)\n\n set_early_stop(trainer, args)\n if args.tensorboard_dir is not None and args.tensorboard_dir != \"\":\n writer = SummaryWriter(args.tensorboard_dir)\n trainer.extend(TensorboardLogger(writer, att_reporter), trigger=report_interval)\n\n if use_sortagrad:\n trainer.extend(ShufflingEnabler([train_iter]),\n trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch'))\n\n # Run the training\n trainer.run()\n check_early_stop(trainer, args.epochs)\n\n\[email protected]_grad()\ndef decode(args):\n \"\"\"Decode with E2E-TTS model.\"\"\"\n set_deterministic_pytorch(args)\n # read training config\n idim, odim, train_args = get_model_conf(args.model, args.model_conf)\n\n # show arguments\n for key in sorted(vars(args).keys()):\n logging.info('args: ' + key + ': ' + str(vars(args)[key]))\n\n # define model\n model_class = dynamic_import(train_args.model_module)\n model = model_class(idim, odim, train_args)\n assert isinstance(model, TTSInterface)\n logging.info(model)\n\n # load trained model parameters\n logging.info('reading model parameters from ' + args.model)\n torch_load(args.model, model)\n model.eval()\n\n # set torch device\n device = torch.device(\"cuda\" if args.ngpu > 0 else \"cpu\")\n model = model.to(device)\n\n # read json data\n with open(args.json, 'rb') as f:\n js = json.load(f)['utts']\n\n # check directory\n outdir = os.path.dirname(args.out)\n if len(outdir) != 0 and not os.path.exists(outdir):\n os.makedirs(outdir)\n\n load_inputs_and_targets = LoadInputsAndTargets(\n mode='tts', load_input=False, sort_in_input_length=False,\n use_speaker_embedding=train_args.use_speaker_embedding,\n preprocess_conf=train_args.preprocess_conf\n if args.preprocess_conf is None else args.preprocess_conf,\n preprocess_args={'train': False} # Switch the mode of preprocessing\n )\n\n # define function for plot prob and att_ws\n def _plot_and_save(array, figname, figsize=(6, 4), dpi=150):\n import matplotlib.pyplot as plt\n shape = array.shape\n if len(shape) == 1:\n # for eos probability\n plt.figure(figsize=figsize, dpi=dpi)\n plt.plot(array)\n plt.xlabel(\"Frame\")\n plt.ylabel(\"Probability\")\n plt.ylim([0, 1])\n elif len(shape) == 2:\n # for tacotron 2 attention weights, whose shape is (out_length, in_length)\n plt.figure(figsize=figsize, dpi=dpi)\n plt.imshow(array, aspect=\"auto\")\n plt.xlabel(\"Input\")\n plt.ylabel(\"Output\")\n elif len(shape) == 4:\n # for transformer attention weights, whose shape is (#leyers, #heads, out_length, in_length)\n plt.figure(figsize=(figsize[0] * shape[0], figsize[1] * shape[1]), dpi=dpi)\n for idx1, xs in enumerate(array):\n for idx2, x in enumerate(xs, 1):\n plt.subplot(shape[0], shape[1], idx1 * shape[1] + idx2)\n plt.imshow(x, aspect=\"auto\")\n 
plt.xlabel(\"Input\")\n plt.ylabel(\"Output\")\n else:\n raise NotImplementedError(\"Support only from 1D to 4D array.\")\n plt.tight_layout()\n if not os.path.exists(os.path.dirname(figname)):\n # NOTE: exist_ok = True is needed for parallel process decoding\n os.makedirs(os.path.dirname(figname), exist_ok=True)\n plt.savefig(figname)\n plt.close()\n\n # define function to calculate focus rate (see section 3.3 in https://arxiv.org/abs/1905.09263)\n def _calculate_focus_rete(att_ws):\n if att_ws is None:\n # fastspeech case -> None\n return 1.0\n elif len(att_ws.shape) == 2:\n # tacotron 2 case -> (L, T)\n return float(att_ws.max(dim=-1)[0].mean())\n elif len(att_ws.shape) == 4:\n # transformer case -> (#layers, #heads, L, T)\n return float(att_ws.max(dim=-1)[0].mean(dim=-1).max())\n else:\n raise ValueError(\"att_ws should be 2 or 4 dimensional tensor.\")\n\n # define function to convert attention to duration\n def _convert_att_to_duration(att_ws):\n if len(att_ws.shape) == 2:\n # tacotron 2 case -> (L, T)\n pass\n elif len(att_ws.shape) == 4:\n # transformer case -> (#layers, #heads, L, T)\n # get the most diagonal head according to focus rate\n att_ws = torch.cat([att_w for att_w in att_ws], dim=0) # (#heads * #layers, L, T)\n diagonal_scores = att_ws.max(dim=-1)[0].mean(dim=-1) # (#heads * #layers,)\n diagonal_head_idx = diagonal_scores.argmax()\n att_ws = att_ws[diagonal_head_idx] # (L, T)\n else:\n raise ValueError(\"att_ws should be 2 or 4 dimensional tensor.\")\n # calculate duration from 2d attention weight\n durations = torch.stack([att_ws.argmax(-1).eq(i).sum() for i in range(att_ws.shape[1])])\n return durations.view(-1, 1).float()\n\n # define writer instances\n feat_writer = kaldiio.WriteHelper(\n 'ark,scp:{o}.ark,{o}.scp'.format(o=args.out))\n if args.save_durations:\n dur_writer = kaldiio.WriteHelper(\n 'ark,scp:{o}.ark,{o}.scp'.format(\n o=args.out.replace(\"feats\", \"durations\")))\n if args.save_focus_rates:\n fr_writer = kaldiio.WriteHelper(\n 'ark,scp:{o}.ark,{o}.scp'.format(\n o=args.out.replace(\"feats\", \"focus_rates\")))\n\n # start decoding\n for idx, utt_id in enumerate(js.keys()):\n # setup inputs\n batch = [(utt_id, js[utt_id])]\n data = load_inputs_and_targets(batch)\n x = torch.LongTensor(data[0][0]).to(device)\n spemb = None\n if train_args.use_speaker_embedding:\n spemb = torch.FloatTensor(data[1][0]).to(device)\n\n # decode and write\n start_time = time.time()\n outs, probs, att_ws = model.inference(x, args, spemb=spemb)\n logging.info(\"inference speed = %.1f frames / sec.\" % (\n int(outs.size(0)) / (time.time() - start_time)))\n if outs.size(0) == x.size(0) * args.maxlenratio:\n logging.warning(\"output length reaches maximum length (%s).\" % utt_id)\n focus_rate = _calculate_focus_rete(att_ws)\n logging.info('(%d/%d) %s (size: %d->%d, focus rate: %.3f)' % (\n idx + 1, len(js.keys()), utt_id, x.size(0), outs.size(0), focus_rate))\n feat_writer[utt_id] = outs.cpu().numpy()\n if args.save_durations:\n ds = _convert_att_to_duration(att_ws)\n dur_writer[utt_id] = ds.cpu().numpy()\n if args.save_focus_rates:\n fr_writer[utt_id] = np.array(focus_rate).reshape(1, 1)\n\n # plot and save prob and att_ws\n if probs is not None:\n _plot_and_save(probs.cpu().numpy(), os.path.dirname(args.out) + \"/probs/%s_prob.png\" % utt_id)\n if att_ws is not None:\n _plot_and_save(att_ws.cpu().numpy(), os.path.dirname(args.out) + \"/att_ws/%s_att_ws.png\" % utt_id)\n\n # close file object\n feat_writer.close()\n if args.save_durations:\n dur_writer.close()\n if 
args.save_focus_rates:\n fr_writer.close()\n" ]
[ [ "matplotlib.pyplot.imshow", "torch.cat", "matplotlib.pyplot.plot", "torch.no_grad", "torch.FloatTensor", "torch.cuda.is_available", "torch.device", "matplotlib.pyplot.tight_layout", "torch.from_numpy", "matplotlib.pyplot.subplot", "matplotlib.pyplot.close", "matplotlib.pyplot.figure", "torch.LongTensor", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "numpy.array", "matplotlib.pyplot.ylabel", "matplotlib.use", "matplotlib.pyplot.xlabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JulianKnodt/nerf_atlas
[ "6866713c498cea026cb215260a779a2c6c13246c" ]
[ "scripts/2d_recon.py" ]
[ "import sys\nsys.path[0] = sys.path[0][:-len(\"scripts/\")] # hacky way to treat it as root directory\n\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision as tv\nfrom src.neural_blocks import ( SkipConnMLP, FourierEncoder )\nfrom src.utils import ( fat_sigmoid )\nfrom tqdm import trange\n\nts = 100\nepochs = 20_000\nSCALE = 1\n\nclass LearnedImage(nn.Module):\n def __init__(self):\n super().__init__()\n self.query = SkipConnMLP(\n in_size=2, out=1, latent_size=0,\n activation=torch.sin, num_layers=5, hidden_size=256, init=\"siren\",\n )\n def forward(self, x):\n return fat_sigmoid(self.query(x))\n\nclass PixelImage(nn.Module):\n def __init__(self, frame):\n super().__init__()\n assert(len(frame.shape) == 3)\n # just use a frame from the training data, ensures it has all elements\n self.data = frame.permute(2,1,0)#nn.Parameter(torch.randn(1, 1, 256, 256))\n def forward(self, x):\n B = x.shape[0]\n vals = F.grid_sample(\n self.data.expand(B,-1,-1,-1),\n x,\n padding_mode=\"zero\",\n align_corners=False,\n ).permute(0,2,3,1)\n return vals\n\nclass LIIF(nn.Module):\n def __init__(\n self,\n reso:int=16,\n emb_size:int=128\n ):\n super().__init__()\n self.grid = nn.Parameter(torch.randn(1, emb_size, reso, reso))\n self.query = SkipConnMLP(\n in_size=emb_size, out=1, latent_size=2,\n activation=torch.sin, num_layers=5, hidden_size=256, init=\"siren\",\n )\n def forward(self, x):\n B = x.shape[0]\n latent = F.grid_sample(\n self.grid.expand(B, -1,-1,-1), x,\n padding_mode=\"reflection\",\n align_corners=False,\n ).permute(0,2,3,1)\n sz = latent.shape[1:3]\n cell_size = torch.tensor([1/sz[0], 1/sz[1]]).to(x.device)\n cell_size = cell_size[None,None,None].expand(B,sz[0],sz[1],-1)\n return fat_sigmoid(self.query(latent, cell_size))\n\nclass DistanceImage(nn.Module):\n def __init__(self, n=32):\n super().__init__()\n self.n = n\n self.points = nn.Parameter(torch.randn(n, 2, requires_grad=True), requires_grad=True)\n self.query = SkipConnMLP(\n in_size=n, out=1, num_layers=5, hidden_size=512, init=\"xavier\",\n )\n def forward(self, x): return self.from_pts(self.points, x)\n def from_pts(self, pts, x):\n pairwise_disp = pts[:, None, None, ...] 
- x[...,None,:]\n pairwise_dist = pairwise_disp.square().sum(dim=-1)\n return fat_sigmoid(self.query(1/(1e-5+pairwise_dist)))\n\n#torch.autograd.set_detect_anomaly(True); print(\"DEBUG\")\n\nclass LongAnimator(nn.Module):\n def __init__(\n self, img,\n segments:int,\n spline:int=16,\n seg_emb_size:int=128,\n anchor_interim:int=128,\n ):\n super().__init__()\n self.img = img\n self.spline_n = spline\n self.ses = ses = seg_emb_size\n segments = int(segments)\n self.segments = segments\n self.midsize = anchor_interim\n\n self.seg_emb = nn.Embedding(segments+2, ses, max_norm=1)\n self.anchors = SkipConnMLP(\n in_size=2, out=2+anchor_interim, latent_size=ses,\n num_layers=5, hidden_size=512, init=\"xavier\",\n )\n self.point_estim=SkipConnMLP(\n in_size=2, out=(spline-2)*2,\n num_layers=5, hidden_size=512,\n latent_size=ses+2*anchor_interim, init=\"xavier\",\n )\n def forward(self, x, t):\n B = t.shape[0]\n t = t[:, None, None, None]\n seg = t.floor().int().clamp(min=0)\n emb = self.seg_emb(torch.cat([seg,seg+1], dim=-1)).expand(-1, *x.shape[1:3], -1, -1)\n anchors, anchor_latent = self.anchors(\n x[..., None, :].expand(B,-1,-1,2,-1), emb,\n ).split([2, self.midsize], dim=-1)\n start, end = [a[None].squeeze(-2) for a in anchors.split([1,1], dim=-2)]\n point_estim_latent = torch.cat([emb[..., 0, :], anchor_latent.flatten(-2)], dim=-1)\n midpts = torch.stack(\n self.point_estim(x.expand(B,-1,-1,-1), point_estim_latent).split(2, dim=-1), dim=0\n )\n ctrl_pts = torch.cat([start, midpts-start, end], dim=0)\n # Bound pts within some space\n ctrl_pts = 2*ctrl_pts.tanh()\n dx = de_casteljau(ctrl_pts, t.frac(), self.spline_n)\n return self.img(x+dx)\n\nclass LongAnimatorPts(nn.Module):\n def __init__(\n self,\n img: DistanceImage,\n segments:int,\n spline:int=5,\n seg_emb_size:int=128,\n anchor_interim:int=128,\n ):\n super().__init__()\n self.img = img\n self.spline_n = spline\n self.ses = ses = seg_emb_size\n segments = int(segments)\n self.segments = segments\n self.midsize = anchor_interim\n\n self.seg_emb = nn.Embedding(segments+2, ses)\n self.anchors = SkipConnMLP(\n in_size=2, out=2+anchor_interim, latent_size=ses,\n num_layers=5, hidden_size=512, init=\"xavier\",\n )\n self.point_estim=SkipConnMLP(\n in_size=2, out=(spline-2)*2,\n num_layers=5, hidden_size=512,\n latent_size=ses+2*anchor_interim, init=\"xavier\",\n )\n def forward(self, x, t):\n B = t.shape[0]\n N = self.img.n\n t = t[:, None]\n seg = t.floor().int().clamp(min=0)\n emb = self.seg_emb(torch.cat([seg,seg+1], dim=-1))[:, None, ...].expand(B,N,2,-1)\n rigs = self.img.points[None].expand(B,-1,-1) # [N:2]\n anchors, anchor_latent = self.anchors(\n rigs[...,None,:].expand(B,N,2,2), emb,\n ).split([2, self.midsize], dim=-1)\n start, end = [a[None].squeeze(-2) for a in anchors.split([1,1], dim=-2)]\n point_estim_latent = torch.cat([emb[..., 0, :], anchor_latent.flatten(-2)], dim=-1)\n midpts = torch.stack(self.point_estim(rigs, point_estim_latent).split(2, dim=-1), dim=0)\n ctrl_pts = torch.cat([start, midpts-start, end], dim=0)\n dx = de_casteljau(ctrl_pts, t[:,None].frac(), self.spline_n)\n return self.img.from_pts(rigs+dx, x)\n\n# A single Skip Connected MLP\nclass SimpleAnimator(nn.Module):\n def __init__(self, img, *args, **kwargs):\n super().__init__()\n self.img = img\n self.pred = SkipConnMLP(\n in_size=1, out=2,\n num_layers=7, hidden_size=512,\n init=\"xavier\",\n )\n def forward(self, x, t):\n B = t.shape[0]\n dx = self.pred(t[..., None])[:, None, None]\n return self.img(x + dx)\n\ndef de_casteljau(coeffs, t, N: int):\n betas 
= coeffs\n m1t = 1 - t\n for i in range(1, N): betas = betas[:-1] * m1t + betas[1:] * t\n return betas.squeeze(0)\n\ndef fft_loss(x, ref):\n got = torch.fft.rfft2(x, dim=(-3, -2), norm=\"ortho\")\n exp = torch.fft.rfft2(ref, dim=(-3, -2), norm=\"ortho\")\n return (got - exp).abs().mean()\n\ndef train(model, ref, times):\n t = trange(epochs)\n bs=min(12, times.shape[0])\n grid = torch.stack(torch.meshgrid(\n torch.linspace(-SCALE, SCALE, ts),\n torch.linspace(-SCALE, SCALE, ts),\n indexing=\"ij\",\n ),dim=-1).unsqueeze(0).to(device)\n opt = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-6)\n for i in t:\n opt.zero_grad()\n for rs, ats in zip(ref.split(bs, dim=0), times.split(bs, dim=0)):\n got = model(grid, ats)\n exp = rs.to(device)\n #loss = fft_loss(got, exp)\n loss = F.mse_loss(got, exp) #loss = F.l1_loss(got, exp)\n loss.backward()\n opt.step()\n t.set_postfix(L2=f\"{loss.item():.02e}\")\n if i % 250 == 0:\n with torch.no_grad():\n pred_img = tv.utils.make_grid(got.permute(0,3,1,2)).cpu()\n exp_img = tv.utils.make_grid(rs.permute(0,3,1,2))\n result = torch.cat([pred_img, exp_img], dim=1)\n tv.utils.save_image(result, f\"outputs/animate_{i:05}.png\")\n if i % 1000 == 0 and i != 0:\n torch.save(model, \"models/animate_long.pt\")\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1)\n opt.step()\n torch.save(model, \"models/animate_long.pt\")\n\n\ndef test(ref, model, num_secs, n:int=1800):\n model = model.eval()\n times = torch.linspace(0,num_secs,n,device=device)\n grid = torch.stack(torch.meshgrid(\n torch.linspace(-SCALE, SCALE, ts),\n torch.linspace(-SCALE, SCALE, ts),\n indexing=\"ij\",\n ),dim=-1).unsqueeze(0).to(device)\n\n batch_size = 12\n out = []\n for batch in times.split(batch_size, dim=0):\n out.append(model(grid, batch))\n out = torch.cat(out, dim=0).cpu()\n loss = F.mse_loss(ref, out)\n print(\"Final Loss\", f\"{loss.item():.03e}\")\n pred_img = tv.utils.make_grid(out.permute(0,3,1,2), num_secs)\n tv.utils.save_image(pred_img, f\"outputs/final.png\")\n tv.io.write_video(\"outputs/animation.mp4\", out.expand(-1, -1, -1, 3)*255, int(n/num_secs))\n\ndevice=\"cuda:0\"\ndef main():\n with torch.no_grad():\n frames, _, info = tv.io.read_video(\"data/heider/animation.mp4\", pts_unit=\"sec\")\n og_frames = frames\n fps = info[\"video_fps\"]\n og_num_frames = frames.shape[0]\n num_secs = int(frames.shape[0]//fps)\n frames = frames[::int(fps//8)]\n num_frames = frames.shape[0]\n frames = (frames/255).mean(dim=-1, keepdim=True)\n frames = tv.transforms.functional.resize(frames.permute(0,3,1,2), (ts, ts)).permute(0,2,3,1)\n\n times = torch.linspace(0, num_secs, num_frames).to(device)\n\n #model = LongAnimatorPts(DistanceImage(), segments=num_secs).to(device)\n\n # frames[80] has all components\n #model = LongAnimator(LIIF(), segments=num_secs).to(device)\n #model = SimpleAnimator(LIIF()).to(device)\n\n model = torch.load(\"models/animate_long.pt\")\n\n train(model, frames[:24], times[:24])\n with torch.no_grad():\n ref = tv.transforms.functional.resize(og_frames.permute(0,3,1,2), (ts, ts)).permute(0,2,3,1)\n ref = (ref/255).mean(dim=-1, keepdim=True)\n test(ref, model, num_secs, og_num_frames)\n\n\nif __name__ == \"__main__\": main()\n" ]
[ [ "torch.linspace", "torch.load", "torch.cat", "torch.randn", "torch.nn.Embedding", "torch.tensor", "torch.nn.functional.mse_loss", "torch.no_grad", "torch.fft.rfft2", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ytian81/CarND-Behavioral-Cloning-P3
[ "df912ae149035330f4e6be8a6c76a3271d522611" ]
[ "finetune.py" ]
[ "from keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.layers import Activation, Conv2D, Cropping2D, Dense, Dropout, Flatten, Lambda, MaxPool2D\nfrom keras.models import Sequential\nfrom keras.regularizers import l2\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\nfrom keras.models import load_model\nfrom scipy import ndimage\nfrom sklearn.utils import shuffle\nfrom model import assemble_model\n\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# data_folder = './data/'\n# data_folder = './Track2/'\ndata_folder = './turn/'\n\ndef get_data():\n # Read driving log data from csv file\n lines = []\n with open(data_folder+'/driving_log.csv') as f:\n reader = csv.reader(f)\n for line in reader:\n lines.append(line)\n\n # Modify image path and extract outputs\n images = []\n steering_angles = []\n delta = 0.2\n for line in lines:\n # Use center, left and right images\n angle_corrections = [0.0, delta, -delta]\n for idx in range(3):\n image_path = line[idx]\n image_path = data_folder + '/IMG/' + image_path.split('/')[-1]\n image = ndimage.imread(image_path)\n images.append(image)\n steering_angle = float(line[3]) + angle_corrections[idx]\n steering_angles.append(steering_angle)\n\n # Augment data (double the amount of data)\n images.append(np.fliplr(image))\n steering_angles.append(-steering_angle)\n\n images = np.array(images)\n steering_angles = np.array(steering_angles)\n\n # shuffle data before split validation set\n images, steering_angles = shuffle(images, steering_angles)\n return images, steering_angles\n\nX_train, y_train = get_data()\n\nmodel = assemble_model()\nmodel.load_weights('batch_128_model.h5')\n\nmodel.compile(loss='mse', optimizer='adam')\n# Train 15 epoches at most and save the best model, early stop if validation loss stops improving\ncheckpoint = ModelCheckpoint('model.h5', monitor='val_loss', save_best_only=True, mode='min', verbose=0)\nearlystop = EarlyStopping(monitor='val_loss', patience=3, mode='min')\nhistory_object = model.fit(X_train, y_train, validation_split=0.3, shuffle=True, epochs=15,\n callbacks=[checkpoint, earlystop])\n\n# Draw training statistics\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.savefig('training_stats.jpg')\n" ]
[ [ "matplotlib.pyplot.legend", "scipy.ndimage.imread", "matplotlib.pyplot.title", "numpy.fliplr", "sklearn.utils.shuffle", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "1.0", "0.19", "0.18", "1.2", "0.12", "0.10", "0.17", "0.16" ], "tensorflow": [] } ]
agramfort/nilearn
[ "f075440e6d97b5bf359bb25e9197dbcbbc26e5f2", "f075440e6d97b5bf359bb25e9197dbcbbc26e5f2", "f075440e6d97b5bf359bb25e9197dbcbbc26e5f2", "f075440e6d97b5bf359bb25e9197dbcbbc26e5f2" ]
[ "nilearn/_utils/niimg_conversions.py", "nilearn/tests/test_testing.py", "examples/connectivity/plot_probabilistic_atlas_extraction.py", "nilearn/plotting/img_plotting.py" ]
[ "\"\"\"\nConversion utilities.\n\"\"\"\n# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais\n# License: simplified BSD\nimport warnings\n\nimport numpy as np\nimport itertools\nfrom sklearn.externals.joblib import Memory\n\nfrom .cache_mixin import cache\nfrom .niimg import _safe_get_data, load_niimg, new_img_like\nfrom .compat import _basestring\n\n\ndef _check_fov(img, affine, shape):\n \"\"\" Return True if img's field of view correspond to given\n shape and affine, False elsewhere.\n \"\"\"\n img = check_niimg(img)\n return (img.shape[:3] == shape and\n np.allclose(img.get_affine(), affine))\n\n\ndef _check_same_fov(img1, img2):\n \"\"\" Return True if img1 and img2 have the same field of view\n (shape and affine), False elsewhere.\n \"\"\"\n img1 = check_niimg(img1)\n img2 = check_niimg(img2)\n return (img1.shape[:3] == img2.shape[:3]\n and np.allclose(img1.get_affine(), img2.get_affine()))\n\n\ndef _index_img(img, index):\n \"\"\"Helper function for check_niimg_4d.\"\"\"\n return new_img_like(\n img, img.get_data()[:, :, :, index], img.get_affine(),\n copy_header=True)\n\n\ndef _iter_check_niimg(niimgs, ensure_ndim=None, atleast_4d=False,\n target_fov=None,\n memory=Memory(cachedir=None),\n memory_level=0, verbose=0):\n \"\"\"Iterate over a list of niimgs and do sanity checks and resampling\n\n Parameters\n ----------\n\n niimgs: list of niimg\n Image to iterate over\n\n ensure_ndim: integer, optional\n If specified, an error is raised if the data does not have the\n required dimension.\n\n atleast_4d: boolean, optional\n If True, any 3D image is converted to a 4D single scan.\n\n target_fov: tuple of affine and shape\n If specified, images are resampled to this field of view\n \"\"\"\n ref_fov = None\n resample_to_first_img = False\n ndim_minus_one = ensure_ndim - 1 if ensure_ndim is not None else None\n if target_fov is not None and target_fov != \"first\":\n ref_fov = target_fov\n for i, niimg in enumerate(niimgs):\n try:\n niimg = check_niimg(\n niimg, ensure_ndim=ndim_minus_one, atleast_4d=atleast_4d)\n if i == 0:\n ndim_minus_one = len(niimg.shape)\n if ref_fov is None:\n ref_fov = (niimg.get_affine(), niimg.shape[:3])\n resample_to_first_img = True\n\n if not _check_fov(niimg, ref_fov[0], ref_fov[1]):\n if target_fov is not None:\n from nilearn import image # we avoid a circular import\n if resample_to_first_img:\n warnings.warn('Affine is different across subjects.'\n ' Realignement on first subject '\n 'affine forced')\n niimg = cache(\n image.resample_img, memory, func_memory_level=2,\n memory_level=memory_level)(\n niimg, target_affine=ref_fov[0],\n target_shape=ref_fov[1])\n else:\n raise ValueError(\n \"Field of view of image #%d is different from \"\n \"reference FOV.\\n\"\n \"Reference affine:\\n%r\\nImage affine:\\n%r\\n\"\n \"Reference shape:\\n%r\\nImage shape:\\n%r\\n\"\n % (i, ref_fov[0], niimg.get_affine(), ref_fov[1],\n niimg.shape))\n yield niimg\n except TypeError as exc:\n img_name = ''\n if isinstance(niimg, _basestring):\n img_name = \" (%s) \" % niimg\n\n exc.args = (('Error encountered while loading image #%d%s'\n % (i, img_name),) + exc.args)\n raise\n\n\ndef check_niimg(niimg, ensure_ndim=None, atleast_4d=False,\n return_iterator=False):\n \"\"\"Check that niimg is a proper 3D/4D niimg. Turn filenames into objects.\n\n Parameters\n ----------\n niimg: Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. 
If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n ensure_ndim: integer {3, 4}, optional\n Indicate the dimensionality of the expected niimg. An\n error is raised if the niimg is of another dimensionality.\n\n atleast_4d: boolean, optional\n Indicates if a 3d image should be turned into a single-scan 4d niimg.\n\n Returns\n -------\n result: 3D/4D Niimg-like object\n Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed\n that the returned object has get_data() and get_affine() methods.\n\n Notes\n -----\n In nilearn, special care has been taken to make image manipulation easy.\n This method is a kind of pre-requisite for any data processing method in\n nilearn because it checks if data have a correct format and loads them if\n necessary.\n\n Its application is idempotent.\n \"\"\"\n # in case of an iterable\n if hasattr(niimg, \"__iter__\") and not isinstance(niimg, _basestring):\n if ensure_ndim == 3:\n raise TypeError(\n \"Data must be a 3D Niimg-like object but you provided a %s.\"\n \" See http://nilearn.github.io/building_blocks/\"\n \"manipulating_mr_images.html#niimg.\" % type(niimg))\n if return_iterator:\n return _iter_check_niimg(niimg, ensure_ndim=ensure_ndim)\n return concat_niimgs(niimg, ensure_ndim=ensure_ndim)\n\n # Otherwise, it should be a filename or a SpatialImage, we load it\n niimg = load_niimg(niimg)\n\n if ensure_ndim == 3 and len(niimg.shape) == 4 and niimg.shape[3] == 1:\n # \"squeeze\" the image.\n data = _safe_get_data(niimg)\n affine = niimg.get_affine()\n niimg = new_img_like(niimg, data[:, :, :, 0], affine)\n if atleast_4d and len(niimg.shape) == 3:\n data = niimg.get_data().view()\n data.shape = data.shape + (1, )\n niimg = new_img_like(niimg, data, niimg.get_affine())\n\n if ensure_ndim is not None and len(niimg.shape) != ensure_ndim:\n raise TypeError(\n \"Data must be a %iD Niimg-like object but you provided an \"\n \"image of shape %s. See \"\n \"http://nilearn.github.io/building_blocks/\"\n \"manipulating_mr_images.html#niimg.\" % (ensure_ndim, niimg.shape))\n\n if return_iterator:\n return (_index_img(niimg, i) for i in range(niimg.shape[3]))\n\n return niimg\n\n\ndef check_niimg_3d(niimg):\n \"\"\"Check that niimg is a proper 3D niimg-like object and load it.\n Parameters\n ----------\n niimg: Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data()\n and get_affine() methods are present, raise TypeError otherwise.\n\n Returns\n -------\n result: 3D Niimg-like object\n Result can be nibabel.Nifti1Image or the input, as-is. It is guaranteed\n that the returned object has get_data() and get_affine() methods.\n\n Notes\n -----\n In nilearn, special care has been taken to make image manipulation easy.\n This method is a kind of pre-requisite for any data processing method in\n nilearn because it checks if data have a correct format and loads them if\n necessary.\n\n Its application is idempotent.\n \"\"\"\n return check_niimg(niimg, ensure_ndim=3)\n\n\ndef check_niimg_4d(niimg, return_iterator=False):\n \"\"\"Check that niimg is a proper 4D niimg-like object and load it.\n\n Parameters\n ----------\n niimg: 4D Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n If niimgs is an iterable, checks if data is really 4D. 
Then,\n considering that it is a list of niimg and load them one by one.\n If niimg is a string, consider it as a path to Nifti image and\n call nibabel.load on it. If it is an object, check if get_data\n and get_affine methods are present, raise an Exception otherwise.\n\n return_iterator: boolean\n If True, an iterator of 3D images is returned. This reduces the memory\n usage when `niimgs` contains 3D images.\n If False, a single 4D image is returned. When `niimgs` contains 3D\n images they are concatenated together.\n\n Returns\n -------\n niimg: 4D nibabel.Nifti1Image or iterator of 3D nibabel.Nifti1Image\n\n Notes\n -----\n This function is the equivalent to check_niimg_3d() for Niimg-like objects\n with a session level.\n\n Its application is idempotent.\n \"\"\"\n return check_niimg(niimg, ensure_ndim=4, return_iterator=return_iterator)\n\n\ndef concat_niimgs(niimgs, dtype=np.float32, ensure_ndim=None,\n memory=Memory(cachedir=None), memory_level=0,\n auto_resample=False, verbose=0):\n \"\"\"Concatenate a list of 3D/4D niimgs of varying lengths.\n\n The niimgs list can contain niftis/paths to images of varying dimensions\n (i.e., 3D or 4D) as well as different 3D shapes and affines, as they\n will be matched to the first image in the list if auto_resample=True.\n\n Parameters\n ----------\n niimgs: iterable of Niimg-like objects\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n Niimgs to concatenate.\n\n dtype: numpy dtype, optional\n the dtype of the returned image\n\n ensure_ndim: integer, optional\n Indicate the dimensionality of the expected niimg. An\n error is raised if the niimg is of another dimensionality.\n\n auto_resample: boolean\n Converts all images to the space of the first one.\n\n verbose: int\n Controls the amount of verbosity (0 means no messages).\n\n memory : instance of joblib.Memory or string\n Used to cache the resampling process.\n By default, no caching is done. If a string is given, it is the\n path to the caching directory.\n\n memory_level : integer, optional\n Rough estimator of the amount of memory used by caching. 
Higher value\n means more memory for caching.\n\n Returns\n -------\n concatenated: nibabel.Nifti1Image\n A single image.\n \"\"\"\n\n target_fov = 'first' if auto_resample else None\n\n # First niimg is extracted to get information and for new_img_like\n first_niimg = None\n\n iterator, literator = itertools.tee(iter(niimgs))\n try:\n first_niimg = check_niimg(next(literator))\n except StopIteration:\n raise TypeError('Cannot concatenate empty objects')\n\n if ensure_ndim is None:\n ndim = len(first_niimg.shape)\n else:\n ndim = ensure_ndim - 1\n\n lengths = [first_niimg.shape[-1] if ndim == 4 else 1]\n for niimg in literator:\n # We check the dimensionality of the niimg\n niimg = check_niimg(niimg, ensure_ndim=ndim)\n lengths.append(niimg.shape[-1] if ndim == 4 else 1)\n\n target_shape = first_niimg.shape[:3]\n data = np.ndarray(target_shape + (sum(lengths), ),\n order=\"F\", dtype=dtype)\n cur_4d_index = 0\n for index, (size, niimg) in enumerate(zip(lengths, _iter_check_niimg(\n iterator, atleast_4d=True, target_fov=target_fov,\n memory=memory, memory_level=memory_level))):\n\n if verbose > 0:\n if isinstance(niimg, _basestring):\n nii_str = \"image \" + niimg\n else:\n nii_str = \"image #\" + str(index)\n print(\"Concatenating {0}: {1}\".format(index + 1, nii_str))\n\n data[..., cur_4d_index:cur_4d_index + size] = niimg.get_data()\n cur_4d_index += size\n\n return new_img_like(first_niimg, data, first_niimg.get_affine())\n", "import itertools\n\nimport numpy as np\n\nfrom nose.tools import assert_equal, assert_raises\n\nfrom nilearn._utils.testing import generate_fake_fmri\n\n\ndef test_generate_fake_fmri():\n shapes = [(6, 6, 7), (10, 11, 12)]\n lengths = [16, 20]\n kinds = ['noise', 'step']\n n_blocks = [None, 1, 4]\n block_size = [None, 4]\n block_type = ['classification', 'regression']\n\n rand_gen = np.random.RandomState(3)\n\n for shape, length, kind, n_block, bsize, btype in itertools.product(\n shapes, lengths, kinds, n_blocks, block_size, block_type):\n\n if n_block is None:\n fmri, mask = generate_fake_fmri(\n shape=shape, length=length, kind=kind,\n n_blocks=n_block, block_size=bsize,\n block_type=btype,\n rand_gen=rand_gen)\n else:\n fmri, mask, target = generate_fake_fmri(\n shape=shape, length=length, kind=kind,\n n_blocks=n_block, block_size=bsize,\n block_type=btype,\n rand_gen=rand_gen)\n\n assert_equal(fmri.shape[:-1], shape)\n assert_equal(fmri.shape[-1], length)\n\n if n_block is not None:\n assert_equal(target.size, length)\n\n assert_raises(ValueError, generate_fake_fmri, length=10, n_blocks=10,\n block_size=None, rand_gen=rand_gen)\n", "\"\"\"\nExtracting signals of a probabilistic atlas of rest functional regions\n========================================================================\n\nThis example extracts the signal on regions defined via a probabilistic\natlas, to construct a functional connectome.\n\nWe use the `MSDL atlas\n<https://team.inria.fr/parietal/research/spatial_patterns/spatial-patterns-in-resting-state/>`_\nof functional regions in rest.\n\nThe key to extract signals is to use the\n:class:`nilearn.input_data.NiftiMapsMasker` that can transform nifti\nobjects to time series using a probabilistic atlas.\n\nAs the MSDL atlas comes with (x, y, z) MNI coordinates for the different\nregions, we can visualize the matrix as a graph of interaction in a\nbrain. 
To avoid having too dense a graph, we represent only the 20% edges\nwith the highest values.\n\n\"\"\"\n\nfrom nilearn import datasets\natlas = datasets.fetch_msdl_atlas()\natlas_filename = atlas['maps']\n\n# Load the labels\nimport numpy as np\ncsv_filename = atlas['labels']\n\n# The recfromcsv function can load a csv file\nlabels = np.recfromcsv(csv_filename)\nnames = labels['name']\n\nfrom nilearn.input_data import NiftiMapsMasker\nmasker = NiftiMapsMasker(maps_img=atlas_filename, standardize=True,\n memory='nilearn_cache', verbose=5)\n\ndata = datasets.fetch_adhd(n_subjects=1)\n\n# print basic dataset information\nprint('First subject resting-state nifti image (4D) is located at: %s' %\n data.func[0])\n\ntime_series = masker.fit_transform(data.func[0],\n confounds=data.confounds)\n\ncorrelation_matrix = np.corrcoef(time_series.T)\n\n# Display the correlation matrix\nfrom matplotlib import pyplot as plt\nplt.figure(figsize=(10, 10))\nplt.imshow(correlation_matrix, interpolation=\"nearest\")\n# And display the labels\nx_ticks = plt.xticks(range(len(names)), names, rotation=90)\ny_ticks = plt.yticks(range(len(names)), names)\n\n# And now display the corresponding graph\nfrom nilearn import plotting\ncoords = np.vstack((labels['x'], labels['y'], labels['z'])).T\n\n# We threshold to keep only the 20% of edges with the highest value\n# because the graph is very dense\nplotting.plot_connectome(correlation_matrix, coords,\n edge_threshold=\"80%\")\n\nplt.show()\n\n\n", "\"\"\"\nFunctions to do automatic visualization of Niimg-like objects\nSee http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n\nOnly matplotlib is required.\n\"\"\"\n\n# Author: Gael Varoquaux, Chris Filo Gorgolewski\n# License: BSD\n\n# Standard library imports\nimport functools\nimport numbers\nimport warnings\n\n# Standard scientific libraries imports (more specific imports are\n# delayed, so that the part module can be used without them).\nimport numpy as np\nfrom scipy import ndimage\nfrom nibabel.spatialimages import SpatialImage\n\nfrom .._utils.numpy_conversions import as_ndarray\n\nimport matplotlib.pyplot as plt\n\nfrom .. import _utils\nfrom .._utils import new_img_like\nfrom .._utils.extmath import fast_abs_percentile\nfrom .._utils.fixes.matplotlib_backports import (cbar_outline_get_xy,\n cbar_outline_set_xy)\nfrom ..datasets import load_mni152_template\nfrom .displays import get_slicer, get_projector\nfrom . 
import cm\n\n################################################################################\n# Core, usage-agnostic functions\n\n\ndef _plot_img_with_bg(img, bg_img=None, cut_coords=None,\n output_file=None, display_mode='ortho',\n colorbar=False, figure=None, axes=None, title=None,\n threshold=None, annotate=True,\n draw_cross=True, black_bg=False,\n bg_vmin=None, bg_vmax=None, interpolation=\"nearest\",\n display_factory=get_slicer,\n cbar_vmin=None, cbar_vmax=None,\n **kwargs):\n \"\"\" Internal function, please refer to the docstring of plot_img for parameters\n not listed below.\n\n Parameters\n ----------\n bg_vmin: float\n vmin for bg_img\n bg_vmax: float\n vmax for bg_img\n interpolation: string\n passed to the add_overlay calls\n display_factory: function\n takes a display_mode argument and return a display class\n \"\"\"\n show_nan_msg = False\n if ('vmax' in kwargs and kwargs['vmax'] is not None and\n np.isnan(kwargs['vmax'])):\n kwargs.pop('vmax')\n show_nan_msg = True\n if ('vmin' in kwargs and kwargs['vmin'] is not None and\n np.isnan(kwargs['vmin'])):\n kwargs.pop('vmin')\n show_nan_msg = True\n if show_nan_msg:\n nan_msg = ('NaN is not permitted for the vmax and vmin arguments.\\n'\n 'Tip: Use np.nan_max() instead of np.max().')\n warnings.warn(nan_msg)\n\n if img is not False and img is not None:\n img = _utils.check_niimg_3d(img)\n data = img.get_data()\n affine = img.get_affine()\n\n if np.isnan(np.sum(data)):\n data = np.nan_to_num(data)\n\n # Deal with automatic settings of plot parameters\n if threshold == 'auto':\n # Threshold epsilon below a percentile value, to be sure that some\n # voxels pass the threshold\n threshold = fast_abs_percentile(data) - 1e-5\n\n img = new_img_like(img, as_ndarray(data), affine)\n\n display = display_factory(display_mode)(\n img,\n threshold=threshold,\n cut_coords=cut_coords,\n figure=figure, axes=axes,\n black_bg=black_bg,\n colorbar=colorbar)\n\n if bg_img is not None:\n bg_img = _utils.check_niimg_3d(bg_img)\n display.add_overlay(bg_img,\n vmin=bg_vmin, vmax=bg_vmax,\n cmap=plt.cm.gray, interpolation=interpolation)\n\n if img is not None and img is not False:\n display.add_overlay(new_img_like(img, data, affine),\n threshold=threshold, interpolation=interpolation,\n colorbar=colorbar, **kwargs)\n\n if annotate:\n display.annotate()\n if draw_cross:\n display.draw_cross()\n if title is not None and not title == '':\n display.title(title)\n if (cbar_vmax is not None) or (cbar_vmin is not None):\n if hasattr(display, '_cbar'):\n cbar = display._cbar\n cbar_tick_locs = cbar.locator.locs\n if cbar_vmax is None:\n cbar_vmax = cbar_tick_locs.max()\n if cbar_vmin is None:\n cbar_vmin = cbar_tick_locs.min()\n new_tick_locs = np.linspace(cbar_vmin, cbar_vmax,\n len(cbar_tick_locs))\n cbar.ax.set_ylim(cbar.norm(cbar_vmin), cbar.norm(cbar_vmax))\n outline = cbar_outline_get_xy(cbar.outline)\n outline[:2, 1] += cbar.norm(cbar_vmin)\n outline[2:6, 1] -= (1. 
- cbar.norm(cbar_vmax))\n outline[6:, 1] += cbar.norm(cbar_vmin)\n cbar_outline_set_xy(cbar.outline, outline)\n cbar.set_ticks(new_tick_locs, update_ticks=True)\n\n if output_file is not None:\n display.savefig(output_file)\n display.close()\n display = None\n return display\n\n\ndef plot_img(img, cut_coords=None, output_file=None, display_mode='ortho',\n figure=None, axes=None, title=None, threshold=None,\n annotate=True, draw_cross=True, black_bg=False, colorbar=False, **kwargs):\n \"\"\" Plot cuts of a given image (by default Frontal, Axial, and Lateral)\n\n Parameters\n ----------\n img: Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n cut_coords: None, a tuple of floats, or an integer\n The MNI coordinates of the point where the cut is performed\n If display_mode is 'ortho', this should be a 3-tuple: (x, y, z)\n For display_mode == 'x', 'y', or 'z', then these are the\n coordinates of each cut in the corresponding direction.\n If None is given, the cuts is calculated automaticaly.\n If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,\n in which case it specifies the number of cuts to perform\n output_file: string, or None, optional\n The name of an image file to export the plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n display_mode: {'ortho', 'x', 'y', 'z'}\n Choose the direction of the cuts: 'x' - saggital, 'y' - coronal,\n 'z' - axial, 'ortho' - three cuts are performed in orthogonal\n directions.\n figure : integer or matplotlib figure, optional\n Matplotlib figure used or its number. If None is given, a\n new figure is created.\n axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional\n The axes, or the coordinates, in matplotlib figure space,\n of the axes used to display the plot. If None, the complete\n figure is used.\n title : string, optional\n The title displayed on the figure.\n threshold : a number, None, or 'auto'\n If None is given, the image is not thresholded.\n If a number is given, it is used to threshold the image:\n values below the threshold (in absolute value) are plotted\n as transparent. If auto is given, the threshold is determined\n magically by analysis of the image.\n annotate: boolean, optional\n If annotate is True, positions and left/right annotation\n are added to the plot.\n draw_cross: boolean, optional\n If draw_cross is True, a cross is drawn on the plot to\n indicate the cut plosition.\n black_bg: boolean, optional\n If True, the background of the image is set to be black. 
If\n you wish to save figures with a black background, you\n will need to pass \"facecolor='k', edgecolor='k'\" to pylab's\n savefig.\n colorbar: boolean, optional\n If True, display a colorbar on the right of the plots.\n kwargs: extra keyword arguments, optional\n Extra keyword arguments passed to pylab.imshow\n \"\"\"\n display = _plot_img_with_bg(img, cut_coords=cut_coords,\n output_file=output_file, display_mode=display_mode,\n figure=figure, axes=axes, title=title,\n threshold=threshold, annotate=annotate,\n draw_cross=draw_cross, resampling_interpolation='continuous',\n black_bg=black_bg, colorbar=colorbar, **kwargs)\n\n return display\n\n\n################################################################################\n# Anatomy image for background\n\n# A constant class to serve as a sentinel for the default MNI template\nclass _MNI152Template(SpatialImage):\n \"\"\" This class is a constant pointing to the MNI152 Template\n provided by nilearn\n \"\"\"\n\n data = None\n affine = None\n vmax = None\n _shape = None\n\n def __init__(self, data=None, affine=None, header=None):\n # Comply with spatial image requirements while allowing empty init\n pass\n\n def load(self):\n if self.data is None:\n anat_img = load_mni152_template()\n data = anat_img.get_data()\n data = data.astype(np.float)\n anat_mask = ndimage.morphology.binary_fill_holes(data > 0)\n data = np.ma.masked_array(data, np.logical_not(anat_mask))\n self.affine = anat_img.get_affine()\n self.data = data\n self.vmax = data.max()\n self._shape = anat_img.shape\n\n def get_data(self):\n self.load()\n return self.data\n\n def get_affine(self):\n self.load()\n return self.affine\n \n @property\n def shape(self):\n self.load()\n return self._shape\n \n def get_shape(self):\n self.load()\n return self._shape\n\n def __str__(self):\n return \"<MNI152Template>\"\n\n\n# The constant that we use as a default in functions\nMNI152TEMPLATE = _MNI152Template()\n\n\ndef _load_anat(anat_img=MNI152TEMPLATE, dim=False, black_bg='auto'):\n \"\"\" Internal function used to load anatomy, for optional diming\n \"\"\"\n vmin = None\n vmax = None\n if anat_img is not False and anat_img is not None:\n if anat_img is MNI152TEMPLATE:\n anat_img.load()\n # We special-case the 'canonical anat', as we don't need\n # to do a few transforms to it.\n vmin = 0\n vmax = anat_img.vmax\n if black_bg == 'auto':\n black_bg = False\n else:\n anat_img = _utils.check_niimg_3d(anat_img)\n if dim or black_bg == 'auto':\n # We need to inspect the values of the image\n data = anat_img.get_data()\n vmin = data.min()\n vmax = data.max()\n if black_bg == 'auto':\n # Guess if the background is rather black or light based on\n # the values of voxels near the border\n border_size = 2\n border_data = np.concatenate([\n data[:border_size, :, :].ravel(),\n data[-border_size:, :, :].ravel(),\n data[:, :border_size, :].ravel(),\n data[:, -border_size:, :].ravel(),\n data[:, :, :border_size].ravel(),\n data[:, :, -border_size:].ravel(),\n ])\n background = np.median(border_data)\n if background > .5 * (vmin + vmax):\n black_bg = False\n else:\n black_bg = True\n if dim:\n vmean = .5 * (vmin + vmax)\n ptp = .5 * (vmax - vmin)\n if black_bg:\n if not isinstance(dim, numbers.Number):\n dim = .8\n vmax = vmean + (1 + dim) * ptp\n else:\n if not isinstance(dim, numbers.Number):\n dim = .6\n vmin = vmean - (1 + dim) * ptp\n if black_bg == 'auto':\n # No anatomy given: no need to turn black_bg on\n black_bg = False\n return anat_img, black_bg, vmin, 
vmax\n\n\n################################################################################\n# Usage-specific functions\n\n\ndef plot_anat(anat_img=MNI152TEMPLATE, cut_coords=None,\n output_file=None, display_mode='ortho', figure=None,\n axes=None, title=None, annotate=True, draw_cross=True,\n black_bg='auto', dim=False, cmap=plt.cm.gray, **kwargs):\n \"\"\" Plot cuts of an anatomical image (by default 3 cuts:\n Frontal, Axial, and Lateral)\n\n Parameters\n ----------\n anat_img : Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n The anatomical image to be used as a background. If None is\n given, nilearn tries to find a T1 template.\n cut_coords: None, a tuple of floats, or an integer\n The MNI coordinates of the point where the cut is performed\n If display_mode is 'ortho', this should be a 3-tuple: (x, y, z)\n For display_mode == 'x', 'y', or 'z', then these are the\n coordinates of each cut in the corresponding direction.\n If None is given, the cuts is calculated automaticaly.\n If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,\n in which case it specifies the number of cuts to perform\n output_file: string, or None, optional\n The name of an image file to export the plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n display_mode: {'ortho', 'x', 'y', 'z'}\n Choose the direction of the cuts: 'x' - saggital, 'y' - coronal,\n 'z' - axial, 'ortho' - three cuts are performed in orthogonal\n directions.\n figure : integer or matplotlib figure, optional\n Matplotlib figure used or its number. If None is given, a\n new figure is created.\n axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional\n The axes, or the coordinates, in matplotlib figure space,\n of the axes used to display the plot. If None, the complete\n figure is used.\n title : string, optional\n The title displayed on the figure.\n annotate: boolean, optional\n If annotate is True, positions and left/right annotation\n are added to the plot.\n draw_cross: boolean, optional\n If draw_cross is True, a cross is drawn on the plot to\n indicate the cut plosition.\n black_bg: boolean, optional\n If True, the background of the image is set to be black. 
If\n you wish to save figures with a black background, you\n will need to pass \"facecolor='k', edgecolor='k'\" to pylab's\n savefig.\n cmap: matplotlib colormap, optional\n The colormap for the anat\n\n Notes\n -----\n Arrays should be passed in numpy convention: (x, y, z)\n ordered.\n \"\"\"\n anat_img, black_bg, vmin, vmax = _load_anat(anat_img,\n dim=dim, black_bg=black_bg)\n # vmin and/or vmax could have been provided in the kwargs\n vmin = kwargs.pop('vmin', vmin)\n vmax = kwargs.pop('vmax', vmax)\n display = plot_img(anat_img, cut_coords=cut_coords,\n output_file=output_file, display_mode=display_mode,\n figure=figure, axes=axes, title=title,\n threshold=None, annotate=annotate,\n draw_cross=draw_cross, black_bg=black_bg,\n vmin=vmin, vmax=vmax, cmap=cmap, **kwargs)\n return display\n\n\ndef plot_epi(epi_img=None, cut_coords=None, output_file=None,\n display_mode='ortho', figure=None, axes=None, title=None,\n annotate=True, draw_cross=True, black_bg=True,\n cmap=plt.cm.spectral, **kwargs):\n \"\"\" Plot cuts of an EPI image (by default 3 cuts:\n Frontal, Axial, and Lateral)\n\n Parameters\n ----------\n epi_img : a nifti-image like object or a filename\n The EPI (T2*) image\n cut_coords: None, a tuple of floats, or an integer\n The MNI coordinates of the point where the cut is performed\n If display_mode is 'ortho', this should be a 3-tuple: (x, y, z)\n For display_mode == 'x', 'y', or 'z', then these are the\n coordinates of each cut in the corresponding direction.\n If None is given, the cuts is calculated automaticaly.\n If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,\n in which case it specifies the number of cuts to perform\n output_file: string, or None, optional\n The name of an image file to export the plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n display_mode: {'ortho', 'x', 'y', 'z'}\n Choose the direction of the cuts: 'x' - saggital, 'y' - coronal,\n 'z' - axial, 'ortho' - three cuts are performed in orthogonal\n directions.\n figure : integer or matplotlib figure, optional\n Matplotlib figure used or its number. If None is given, a\n new figure is created.\n axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional\n The axes, or the coordinates, in matplotlib figure space,\n of the axes used to display the plot. If None, the complete\n figure is used.\n title : string, optional\n The title displayed on the figure.\n annotate: boolean, optional\n If annotate is True, positions and left/right annotation\n are added to the plot.\n draw_cross: boolean, optional\n If draw_cross is True, a cross is drawn on the plot to\n indicate the cut plosition.\n black_bg: boolean, optional\n If True, the background of the image is set to be black. If\n you wish to save figures with a black background, you\n will need to pass \"facecolor='k', edgecolor='k'\" to pylab's\n savefig.\n cmap: matplotlib colormap, optional\n The colormap for specified image\n threshold : a number, None, or 'auto'\n If None is given, the image is not thresholded.\n If a number is given, it is used to threshold the image:\n values below the threshold (in absolute value) are plotted\n as transparent. 
If auto is given, the threshold is determined\n magically by analysis of the image.\n\n Notes\n -----\n Arrays should be passed in numpy convention: (x, y, z)\n ordered.\n \"\"\"\n display = plot_img(epi_img, cut_coords=cut_coords,\n output_file=output_file, display_mode=display_mode,\n figure=figure, axes=axes, title=title,\n threshold=None, annotate=annotate,\n draw_cross=draw_cross, black_bg=black_bg,\n cmap=cmap, **kwargs)\n return display\n\n\ndef plot_roi(roi_img, bg_img=MNI152TEMPLATE, cut_coords=None,\n output_file=None, display_mode='ortho', figure=None, axes=None,\n title=None, annotate=True, draw_cross=True, black_bg='auto',\n alpha=0.7, cmap=plt.cm.gist_ncar, dim=True, **kwargs):\n \"\"\" Plot cuts of an ROI/mask image (by default 3 cuts: Frontal, Axial, and\n Lateral)\n\n Parameters\n ----------\n roi_img : Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n The ROI/mask image, it could be binary mask or an atlas or ROIs\n with integer values.\n bg_img : Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n The background image that the ROI/mask will be plotted on top of. If\n not specified MNI152 template will be used.\n cut_coords: None, or a tuple of floats\n The MNI coordinates of the point where the cut is performed, in\n MNI coordinates and order.\n If display_mode is 'ortho', this should be a 3-tuple: (x, y, z)\n For display_mode == 'x', 'y', or 'z', then these are the\n coordinates of each cut in the corresponding direction.\n If None is given, the cuts is calculated automaticaly.\n output_file: string, or None, optional\n The name of an image file to export the plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n display_mode: {'ortho', 'x', 'y', 'z'}\n Choose the direction of the cuts: 'x' - saggital, 'y' - coronal,\n 'z' - axial, 'ortho' - three cuts are performed in orthogonal\n directions.\n figure : integer or matplotlib figure, optional\n Matplotlib figure used or its number. If None is given, a\n new figure is created.\n axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional\n The axes, or the coordinates, in matplotlib figure space,\n of the axes used to display the plot. If None, the complete\n figure is used.\n title : string, optional\n The title displayed on the figure.\n annotate: boolean, optional\n If annotate is True, positions and left/right annotation\n are added to the plot.\n draw_cross: boolean, optional\n If draw_cross is True, a cross is drawn on the plot to\n indicate the cut plosition.\n black_bg: boolean, optional\n If True, the background of the image is set to be black. If\n you wish to save figures with a black background, you\n will need to pass \"facecolor='k', edgecolor='k'\" to pylab's\n savefig.\n threshold : a number, None, or 'auto'\n If None is given, the image is not thresholded.\n If a number is given, it is used to threshold the image:\n values below the threshold (in absolute value) are plotted\n as transparent. 
If auto is given, the threshold is determined\n magically by analysis of the image.\n\n \"\"\"\n bg_img, black_bg, bg_vmin, bg_vmax = _load_anat(bg_img, dim=dim,\n black_bg=black_bg)\n\n display = _plot_img_with_bg(img=roi_img, bg_img=bg_img,\n cut_coords=cut_coords,\n output_file=output_file,\n display_mode=display_mode,\n figure=figure, axes=axes, title=title,\n annotate=annotate, draw_cross=draw_cross,\n black_bg=black_bg, threshold=0.5,\n bg_vmin=bg_vmin, bg_vmax=bg_vmax,\n resampling_interpolation='nearest',\n alpha=alpha, cmap=cmap, **kwargs)\n return display\n\n\ndef plot_stat_map(stat_map_img, bg_img=MNI152TEMPLATE, cut_coords=None,\n output_file=None, display_mode='ortho', colorbar=True,\n figure=None, axes=None, title=None, threshold=1e-6,\n annotate=True, draw_cross=True, black_bg='auto',\n cmap=cm.cold_hot, symmetric_cbar=\"auto\",\n dim=True, **kwargs):\n \"\"\" Plot cuts of an ROI/mask image (by default 3 cuts: Frontal, Axial, and\n Lateral)\n\n Parameters\n ----------\n stat_map_img : Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n The statistical map image\n bg_img : Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n The background image that the ROI/mask will be plotted on top of. If\n not specified MNI152 template will be used.\n cut_coords : None, a tuple of floats, or an integer\n The MNI coordinates of the point where the cut is performed\n If display_mode is 'ortho', this should be a 3-tuple: (x, y, z)\n For display_mode == 'x', 'y', or 'z', then these are the\n coordinates of each cut in the corresponding direction.\n If None is given, the cuts is calculated automaticaly.\n If display_mode is 'x', 'y' or 'z', cut_coords can be an integer,\n in which case it specifies the number of cuts to perform\n output_file : string, or None, optional\n The name of an image file to export the plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n display_mode : {'ortho', 'x', 'y', 'z'}\n Choose the direction of the cuts: 'x' - saggital, 'y' - coronal,\n 'z' - axial, 'ortho' - three cuts are performed in orthogonal\n directions.\n colorbar : boolean, optional\n If True, display a colorbar on the right of the plots.\n figure : integer or matplotlib figure, optional\n Matplotlib figure used or its number. If None is given, a\n new figure is created.\n axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional\n The axes, or the coordinates, in matplotlib figure space,\n of the axes used to display the plot. If None, the complete\n figure is used.\n title : string, optional\n The title displayed on the figure.\n threshold : a number, None, or 'auto'\n If None is given, the image is not thresholded.\n If a number is given, it is used to threshold the image:\n values below the threshold (in absolute value) are plotted\n as transparent. If auto is given, the threshold is determined\n magically by analysis of the image.\n annotate: boolean, optional\n If annotate is True, positions and left/right annotation\n are added to the plot.\n draw_cross: boolean, optional\n If draw_cross is True, a cross is drawn on the plot to\n indicate the cut plosition.\n black_bg: boolean, optional\n If True, the background of the image is set to be black. 
If\n you wish to save figures with a black background, you\n will need to pass \"facecolor='k', edgecolor='k'\" to pylab's\n savefig.\n cmap: matplotlib colormap, optional\n The colormap for specified image. The ccolormap *must* be\n symmetrical.\n symmetric_cbar: boolean or 'auto', optional, default 'auto'\n Specifies whether the colorbar should range from -vmax to vmax\n or from 0 to vmax. Setting to 'auto' will select the latter if\n the whole image is non-negative.\n\n Notes\n -----\n Arrays should be passed in numpy convention: (x, y, z)\n ordered.\n \"\"\"\n # dim the background\n bg_img, black_bg, bg_vmin, bg_vmax = _load_anat(bg_img, dim=dim,\n black_bg=black_bg)\n\n # make sure that the color range is symmetrical\n if ('vmax' not in kwargs) or (symmetric_cbar in ['auto', False]):\n stat_map_img = _utils.check_niimg_3d(stat_map_img)\n stat_map_data = stat_map_img.get_data()\n # Avoid dealing with masked_array:\n if hasattr(stat_map_data, '_mask'):\n stat_map_data = np.asarray(stat_map_data[\n np.logical_not(stat_map_data._mask)])\n stat_map_max = np.nanmax(stat_map_data)\n stat_map_min = np.nanmin(stat_map_data)\n\n if symmetric_cbar == 'auto':\n symmetric_cbar = (stat_map_min < 0) and (stat_map_max > 0)\n\n if 'vmax' in kwargs:\n vmax = kwargs.pop('vmax')\n else:\n vmax = max(-stat_map_min, stat_map_max)\n\n if 'vmin' in kwargs:\n raise ValueError('plot_stat_map does not accept a \"vmin\" '\n 'argument, as it uses a symmetrical range '\n 'defined via the vmax argument. To threshold '\n 'the map, use the \"threshold\" argument')\n vmin = -vmax\n\n if not symmetric_cbar:\n negative_range = (stat_map_max <= 0)\n positive_range = (stat_map_min >= 0)\n if positive_range:\n cbar_vmin = 0\n cbar_vmax = None\n elif negative_range:\n cbar_vmax = 0\n cbar_vmin = None\n else:\n cbar_vmin = stat_map_min\n cbar_vmax = stat_map_max\n else:\n cbar_vmin, cbar_vmax = None, None\n\n display = _plot_img_with_bg(img=stat_map_img, bg_img=bg_img,\n cut_coords=cut_coords,\n output_file=output_file,\n display_mode=display_mode,\n figure=figure, axes=axes, title=title,\n annotate=annotate, draw_cross=draw_cross,\n black_bg=black_bg, threshold=threshold,\n bg_vmin=bg_vmin, bg_vmax=bg_vmax, cmap=cmap,\n vmin=vmin, vmax=vmax, colorbar=colorbar,\n cbar_vmin=cbar_vmin, cbar_vmax=cbar_vmax,\n resampling_interpolation='continuous',\n **kwargs)\n\n return display\n\n\ndef plot_glass_brain(stat_map_img,\n output_file=None, display_mode='ortho', colorbar=False,\n figure=None, axes=None, title=None, threshold='auto',\n annotate=True,\n black_bg=False,\n cmap=None,\n alpha=0.7,\n **kwargs):\n \"\"\"Plot 2d projections of an ROI/mask image (by default 3 projections:\n Frontal, Axial, and Lateral). The brain glass schematics\n are added on top of the image.\n\n Parameters\n ----------\n stat_map_img : Niimg-like object\n See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg.\n The statistical map image. It needs to be in MNI space\n in order to align with the brain schematics.\n output_file : string, or None, optional\n The name of an image file to export the plot to. Valid extensions\n are .png, .pdf, .svg. 
If output_file is not None, the plot\n is saved to a file, and the display is closed.\n display_mode : {'ortho', 'x', 'y', 'z'}\n Choose the direction of the cuts: 'x' - saggital, 'y' - coronal,\n 'z' - axial, 'ortho' - three cuts are performed in orthogonal\n directions.\n colorbar : boolean, optional\n If True, display a colorbar on the right of the plots.\n figure : integer or matplotlib figure, optional\n Matplotlib figure used or its number. If None is given, a\n new figure is created.\n axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional\n The axes, or the coordinates, in matplotlib figure space,\n of the axes used to display the plot. If None, the complete\n figure is used.\n title : string, optional\n The title displayed on the figure.\n threshold : a number, None, or 'auto'\n If None is given, the image is not thresholded.\n If a number is given, it is used to threshold the image:\n values below the threshold (in absolute value) are plotted\n as transparent. If auto is given, the threshold is determined\n magically by analysis of the image.\n annotate: boolean, optional\n If annotate is True, positions and left/right annotation\n are added to the plot.\n black_bg: boolean, optional\n If True, the background of the image is set to be black. If\n you wish to save figures with a black background, you\n will need to pass \"facecolor='k', edgecolor='k'\" to pylab's\n savefig.\n cmap: matplotlib colormap, optional\n The colormap for specified image\n alpha: float between 0 and 1\n Alpha transparency for the brain schematics\n\n Notes\n -----\n Arrays should be passed in numpy convention: (x, y, z)\n ordered.\n\n \"\"\"\n if cmap is None:\n cmap = plt.cm.hot if black_bg else plt.cm.hot_r\n\n def display_factory(display_mode):\n return functools.partial(get_projector(display_mode), alpha=alpha)\n\n display = _plot_img_with_bg(img=stat_map_img,\n output_file=output_file,\n display_mode=display_mode,\n figure=figure, axes=axes, title=title,\n annotate=annotate,\n black_bg=black_bg, threshold=threshold,\n cmap=cmap, colorbar=colorbar,\n display_factory=display_factory,\n resampling_interpolation='continuous',\n **kwargs)\n\n return display\n\n\ndef plot_connectome(adjacency_matrix, node_coords,\n node_color='auto', node_size=50,\n edge_cmap=cm.bwr,\n edge_vmin=None, edge_vmax=None,\n edge_threshold=None,\n output_file=None, display_mode='ortho',\n figure=None, axes=None, title=None,\n annotate=True, black_bg=False,\n alpha=0.7,\n edge_kwargs=None, node_kwargs=None):\n \"\"\"Plot connectome on top of the brain glass schematics.\n\n Parameters\n ----------\n adjacency_matrix: numpy array of shape (n, n)\n represents the link strengths of the graph. Assumed to be\n a symmetric matrix.\n node_coords: numpy array_like of shape (n, 3)\n 3d coordinates of the graph nodes in world space.\n node_color: color or sequence of colors\n color(s) of the nodes.\n node_size: scalar or array_like\n size(s) of the nodes in points^2.\n edge_cmap: colormap\n colormap used for representing the strength of the edges.\n edge_vmin: float, optional, default: None\n edge_vmax: float, optional, default: None\n If not None, either or both of these values will be used to\n as the minimum and maximum values to color edges. 
If None are\n supplied the maximum absolute value within the given threshold\n will be used as minimum (multiplied by -1) and maximum\n coloring levels.\n edge_threshold: str or number\n If it is a number only the edges with a value greater than\n edge_threshold will be shown.\n If it is a string it must finish with a percent sign,\n e.g. \"25.3%\", and only the edges with a abs(value) above\n the given percentile will be shown.\n output_file : string, or None, optional\n The name of an image file to export the plot to. Valid extensions\n are .png, .pdf, .svg. If output_file is not None, the plot\n is saved to a file, and the display is closed.\n display_mode : {'ortho', 'x', 'y', 'z'}\n Choose the direction of the cuts: 'x' - saggital, 'y' - coronal,\n 'z' - axial, 'ortho' - three cuts are performed in orthogonal\n directions.\n figure : integer or matplotlib figure, optional\n Matplotlib figure used or its number. If None is given, a\n new figure is created.\n axes : matplotlib axes or 4 tuple of float: (xmin, ymin, width, height), optional\n The axes, or the coordinates, in matplotlib figure space,\n of the axes used to display the plot. If None, the complete\n figure is used.\n title : string, optional\n The title displayed on the figure.\n annotate: boolean, optional\n If annotate is True, positions and left/right annotation\n are added to the plot.\n black_bg: boolean, optional\n If True, the background of the image is set to be black. If\n you wish to save figures with a black background, you\n will need to pass \"facecolor='k', edgecolor='k'\" to pylab's\n savefig.\n alpha: float between 0 and 1\n Alpha transparency for the brain schematics.\n edge_kwargs: dict\n will be passed as kwargs for each edge matlotlib Line2D.\n node_kwargs: dict\n will be passed as kwargs to the plt.scatter call that plots all\n the nodes in one go\n\n \"\"\"\n display = plot_glass_brain(None,\n display_mode=display_mode,\n figure=figure, axes=axes, title=title,\n annotate=annotate,\n black_bg=black_bg)\n\n display.add_graph(adjacency_matrix, node_coords,\n node_color=node_color, node_size=node_size,\n edge_cmap=edge_cmap,\n edge_vmin=edge_vmin, edge_vmax=edge_vmax,\n edge_threshold=edge_threshold,\n edge_kwargs=edge_kwargs, node_kwargs=node_kwargs)\n\n if output_file is not None:\n display.savefig(output_file)\n display.close()\n display = None\n\n return display\n" ]
[ [ "sklearn.externals.joblib.Memory" ], [ "numpy.random.RandomState" ], [ "matplotlib.pyplot.imshow", "numpy.recfromcsv", "numpy.corrcoef", "matplotlib.pyplot.show", "numpy.vstack", "matplotlib.pyplot.figure" ], [ "numpy.nanmax", "numpy.logical_not", "scipy.ndimage.morphology.binary_fill_holes", "numpy.isnan", "numpy.median", "numpy.nanmin", "numpy.nan_to_num", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "0.15", "1.4", "0.16", "1.0", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "0.10", "0.17", "1.3" ], "tensorflow": [] } ]
Mrrrat/asr_project_template
[ "50d264684d90bc45c59f3e9be5766fabaf090d25" ]
[ "hw_asr/trainer/trainer.py" ]
[ "import random\nfrom random import shuffle\n\nimport PIL\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.utils import clip_grad_norm_\nfrom torchvision.transforms import ToTensor\nfrom tqdm import tqdm\n\nfrom hw_asr.base import BaseTrainer\nfrom hw_asr.logger.utils import plot_spectrogram_to_buf\nfrom hw_asr.metric.utils import calc_cer, calc_wer\nfrom hw_asr.utils import inf_loop, MetricTracker\n\n\nclass Trainer(BaseTrainer):\n \"\"\"\n Trainer class\n \"\"\"\n\n def __init__(\n self,\n model,\n criterion,\n metrics,\n optimizer,\n config,\n device,\n data_loader,\n text_encoder,\n valid_data_loader=None,\n lr_scheduler=None,\n len_epoch=None,\n skip_oom=True,\n ):\n super().__init__(model, criterion, metrics, optimizer, config, device)\n self.skip_oom = skip_oom\n self.text_encoder = text_encoder\n self.config = config\n self.data_loader = data_loader\n if len_epoch is None:\n # epoch-based training\n self.len_epoch = len(self.data_loader)\n else:\n # iteration-based training\n ###OBO\n #self.data_loader = inf_loop(data_loader)\n ###OBO\n self.len_epoch = len_epoch\n self.valid_data_loader = valid_data_loader\n ###OBO\n self.do_validation = False #self.valid_data_loader is not None\n ###OBO\n self.lr_scheduler = lr_scheduler\n self.log_step = 10\n\n self.train_metrics = MetricTracker(\n \"loss\", \"grad norm\", *[m.name for m in self.metrics], writer=self.writer\n )\n self.valid_metrics = MetricTracker(\n \"loss\", *[m.name for m in self.metrics], writer=self.writer\n )\n\n @staticmethod\n def move_batch_to_device(batch, device: torch.device):\n \"\"\"\n Move all necessary tensors to the HPU\n \"\"\"\n for tensor_for_gpu in [\"spectrogram\", \"text_encoded\"]:\n batch[tensor_for_gpu] = batch[tensor_for_gpu].to(device)\n return batch\n\n def _clip_grad_norm(self):\n if self.config[\"trainer\"].get(\"grad_norm_clip\", None) is not None:\n clip_grad_norm_(\n self.model.parameters(), self.config[\"trainer\"][\"grad_norm_clip\"]\n )\n\n def _train_epoch(self, epoch):\n \"\"\"\n Training logic for an epoch\n\n :param epoch: Integer, current training epoch.\n :return: A log that contains average loss and metric in this epoch.\n \"\"\"\n self.model.train()\n self.train_metrics.reset()\n self.writer.add_scalar(\"epoch\", epoch)\n for batch_idx, batch in enumerate(\n tqdm(self.data_loader, desc=\"train\", total=self.len_epoch)\n ):\n ###OBO\n if batch_idx >= self.len_epoch:\n break\n ###OBO\n try:\n batch = self.process_batch(\n batch,\n is_train=True,\n metrics=self.train_metrics,\n )\n except RuntimeError as e:\n if \"out of memory\" in str(e) and self.skip_oom:\n self.logger.warning(\"OOM on batch. 
Skipping batch.\")\n for p in self.model.parameters():\n if p.grad is not None:\n del p.grad # free some memory\n torch.cuda.empty_cache()\n continue\n else:\n raise e\n self.train_metrics.update(\"grad norm\", self.get_grad_norm())\n if batch_idx % self.log_step == 0:\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.logger.debug(\n \"Train Epoch: {} {} Loss: {:.6f}\".format(\n epoch, self._progress(batch_idx), batch[\"loss\"].item()\n )\n )\n self.writer.add_scalar(\n \"learning rate\", self.lr_scheduler.get_last_lr()[0]\n )\n self._log_predictions(part=\"train\", **batch)\n self._log_spectrogram(batch[\"spectrogram\"])\n self._log_scalars(self.train_metrics)\n\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{\"val_\" + k: v for k, v in val_log.items()})\n\n return log\n\n def process_batch(self, batch, is_train: bool, metrics: MetricTracker):\n batch = self.move_batch_to_device(batch, self.device)\n if is_train:\n self.optimizer.zero_grad()\n outputs = self.model(**batch)\n if type(outputs) is dict:\n batch.update(outputs)\n else:\n batch[\"logits\"] = outputs\n\n batch[\"log_probs\"] = F.log_softmax(batch[\"logits\"], dim=-1)\n batch[\"log_probs_length\"] = self.model.transform_input_lengths(\n batch[\"spectrogram_length\"]\n )\n batch[\"loss\"] = self.criterion(**batch)\n if is_train:\n batch[\"loss\"].backward()\n self._clip_grad_norm()\n self.optimizer.step()\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n metrics.update(\"loss\", batch[\"loss\"].item())\n for met in self.metrics:\n metrics.update(met.name, met(**batch))\n return batch\n\n def _valid_epoch(self, epoch):\n \"\"\"\n Validate after training an epoch\n\n :param epoch: Integer, current training epoch.\n :return: A log that contains information about validation\n \"\"\"\n self.model.eval()\n self.valid_metrics.reset()\n with torch.no_grad():\n for batch_idx, batch in tqdm(\n enumerate(self.valid_data_loader),\n desc=\"validation\",\n total=len(self.valid_data_loader),\n ):\n batch = self.process_batch(\n batch,\n is_train=False,\n metrics=self.valid_metrics,\n )\n self.writer.set_step(epoch * self.len_epoch, \"valid\")\n self._log_scalars(self.valid_metrics)\n self._log_predictions(part=\"val\", **batch)\n self._log_spectrogram(batch[\"spectrogram\"])\n\n # add histogram of model parameters to the tensorboard\n for name, p in self.model.named_parameters():\n self.writer.add_histogram(name, p, bins=\"auto\")\n return self.valid_metrics.result()\n\n def _progress(self, batch_idx):\n base = \"[{}/{} ({:.0f}%)]\"\n if hasattr(self.data_loader, \"n_samples\"):\n current = batch_idx * self.data_loader.batch_size\n total = self.data_loader.n_samples\n else:\n current = batch_idx\n total = self.len_epoch\n return base.format(current, total, 100.0 * current / total)\n\n def _log_predictions(\n self,\n text,\n log_probs,\n log_probs_length,\n examples_to_log=20,\n *args,\n **kwargs,\n ):\n # TODO: implement logging of beam search results\n if self.writer is None:\n return\n argmax_inds = log_probs.cpu().argmax(-1)\n argmax_inds = [\n inds[: int(ind_len)]\n for inds, ind_len in zip(argmax_inds, log_probs_length)\n ]\n argmax_texts_raw = [self.text_encoder.decode(inds) for inds in argmax_inds]\n argmax_texts = [self.text_encoder.ctc_decode(inds) for inds in argmax_inds]\n tuples = list(zip(argmax_texts, text, argmax_texts_raw))\n shuffle(tuples)\n to_log_pred = []\n to_log_pred_raw = []\n for pred, target, raw_pred in 
tuples[:examples_to_log]:\n wer = calc_wer(target, pred) * 100\n cer = calc_cer(target, pred) * 100\n to_log_pred.append(\n f\"true: '{target}' | pred: '{pred}' \"\n f\"| wer: {wer:.2f} | cer: {cer:.2f}\"\n )\n to_log_pred_raw.append(f\"true: '{target}' | pred: '{raw_pred}'\\n\")\n self.writer.add_text(f\"predictions\", \"< < < < > > > >\".join(to_log_pred))\n self.writer.add_text(\n f\"predictions_raw\", \"< < < < > > > >\".join(to_log_pred_raw)\n )\n\n def _log_spectrogram(self, spectrogram_batch):\n spectrogram = random.choice(spectrogram_batch)\n image = PIL.Image.open(plot_spectrogram_to_buf(spectrogram.cpu().log()))\n self.writer.add_image(\"spectrogram\", ToTensor()(image))\n\n @torch.no_grad()\n def get_grad_norm(self, norm_type=2):\n parameters = self.model.parameters()\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = [p for p in parameters if p.grad is not None]\n total_norm = torch.norm(\n torch.stack(\n [torch.norm(p.grad.detach(), norm_type).cpu() for p in parameters]\n ),\n norm_type,\n )\n return total_norm.item()\n\n def _log_scalars(self, metric_tracker: MetricTracker):\n if self.writer is None:\n return\n for metric_name in metric_tracker.keys():\n self.writer.add_scalar(f\"{metric_name}\", metric_tracker.avg(metric_name))\n" ]
[ [ "torch.no_grad", "torch.cuda.empty_cache", "torch.nn.functional.log_softmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ahgamut/cliquematch
[ "3065b9edfa8ed3dc9986b7152913436ada26d195" ]
[ "tests/test_align.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n cliquematch.test_align\n ~~~~~~~~~~~~~~~~~~~~~~\n\n testcases for cliquematch.AlignGraph\n\n :copyright: (c) 2020 by Gautham Venkatasubramanian.\n :license: see LICENSE for more details.\n\"\"\"\nimport pytest\nimport numpy as np\nimport cliquematch\nimport random\n\n\nclass TestAlignGraph(object):\n \"\"\"\n Test out properties of AlignGraph using random numpy arrays\n\n AlignGraph is essentially the same as A2AGraph, so we only\n\n * test that edges can be built using control points and a mask\n * test dfs and heuristic work correctly using above construction\n \"\"\"\n\n np.random.seed(824)\n mask = np.ones((200, 200), dtype=np.bool)\n S1 = np.float64(np.random.uniform(0, 100, (20, 2)))\n S2 = np.float64(np.random.uniform(0, 100, (20, 2)))\n\n def test_dfs(self):\n S1 = self.S1\n subset = list(x for x in range(20))\n random.shuffle(subset)\n subset = subset[:10]\n rotmat = np.array(\n [\n [np.cos(np.pi / 3), -np.sin(np.pi / 3)],\n [np.sin(np.pi / 3), np.cos(np.pi / 3)],\n ]\n )\n S1_sub = S1[subset, :]\n S2 = np.float64(np.matmul(S1_sub, rotmat) + [100, 100])\n print(S1_sub, S2)\n G = cliquematch.AlignGraph(S1, S2)\n G.epsilon = 0.001\n G.build_edges_with_filter(S2, self.mask, percentage=0.8)\n ans = G.get_correspondence(\n use_heuristic=False, use_dfs=True, upper_bound=10, return_indices=False\n )\n\n assert abs(ans[\"theta\"] - np.pi / 3) < 1e-5\n assert abs(ans[\"dx\"] - 100) < 1e-5\n assert abs(ans[\"dy\"] - 100) < 1e-5\n\n def test_heuristic(self):\n S1 = self.S1\n subset = list(x for x in range(20))\n random.shuffle(subset)\n subset = subset[:10]\n rotmat = np.array(\n [\n [np.cos(np.pi / 3), -np.sin(np.pi / 3)],\n [np.sin(np.pi / 3), np.cos(np.pi / 3)],\n ]\n )\n S1_sub = S1[subset, :]\n S2 = np.float64(np.matmul(S1_sub, rotmat) + [100, 100])\n G = cliquematch.AlignGraph(S1, S2)\n G.epsilon = 0.001\n G.build_edges_with_filter(S2, self.mask, percentage=0.8)\n ans = G.get_correspondence(\n use_heuristic=True, use_dfs=False, upper_bound=10, return_indices=False\n )\n print(G)\n\n assert abs(ans[\"theta\"] - np.pi / 3) < 1e-5\n assert abs(ans[\"dx\"] - 100) < 1e-5\n assert abs(ans[\"dy\"] - 100) < 1e-5\n\n def test_filter(self):\n S1 = self.S1\n subset = list(x for x in range(20))\n random.shuffle(subset)\n subset = subset[:10]\n rotmat = np.array(\n [\n [np.cos(np.pi / 3), -np.sin(np.pi / 3)],\n [np.sin(np.pi / 3), np.cos(np.pi / 3)],\n ]\n )\n S1_sub = S1[subset, :]\n S2 = np.float64(np.matmul(S1_sub, rotmat) + [100, 100])\n G = cliquematch.AlignGraph(S1, S2)\n G.epsilon = 0.001\n G.build_edges_with_filter(S2, self.mask, percentage=0.8)\n ans = G.get_correspondence(\n use_heuristic=False, use_dfs=True, upper_bound=10, return_indices=False\n )\n print(G)\n\n G2 = cliquematch.AlignGraph(S1, S2)\n G2.epsilon = 0.001\n filt = cliquematch.wrappers.aligngraph.MaskFilter(S2, self.mask, percentage=0.8)\n G2.build_edges_with_condition(filt, False)\n ans2 = G2.get_correspondence(\n use_dfs=True, use_heuristic=False, upper_bound=10, return_indices=False\n )\n print(G2)\n assert abs(ans[\"theta\"] - ans2[\"theta\"]) < 1e-8\n assert abs(ans[\"dx\"] - ans2[\"dx\"]) < 1e-8\n assert abs(ans[\"dy\"] - ans2[\"dy\"]) < 1e-8\n" ]
[ [ "numpy.random.seed", "numpy.matmul", "numpy.cos", "numpy.ones", "numpy.sin", "numpy.random.uniform" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
emorynlp/levi-graph-amr-parser
[ "f71f1056c13181b8db31d6136451fb8d57114819", "f71f1056c13181b8db31d6136451fb8d57114819", "f71f1056c13181b8db31d6136451fb8d57114819" ]
[ "elit/components/parsers/second_order/tree_crf_dependency_parser.py", "amr_parser/dump_attention.py", "elit/components/taggers/transformers/transformer_transform_tf.py" ]
[ "# -*- coding:utf-8 -*-\n# Author: hankcs\n# Date: 2020-05-08 20:51\nimport functools\nimport os\nfrom typing import Union, Any, List\n\nimport torch\nfrom alnlp.modules.util import lengths_to_mask\nfrom torch import nn\nfrom torch.optim import Adam\nfrom torch.optim.lr_scheduler import ExponentialLR\nfrom torch.utils.data import DataLoader\n\nfrom elit.common.constant import UNK, IDX\nfrom elit.common.dataset import PadSequenceDataLoader\nfrom elit.common.structure import History\nfrom elit.common.torch_component import TorchComponent\nfrom elit.common.transform import LowerCase, FieldLength, PunctuationMask, TransformList\nfrom elit.common.vocab import Vocab, VocabCounter\nfrom elit.common.conll import CoNLLWord, CoNLLSentence\nfrom elit.components.parsers.constituency.treecrf import CRF2oDependency\nfrom elit.components.parsers.second_order.model import DependencyModel\nfrom elit.components.parsers.second_order.treecrf_decoder import TreeCRFDecoder\nfrom elit.datasets.parsing.conll_dataset import CoNLLParsingDataset, append_bos, get_sibs\nfrom elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding, ContextualWordEmbeddingModule\nfrom elit.layers.embeddings.embedding import Embedding, EmbeddingList, ConcatModuleList\nfrom elit.layers.embeddings.util import index_word2vec_with_vocab\nfrom elit.layers.transformers.pt_imports import AutoModel_\nfrom elit.layers.transformers.utils import build_optimizer_scheduler_with_transformer\nfrom elit.metrics.parsing.attachmentscore import AttachmentScore\nfrom elit.transform.transformer_tokenizer import TransformerSequenceTokenizer\nfrom elit.utils.time_util import CountdownTimer\nfrom elit.common.util import merge_locals_kwargs, merge_dict, reorder\n\n\nclass TreeConditionalRandomFieldDependencyParser(TorchComponent):\n def __init__(self) -> None:\n super().__init__()\n self.model: DependencyModel = self.model\n self._transformer_transform = None\n\n def predict(self, data: Any, batch_size=None, batch_max_tokens=None, output_format='conllx', **kwargs):\n if not data:\n return []\n use_pos = self.use_pos\n flat = self.input_is_flat(data, use_pos)\n if flat:\n data = [data]\n samples = self.build_samples(data, use_pos)\n if not batch_max_tokens:\n batch_max_tokens = self.config.batch_max_tokens\n if not batch_size:\n batch_size = self.config.batch_size\n dataloader = self.build_dataloader(samples,\n device=self.devices[0], shuffle=False,\n **merge_dict(self.config,\n batch_size=batch_size,\n batch_max_tokens=batch_max_tokens,\n overwrite=True,\n **kwargs))\n predictions, build_data, data, order = self.before_outputs(data)\n for batch in dataloader:\n arc_scores, rel_scores, mask, puncts = self.feed_batch(batch)\n self.collect_outputs(arc_scores, rel_scores, mask, batch, predictions, order, data, use_pos,\n build_data)\n outputs = self.post_outputs(predictions, data, order, use_pos, build_data)\n if flat:\n return outputs[0]\n return outputs\n\n def build_samples(self, data, use_pos=None):\n samples = []\n for idx, each in enumerate(data):\n sample = {IDX: idx}\n if use_pos:\n token, pos = zip(*each)\n sample.update({'FORM': list(token), 'CPOS': list(pos)})\n else:\n token = each\n sample.update({'FORM': list(token)})\n samples.append(sample)\n return samples\n\n def input_is_flat(self, data, use_pos=None):\n if use_pos:\n flat = isinstance(data[0], (list, tuple)) and isinstance(data[0][0], str)\n else:\n flat = isinstance(data[0], str)\n return flat\n\n def before_outputs(self, data):\n predictions, order = [], []\n 
build_data = data is None\n if build_data:\n data = []\n return predictions, build_data, data, order\n\n def post_outputs(self, predictions, data, order, use_pos, build_data):\n predictions = reorder(predictions, order)\n if build_data:\n data = reorder(data, order)\n outputs = []\n self.predictions_to_human(predictions, outputs, data, use_pos)\n return outputs\n\n def predictions_to_human(self, predictions, outputs, data, use_pos):\n for d, (arcs, rels) in zip(data, predictions):\n sent = CoNLLSentence()\n for idx, (cell, a, r) in enumerate(zip(d, arcs, rels)):\n if use_pos:\n token, pos = cell\n else:\n token, pos = cell, None\n sent.append(CoNLLWord(idx + 1, token, cpos=pos, head=a, deprel=self.vocabs['rel'][r]))\n outputs.append(sent)\n\n def collect_outputs(self, arc_scores, rel_scores, mask, batch, predictions, order, data, use_pos,\n build_data):\n lens = [len(token) - 1 for token in batch['token']]\n arc_preds, rel_preds = self.decode(arc_scores, rel_scores, mask, batch)\n self.collect_outputs_extend(predictions, arc_preds, rel_preds, lens, mask)\n order.extend(batch[IDX])\n if build_data:\n if use_pos:\n data.extend(zip(batch['FORM'], batch['CPOS']))\n else:\n data.extend(batch['FORM'])\n\n def collect_outputs_extend(self, predictions: list, arc_preds, rel_preds, lens, mask):\n predictions.extend(zip([seq.tolist() for seq in arc_preds[mask].split(lens)],\n [seq.tolist() for seq in rel_preds[mask].split(lens)]))\n\n def fit(self,\n trn_data,\n dev_data,\n save_dir,\n embed,\n n_mlp_arc=500,\n n_mlp_rel=100,\n n_mlp_sib=100,\n mlp_dropout=.33,\n lr=2e-3,\n transformer_lr=5e-5,\n mu=.9,\n nu=.9,\n epsilon=1e-12,\n grad_norm=5.0,\n decay=.75,\n decay_steps=5000,\n weight_decay=0,\n warmup_steps=0.1,\n separate_optimizer=True,\n patience=100,\n lowercase=False,\n epochs=50000,\n tree=False,\n proj=True,\n mbr=True,\n partial=False,\n punct=False,\n min_freq=2,\n logger=None,\n verbose=True,\n unk=UNK,\n max_sequence_length=512,\n batch_size=None,\n sampler_builder=None,\n gradient_accumulation=1,\n devices: Union[float, int, List[int]] = None,\n transform=None,\n eval_trn=False,\n bos='\\0',\n **kwargs):\n return super().fit(**merge_locals_kwargs(locals(), kwargs))\n\n def execute_training_loop(self, trn, dev, devices, epochs, logger, patience, save_dir, optimizer,\n gradient_accumulation, **kwargs):\n optimizer, scheduler, transformer_optimizer, transformer_scheduler = optimizer\n criterion = self.build_criterion()\n best_e, best_metric = 0, self.build_metric()\n timer = CountdownTimer(epochs)\n history = History()\n ratio_width = len(f'{len(trn) // gradient_accumulation}/{len(trn) // gradient_accumulation}')\n for epoch in range(1, epochs + 1):\n # train one epoch and update the parameters\n logger.info(f\"[yellow]Epoch {epoch} / {epochs}:[/yellow]\")\n self.fit_dataloader(trn, optimizer, scheduler, criterion, epoch, logger, history,\n transformer_optimizer, transformer_scheduler,\n gradient_accumulation=gradient_accumulation, eval_trn=self.config.eval_trn)\n loss, dev_metric = self.evaluate_dataloader(dev, criterion, ratio_width=ratio_width, logger=logger)\n timer.update()\n # logger.info(f\"{'Dev' + ' ' * ratio_width} loss: {loss:.4f} {dev_metric}\")\n # save the model if it is the best so far\n report = f\"{timer.elapsed_human} / {timer.total_time_human} ETA: {timer.eta_human}\"\n if dev_metric > best_metric:\n best_e, best_metric = epoch, dev_metric\n self.save_weights(save_dir)\n report += ' ([red]saved[/red])'\n else:\n if patience != epochs:\n report += f' ({epoch - 
best_e}/{patience})'\n else:\n report += f' ({epoch - best_e})'\n logger.info(report)\n if patience is not None and epoch - best_e >= patience:\n logger.info(f'LAS has stopped improving for {patience} epochs, early stop.')\n break\n timer.stop()\n if not best_e:\n self.save_weights(save_dir)\n elif best_e != epoch:\n self.load_weights(save_dir)\n logger.info(f\"Max score of dev is {best_metric.score:.2%} at epoch {best_e}\")\n logger.info(f\"Average time of each epoch is {timer.elapsed_average_human}\")\n logger.info(f\"{timer.elapsed_human} elapsed\")\n\n def build_optimizer(self, epochs, trn, gradient_accumulation, **kwargs):\n config = self.config\n model = self.model\n if isinstance(model, nn.DataParallel):\n model = model.module\n transformer = self._get_transformer_builder()\n if transformer and transformer.trainable:\n transformer = self._get_transformer()\n optimizer = Adam(set(model.parameters()) - set(transformer.parameters()),\n config.lr,\n (config.mu, config.nu),\n config.epsilon)\n if self.config.transformer_lr:\n num_training_steps = len(trn) * epochs // gradient_accumulation\n if not self.config.separate_optimizer:\n optimizer, scheduler = build_optimizer_scheduler_with_transformer(model,\n transformer,\n config.lr,\n config.transformer_lr,\n num_training_steps,\n config.warmup_steps,\n config.weight_decay,\n config.epsilon)\n transformer_optimizer, transformer_scheduler = None, None\n else:\n transformer_optimizer, transformer_scheduler = \\\n build_optimizer_scheduler_with_transformer(transformer,\n transformer,\n config.lr,\n config.transformer_lr,\n num_training_steps,\n config.warmup_steps,\n config.weight_decay,\n config.epsilon)\n else:\n transformer.requires_grad_(False)\n transformer_optimizer, transformer_scheduler = None, None\n else:\n optimizer = Adam(model.parameters(),\n config.lr,\n (config.mu, config.nu),\n config.epsilon)\n transformer_optimizer, transformer_scheduler = None, None\n if self.config.separate_optimizer:\n scheduler = ExponentialLR(optimizer, config.decay ** (1 / config.decay_steps))\n # noinspection PyUnboundLocalVariable\n optimizer = Adam(model.parameters(), **{'lr': 0.002, 'betas': (0.9, 0.9), 'eps': 1e-12})\n scheduler = ExponentialLR(optimizer, **{'gamma': 0.9999424652406974})\n return optimizer, scheduler, transformer_optimizer, transformer_scheduler\n\n # noinspection PyMethodOverriding\n def build_dataloader(self,\n data,\n shuffle,\n device,\n embed: Embedding,\n training=False,\n logger=None,\n gradient_accumulation=1,\n sampler_builder=None,\n batch_size=None,\n bos='\\0',\n **kwargs) -> DataLoader:\n first_transform = TransformList(functools.partial(append_bos, bos=bos))\n embed_transform = embed.transform(vocabs=self.vocabs)\n transformer_transform = self._get_transformer_transform_from_transforms(embed_transform)\n if embed_transform:\n if transformer_transform and isinstance(embed_transform, TransformList):\n embed_transform.remove(transformer_transform)\n\n first_transform.append(embed_transform)\n dataset = self.build_dataset(data, first_transform=first_transform)\n if self.config.get('transform', None):\n dataset.append_transform(self.config.transform)\n\n if self.vocabs.mutable:\n self.build_vocabs(dataset, logger, self._transformer_trainable())\n if transformer_transform and isinstance(embed_transform, TransformList):\n embed_transform.append(transformer_transform)\n\n dataset.append_transform(FieldLength('token', 'sent_length'))\n if isinstance(data, str):\n dataset.purge_cache()\n if len(dataset) > 1000 and 
isinstance(data, str):\n timer = CountdownTimer(len(dataset))\n self.cache_dataset(dataset, timer, training, logger)\n if sampler_builder:\n lens = [sample['sent_length'] for sample in dataset]\n sampler = sampler_builder.build(lens, shuffle, gradient_accumulation)\n else:\n sampler = None\n loader = PadSequenceDataLoader(dataset=dataset,\n batch_sampler=sampler,\n batch_size=batch_size,\n pad=self.get_pad_dict(),\n device=device,\n vocabs=self.vocabs)\n return loader\n\n def cache_dataset(self, dataset, timer, training=False, logger=None):\n for each in dataset:\n timer.log('Preprocessing and caching samples [blink][yellow]...[/yellow][/blink]')\n\n def get_pad_dict(self):\n return {'arc': 0}\n\n def build_dataset(self, data, first_transform=None):\n if not first_transform:\n first_transform = append_bos\n transform = [first_transform, get_sibs]\n if self.config.get('lowercase', False):\n transform.append(LowerCase('token'))\n transform.append(self.vocabs)\n if not self.config.punct:\n transform.append(PunctuationMask('token', 'punct_mask'))\n return CoNLLParsingDataset(data, transform=transform)\n\n def build_tokenizer_transform(self):\n return TransformerSequenceTokenizer(self.transformer_tokenizer, 'token', '',\n ret_token_span=True, cls_is_bos=True,\n max_seq_length=self.config.get('max_sequence_length',\n 512),\n truncate_long_sequences=False)\n\n def build_vocabs(self, dataset, logger=None, transformer=False):\n rel_vocab = self.vocabs.get('rel', None)\n if rel_vocab is None:\n rel_vocab = Vocab(unk_token=None, pad_token=self.config.get('pad_rel', None))\n self.vocabs.put(rel=rel_vocab)\n\n timer = CountdownTimer(len(dataset))\n if transformer:\n token_vocab = None\n else:\n self.vocabs.token = token_vocab = VocabCounter(unk_token=self.config.get('unk', UNK))\n for i, sample in enumerate(dataset):\n timer.log('Building vocab [blink][yellow]...[/yellow][/blink]', ratio_percentage=True)\n min_freq = self.config.get('min_freq', None)\n if min_freq:\n token_vocab.trim(min_freq)\n rel_vocab.set_unk_as_safe_unk() # Some relation in dev set is OOV\n self.vocabs.lock()\n self.vocabs.summary(logger=logger)\n if token_vocab:\n self.config.n_words = len(self.vocabs['token'])\n self.config.n_rels = len(self.vocabs['rel'])\n if token_vocab:\n self.config.pad_index = self.vocabs['token'].pad_idx\n self.config.unk_index = self.vocabs['token'].unk_idx\n\n # noinspection PyMethodOverriding\n def build_model(self, embed: Embedding, encoder, n_mlp_arc, n_mlp_rel, mlp_dropout, n_mlp_sib, training=True,\n **kwargs) -> torch.nn.Module:\n model = DependencyModel(\n embed=embed.module(vocabs=self.vocabs),\n encoder=encoder,\n decoder=TreeCRFDecoder(encoder.get_output_dim(), n_mlp_arc, n_mlp_sib, n_mlp_rel, mlp_dropout,\n len(self.vocabs['rel']))\n )\n return model\n\n def build_embeddings(self, training=True):\n pretrained_embed = None\n if self.config.get('pretrained_embed', None):\n pretrained_embed = index_word2vec_with_vocab(self.config.pretrained_embed, self.vocabs['token'],\n init='zeros', normalize=True)\n transformer = self.config.transformer\n if transformer:\n transformer = AutoModel_.from_pretrained(transformer, training=training)\n return pretrained_embed, transformer\n\n # noinspection PyMethodOverriding\n def fit_dataloader(self,\n trn,\n optimizer,\n scheduler,\n criterion,\n epoch,\n logger,\n history: History,\n transformer_optimizer=None,\n transformer_scheduler=None,\n gradient_accumulation=1,\n eval_trn=False,\n **kwargs):\n self.model.train()\n\n timer = 
CountdownTimer(history.num_training_steps(len(trn), gradient_accumulation))\n metric = self.build_metric(training=True)\n total_loss = 0\n for idx, batch in enumerate(trn):\n optimizer.zero_grad()\n (s_arc, s_sib, s_rel), mask, puncts = self.feed_batch(batch)\n arcs, sibs, rels = batch['arc'], batch['sib_id'], batch['rel_id']\n\n loss, s_arc = self.compute_loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask)\n if gradient_accumulation > 1:\n loss /= gradient_accumulation\n loss.backward()\n total_loss += loss.item()\n if eval_trn:\n arc_preds, rel_preds = self.decode(s_arc, s_sib, s_rel, mask)\n self.update_metric(arc_preds, rel_preds, arcs, rels, mask, puncts, metric)\n if history.step(gradient_accumulation):\n self._step(optimizer, scheduler, transformer_optimizer, transformer_scheduler)\n report = self._report(total_loss / (timer.current + 1), metric if eval_trn else None)\n lr = scheduler.get_last_lr()[0]\n report += f' lr: {lr:.4e}'\n timer.log(report, ratio_percentage=False, logger=logger)\n del loss\n\n def _step(self, optimizer, scheduler, transformer_optimizer, transformer_scheduler):\n if self.config.get('grad_norm', None):\n nn.utils.clip_grad_norm_(self.model.parameters(),\n self.config.grad_norm)\n optimizer.step()\n scheduler.step()\n if self._transformer_transform and self.config.transformer_lr and transformer_optimizer:\n transformer_optimizer.step()\n transformer_optimizer.zero_grad()\n transformer_scheduler.step()\n\n def feed_batch(self, batch):\n words, feats, lens, puncts = batch.get('token_id', None), batch.get('pos_id', None), batch['sent_length'], \\\n batch.get('punct_mask', None)\n mask = lengths_to_mask(lens)\n logits = self.model(batch, mask)\n if self.model.training:\n mask = mask.clone()\n # ignore the first token of each sentence\n mask[:, 0] = 0\n return logits, mask, puncts\n\n def _report(self, loss, metric: AttachmentScore = None):\n return f'loss: {loss:.4f} {metric}' if metric else f'loss: {loss:.4f}'\n\n def compute_loss(self, s_arc, s_sib, s_rel, arcs, sibs, rels, mask):\n crf: CRF2oDependency = self.model.decoder.crf\n return crf.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.config.mbr, self.config.partial)\n\n # noinspection PyUnboundLocalVariable\n @torch.no_grad()\n def evaluate_dataloader(self, loader: PadSequenceDataLoader, criterion, logger=None, filename=None, output=False,\n ratio_width=None,\n metric=None,\n **kwargs):\n self.model.eval()\n\n total_loss = 0\n if not metric:\n metric = self.build_metric()\n\n timer = CountdownTimer(len(loader))\n for batch in loader:\n (s_arc, s_sib, s_rel), mask, puncts = self.feed_batch(batch)\n arcs, sibs, rels = batch['arc'], batch['sib_id'], batch['rel_id']\n loss, s_arc = self.compute_loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask)\n total_loss += float(loss)\n arc_preds, rel_preds = self.decode(s_arc, s_sib, s_rel, mask)\n self.update_metric(arc_preds, rel_preds, arcs, rels, mask, puncts, metric)\n report = self._report(total_loss / (timer.current + 1), metric)\n if filename:\n report = f'{os.path.basename(filename)} ' + report\n timer.log(report, ratio_percentage=False, logger=logger, ratio_width=ratio_width)\n total_loss /= len(loader)\n\n return total_loss, metric\n\n def update_metric(self, arc_preds, rel_preds, arcs, rels, mask, puncts, metric):\n # ignore all punctuation if not specified\n if not self.config.punct:\n mask &= puncts\n metric(arc_preds, rel_preds, arcs, rels, mask)\n\n def decode(self, s_arc, s_sib, s_rel, mask):\n crf: CRF2oDependency = self.model.decoder.crf\n return 
crf.decode(s_arc, s_sib, s_rel, mask, self.config.tree and not self.model.training, self.config.mbr,\n self.config.proj)\n\n def build_criterion(self, **kwargs):\n return None\n\n def build_metric(self, **kwargs):\n return AttachmentScore()\n\n def _get_transformer_transform_from_transforms(self, transform: Union[\n TransformList, TransformerSequenceTokenizer]) -> TransformerSequenceTokenizer:\n def _get():\n if isinstance(transform, TransformerSequenceTokenizer):\n # noinspection PyTypeChecker\n return transform\n elif isinstance(transform, TransformList):\n # noinspection PyTypeChecker,PyArgumentList\n for each in transform:\n if isinstance(each, TransformerSequenceTokenizer):\n return each\n\n if self._transformer_transform is None:\n self._transformer_transform = _get()\n return self._transformer_transform\n\n def _get_transformer(self):\n embed = self.model.embed\n if isinstance(embed, ContextualWordEmbeddingModule):\n return embed\n if isinstance(embed, ConcatModuleList):\n for each in embed:\n if isinstance(each, ContextualWordEmbeddingModule):\n return each\n\n def _get_transformer_builder(self):\n embed: Embedding = self.config.embed\n if isinstance(embed, ContextualWordEmbedding):\n return embed\n if isinstance(embed, EmbeddingList):\n for each in embed.to_list():\n if isinstance(embed, ContextualWordEmbedding):\n return each\n\n def _transformer_trainable(self):\n builder = self._get_transformer_builder()\n if not builder:\n return False\n return builder.trainable\n", "import torch\nimport os\n\nfrom elit.components.amr.amr_parser.data import REL\nfrom elit.datasets.parsing.amr import unlinearize, remove_unconnected_components, un_kahn\n\nif os.environ.get('USE_TF', None) is None:\n os.environ[\"USE_TF\"] = 'NO' # saves time loading transformers\nfrom amr_parser.data import Vocab, DataLoader, DUM, END, CLS, NIL, seperate_concept_from_rel\nfrom amr_parser.parser import Parser\nfrom amr_parser.postprocess import PostProcessor\nfrom amr_parser.extract import LexicalMap\nfrom amr_parser.utils import move_to_device\nfrom amr_parser.bert_utils import BertEncoderTokenizer, BertEncoder, load_bert\nfrom amr_parser.match import match\n\nimport argparse, os, re\n\n\ndef parse_config():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--load_path', type=str, required=True)\n parser.add_argument('--test_data', type=str, required=True)\n parser.add_argument('--device', type=int, default=0)\n\n return parser.parse_args()\n\n\ndef show_progress(model, dev_data):\n model.eval()\n loss_acm = 0.\n for batch in dev_data:\n batch = move_to_device(batch, model.device)\n concept_loss, arc_loss, rel_loss = model(batch)\n loss = concept_loss + arc_loss + rel_loss\n loss_acm += loss.item()\n print('total loss', loss_acm)\n return loss_acm\n\n\ndef load_ckpt_without_bert(model, test_model):\n ckpt = torch.load(test_model, map_location=torch.device('cpu'))['model']\n for k, v in model.state_dict().items():\n if k.startswith('bert_encoder'):\n ckpt[k] = v\n model.load_state_dict(ckpt, strict=False)\n\n\ndef predict(load_path, test_data, device=0, test_batch_size=6666):\n test_models = []\n if os.path.isdir(load_path):\n for file in os.listdir(load_path):\n fname = os.path.join(load_path, file)\n if os.path.isfile(fname):\n test_models.append(fname)\n model_args = torch.load(fname)['args']\n else:\n test_models.append(load_path)\n model_args = torch.load(load_path, map_location=torch.device('cpu'))['args']\n # if 'joint' in test_data or 'joint' in load_path:\n # model_args.tok_vocab = 
'/home/hhe43/elit/data/amr/amr_2.0/tok_vocab'\n # model_args.lem_vocab = '/home/hhe43/elit/data/amr/amr_2.0/lem_vocab'\n # model_args.pos_vocab = '/home/hhe43/elit/data/amr/amr_2.0/pos_vocab'\n # model_args.ner_vocab = '/home/hhe43/elit/data/amr/amr_2.0/ner_vocab'\n # model_args.predictable_concept_vocab = '/home/hhe43/elit/data/amr/amr_2.0/predictable_concept_vocab'\n # model_args.concept_vocab = '/home/hhe43/elit/data/amr/amr_2.0/concept_vocab'\n # model_args.rel_vocab = '/home/hhe43/elit/data/amr/amr_2.0/rel_vocab'\n # model_args.word_char_vocab = '/home/hhe43/elit/data/amr/amr_2.0/word_char_vocab'\n # model_args.concept_char_vocab = '/home/hhe43/elit/data/amr/amr_2.0/concept_char_vocab'\n\n vocabs = dict()\n vocabs['tok'] = Vocab(model_args.tok_vocab, 5, [CLS])\n vocabs['lem'] = Vocab(model_args.lem_vocab, 5, [CLS])\n vocabs['pos'] = Vocab(model_args.pos_vocab, 5, [CLS])\n vocabs['ner'] = Vocab(model_args.ner_vocab, 5, [CLS])\n vocabs['predictable_concept'] = Vocab(model_args.predictable_concept_vocab, 5, [DUM, END])\n vocabs['concept'] = Vocab(model_args.concept_vocab, 5, [DUM, END])\n vocabs['rel'] = Vocab(model_args.rel_vocab, 50, [NIL])\n vocabs['word_char'] = Vocab(model_args.word_char_vocab, 100, [CLS, END])\n vocabs['concept_char'] = Vocab(model_args.concept_char_vocab, 100, [CLS, END])\n if hasattr(model_args, 'separate_rel') and model_args.separate_rel:\n seperate_concept_from_rel(vocabs)\n lexical_mapping = LexicalMap()\n bert_encoder = None\n if model_args.with_bert:\n bert_path = model_args.bert_path\n if 'bert-base-cased' in bert_path:\n bert_path = 'bert-base-cased'\n bert_tokenizer = BertEncoderTokenizer.from_pretrained(bert_path, do_lower_case=False)\n bert_encoder = load_bert(bert_path)\n vocabs['bert_tokenizer'] = bert_tokenizer\n if device < 0:\n device = torch.device('cpu')\n else:\n device = torch.device('cuda', device)\n levi_graph = model_args.levi_graph if hasattr(model_args, 'levi_graph') else False\n model = Parser(vocabs,\n model_args.word_char_dim, model_args.word_dim, model_args.pos_dim, model_args.ner_dim,\n model_args.concept_char_dim, model_args.concept_dim,\n model_args.cnn_filters, model_args.char2word_dim, model_args.char2concept_dim,\n model_args.embed_dim, model_args.ff_embed_dim, model_args.num_heads, model_args.dropout,\n model_args.snt_layers, model_args.graph_layers, model_args.inference_layers, model_args.rel_dim,\n bert_encoder=bert_encoder, device=device,\n joint_arc_concept=hasattr(model_args, 'model_args') and model_args.joint_arc_concept,\n levi_graph=levi_graph)\n test_data = DataLoader(vocabs, lexical_mapping, test_data, test_batch_size, for_train=False,\n levi_graph=levi_graph)\n for test_model in test_models:\n load_ckpt_without_bert(model, test_model)\n model = model.to(device)\n model.train()\n for batch in test_data:\n batch = move_to_device(batch, model.device)\n concept_loss, arc_loss, rel_loss, graph_arc_loss = model(batch)\n break\n\n\ndef main():\n args = parse_config()\n load_path = args.load_path\n device = args.device\n\n predict(load_path, args.test_data, device)\n\n\nif __name__ == \"__main__\":\n main()\n", "# -*- coding:utf-8 -*-\n# Author: hankcs\n# Date: 2019-12-29 15:14\nfrom typing import Union, Tuple, List, Iterable\n\nimport tensorflow as tf\n\nfrom elit.common.structure import SerializableDict\nfrom elit.common.transform_tf import Transform\nfrom elit.common.vocab_tf import VocabTF\nfrom elit.layers.transformers.utils_tf import convert_examples_to_features\nfrom elit.transform.tsv import 
TsvTaggingFormat\n\n\nclass TransformerTransform(TsvTaggingFormat, Transform):\n def __init__(self,\n tokenizer=None,\n config: SerializableDict = None,\n map_x=False, map_y=False, **kwargs) -> None:\n super().__init__(config, map_x, map_y, **kwargs)\n self._tokenizer = tokenizer\n self.tag_vocab: VocabTF = None\n self.special_token_ids = None\n self.pad = '[PAD]'\n self.unk = '[UNK]'\n\n @property\n def max_seq_length(self):\n # -2 for special tokens [CLS] and [SEP]\n return self.config.get('max_seq_length', 128) - 2\n\n @property\n def tokenizer(self):\n return self._tokenizer\n\n @tokenizer.setter\n def tokenizer(self, tokenizer):\n self._tokenizer = tokenizer\n vocab = tokenizer._vocab if hasattr(tokenizer, '_vocab') else tokenizer.vocab\n if self.pad not in vocab:\n # English albert use <pad> instead of [PAD]\n self.pad = '<pad>'\n if self.unk not in vocab:\n self.unk = '<unk>'\n self.special_token_ids = tf.constant([vocab[token] for token in [self.pad, '[CLS]', '[SEP]']],\n dtype=tf.int32)\n\n def fit(self, trn_path: str, **kwargs) -> int:\n self.tag_vocab = VocabTF(unk_token=None)\n num_samples = 0\n for words, tags in self.file_to_inputs(trn_path, gold=True):\n num_samples += 1\n self.tag_vocab.update(tags)\n return num_samples\n\n def create_types_shapes_values(self) -> Tuple[Tuple, Tuple, Tuple]:\n max_seq_length = self.config.get('max_seq_length', 128)\n types = (tf.int32, tf.int32, tf.int32), tf.int32\n # (input_ids, input_mask, segment_ids), label_ids\n shapes = ([max_seq_length], [max_seq_length], [max_seq_length]), [None]\n values = (0, 0, 0), self.tag_vocab.pad_idx\n return types, shapes, values\n\n def lock_vocabs(self):\n super().lock_vocabs()\n\n def inputs_to_samples(self, inputs, gold=False):\n max_seq_length = self.config.get('max_seq_length', 128)\n tokenizer = self._tokenizer\n xlnet = False\n roberta = False\n pad_token = self.pad\n cls_token = '[CLS]'\n sep_token = '[SEP]'\n unk_token = self.unk\n\n pad_label_idx = self.tag_vocab.pad_idx\n pad_token = tokenizer.convert_tokens_to_ids([pad_token])[0]\n for sample in inputs:\n if gold:\n words, tags = sample\n else:\n words, tags = sample, [self.tag_vocab.idx_to_token[1]] * len(sample)\n\n input_ids, input_mask, segment_ids, label_ids = convert_examples_to_features(words,\n max_seq_length, tokenizer,\n tags,\n self.tag_vocab.token_to_idx,\n cls_token_at_end=xlnet,\n # xlnet has a cls token at the end\n cls_token=cls_token,\n cls_token_segment_id=2 if xlnet else 0,\n sep_token=sep_token,\n sep_token_extra=roberta,\n # roberta uses an extra separator b/w pairs of sentences, cf. 
github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n pad_on_left=xlnet,\n # pad on the left for xlnet\n pad_token_id=pad_token,\n pad_token_segment_id=4 if xlnet else 0,\n pad_token_label_id=pad_label_idx,\n unk_token=unk_token)\n\n if None in input_ids:\n print(input_ids)\n if None in input_mask:\n print(input_mask)\n if None in segment_ids:\n print(input_mask)\n yield (input_ids, input_mask, segment_ids), label_ids\n\n def x_to_idx(self, x) -> Union[tf.Tensor, Tuple]:\n raise NotImplementedError('transformers has its own tagger, not need to convert idx for x')\n\n def y_to_idx(self, y) -> tf.Tensor:\n raise NotImplementedError('transformers has its own tagger, not need to convert idx for y')\n\n def input_is_single_sample(self, input: Union[List[str], List[List[str]]]) -> bool:\n return isinstance(input[0], str)\n\n def Y_to_outputs(self, Y: Union[tf.Tensor, Tuple[tf.Tensor]], gold=False, X=None, inputs=None, batch=None,\n **kwargs) -> Iterable:\n assert batch is not None, 'Need the batch to know actual length of Y'\n label_mask = batch[1]\n\n Y = tf.argmax(Y, axis=-1)\n Y = Y[label_mask > 0]\n tags = [self.tag_vocab.idx_to_token[tid] for tid in Y]\n offset = 0\n for words in inputs:\n yield tags[offset:offset + len(words)]\n offset += len(words)\n" ]
[ [ "torch.no_grad", "torch.optim.lr_scheduler.ExponentialLR" ], [ "torch.device", "torch.load" ], [ "tensorflow.argmax", "tensorflow.constant" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
UtrechtUniversity/SWORDS-UU
[ "d9b45706566054541625ec363e41bdf97f58c6b1" ]
[ "collect_variables/scripts/download_stats.py" ]
[ "\"\"\"Collect download statistics.\n\"\"\"\nimport re\nimport datetime\nimport argparse\nimport json\n\nimport requests\nimport pandas as pd\n\nPYPI_STATS = \"https://pypistats.org/api/packages/{}/recent\"\nCRAN_STATS = \"https://cranlogs.r-pkg.org/downloads/total/last-month/{}\"\nNPM_STATS = \"https://api.npmjs.org/downloads/point/last-month/{}\"\n\nif __name__ == '__main__':\n # Initiate the parser\n parser = argparse.ArgumentParser()\n\n # Add arguments to be parsed\n parser.add_argument(\n \"--input\",\n help=\"The file name of the repositories data.\",\n default=\"../collect_variables/results/all_variables.json\")\n parser.add_argument(\"--output\",\n help=\"The file name to export the data to.\",\n default=\"results/download_stats.csv\")\n\n # Read arguments from the command line\n args = parser.parse_args()\n\n # d = requests.get(args.input)\n\n with open(args.input) as f: # pylint: disable=unspecified-encoding\n\n result = []\n\n # for line in d.text.splitlines():\n for line in f.readlines():\n\n repo = json.loads(line)\n name = repo[\"repository_name\"]\n\n try:\n readme = repo[\"readme\"]\n except KeyError:\n continue\n\n # python\n matches = re.finditer(r\"pip install( -+.*)* (.*?)[\\\\\\s]\", str(readme))\n\n for match in matches:\n\n if name == match.group(2):\n print(f\"Download stats for Python module '{name}'\")\n try:\n stats = requests.get(PYPI_STATS.format(name))\n print(stats.json()[\"data\"][\"last_month\"])\n result.append({\n \"repository_name\":\n name,\n \"owner\":\n repo[\"owner\"],\n \"last_month\":\n stats.json()[\"data\"][\"last_month\"],\n \"date\":\n str(datetime.date.today())\n })\n except Exception as err: # pylint: disable=broad-except\n pass\n break\n # R\n matches = re.finditer(r\"install\\.packages\\([\\\"\\'](.*?)[\\\"\\']\\)\",\n str(readme))\n\n for match in matches:\n\n if name == match.group(1):\n print(f\"Download stats for R package '{name}'\")\n try:\n stats = requests.get(CRAN_STATS.format(name))\n print(stats.json()[0][\"downloads\"])\n result.append({\n \"repository_name\":\n name,\n \"owner\":\n repo[\"owner\"],\n \"last_month\":\n stats.json()[0][\"downloads\"],\n \"date\":\n str(datetime.date.today())\n })\n\n except Exception as err: # pylint: disable=broad-except\n raise err\n break\n\n # JS\n matches = re.finditer(r\"npm (i|install)( -+.*)* (.*)\",\n str(readme))\n\n for match in matches:\n if name in match.group(3):\n print(f\"Download stats for npm package '{name}'\")\n try:\n if \"@\" in match.group(3):\n stats = requests.get(NPM_STATS.format(match.group(3)))\n else:\n stats = requests.get(NPM_STATS.format(name))\n print(stats.json()[\"downloads\"])\n result.append({\n \"repository_name\":\n name,\n \"owner\":\n repo[\"owner\"],\n \"last_month\":\n stats.json()[\"downloads\"],\n \"date\":\n str(datetime.date.today())\n })\n\n except Exception as err: # pylint: disable=broad-except\n print(\"Repository does not exist\")\n break\n\n df_stats = pd.DataFrame(result).sort_values([\"owner\", \"repository_name\"], inplace=True)\n print(df_stats)\n\n df_stats.to_csv(args.output, index=None)\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
krantiparida/beyond-image-to-depth
[ "dcdef5122fa456a92bd58ead4eea0a777158c535" ]
[ "util/util.py" ]
[ "#!/usr/bin/env python\nimport numpy as np\nimport os\n\ndef compute_errors(gt, pred):\n \"\"\"Computation of error metrics between predicted and ground truth depths\n \"\"\"\n # select only the values that are greater than zero\n mask = gt > 0\n pred = pred[mask]\n gt = gt[mask]\n\n thresh = np.maximum((gt / pred), (pred / gt))\n a1 = (thresh < 1.25 ).mean()\n a2 = (thresh < 1.25 ** 2).mean()\n a3 = (thresh < 1.25 ** 3).mean()\n\n rmse = (gt - pred) ** 2\n rmse = np.sqrt(rmse.mean())\n if rmse != rmse:\n rmse = 0.0\n if a1 != a1:\n a1=0.0\n if a2 != a2:\n a2=0.0\n if a3 != a3:\n a3=0.0\n \n abs_rel = np.mean(np.abs(gt - pred) / gt)\n log_10 = (np.abs(np.log10(gt)-np.log10(pred))).mean()\n mae = (np.abs(gt-pred)).mean()\n if abs_rel != abs_rel:\n abs_rel=0.0\n if log_10 != log_10:\n log_10=0.0\n if mae != mae:\n mae=0.0\n \n return abs_rel, rmse, a1, a2, a3, log_10, mae\n\nclass TextWrite(object):\n ''' Wrting the values to a text file \n '''\n def __init__(self, filename):\n self.filename = filename\n self.file = open(self.filename, \"w+\")\n self.file.close()\n self.str_write = ''\n \n def add_line_csv(self, data_list):\n str_tmp = []\n for item in data_list:\n if isinstance(item, int):\n str_tmp.append(\"{:03d}\".format(item))\n if isinstance(item, str):\n str_tmp.append(item)\n if isinstance(item, float):\n str_tmp.append(\"{:.6f}\".format(item))\n \n self.str_write = \",\".join(str_tmp) + \"\\n\"\n \n def add_line_txt(self, content, size=None, maxLength = 10, heading=False):\n if size == None:\n size = [1 for i in range(len(content))]\n if heading: \n str_tmp = '|'.join(list(map(lambda x,s:x.center((s*maxLength)+(s-1)), content, size)))\n else:\n str_tmp = '|'.join(list(map(lambda x,s:x.rjust((s*maxLength)+(s-1)), content, size)))\n self.str_write += str_tmp + \"\\n\" \n\n def write_line(self): \n self.file = open(self.filename, \"a\")\n self.file.write(self.str_write)\n self.file.close()\n self.str_write = ''\n\ndef mkdirs(paths):\n if isinstance(paths, list) and not isinstance(paths, str):\n for path in paths:\n mkdir(path)\n else:\n mkdir(paths)\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)" ]
[ [ "numpy.log10", "numpy.maximum", "numpy.abs" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lucidrains/ddpm-proteins
[ "88bfacbd3cbdc4e38585fab420106f56e890c5f7" ]
[ "ddpm_proteins/utils.py" ]
[ "import os\nfrom PIL import Image\nimport seaborn as sn\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.nn.functional as F\n\nfrom sidechainnet.utils.sequence import ProteinVocabulary\nfrom einops import rearrange\n\n# general functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n return val if exists(val) else d\n\ndef broadcat(tensors, dim = -1):\n num_tensors = len(tensors)\n shape_lens = set(list(map(lambda t: len(t.shape), tensors)))\n assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'\n shape_len = list(shape_lens)[0]\n\n dim = (dim + shape_len) if dim < 0 else dim\n dims = list(zip(*map(lambda t: list(t.shape), tensors)))\n\n expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]\n assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatentation'\n max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))\n expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))\n expanded_dims.insert(dim, (dim, dims[dim]))\n expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))\n tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))\n return torch.cat(tensors, dim = dim)\n\n# singleton msa transformer\n\nmsa_instances = None\n\ndef get_msa_transformer():\n global msa_instances\n if not exists(msa_instances):\n msa_model, alphabet = torch.hub.load(\"facebookresearch/esm\", \"esm_msa1_t12_100M_UR50S\")\n batch_converter = alphabet.get_batch_converter()\n return msa_model, batch_converter\n return msa_instances\n\n# MSA embedding related functions\n\nVOCAB = ProteinVocabulary()\n\ndef ids_to_aa_str(x):\n assert isinstance(x, list), 'input must be a list'\n id2aa = VOCAB._int2char\n is_char = lambda c: isinstance(c, str) and len(c) == 1\n out = []\n\n for el in x:\n if isinstance(el, list):\n out.append(ids_to_aa_str(el))\n elif isinstance(el, int):\n out.append(id2aa[el])\n else:\n raise TypeError('type must be either list or character')\n\n if all(map(is_char, out)):\n return ''.join(out)\n\n return out\n\ndef aa_str_to_embed_input(x):\n assert isinstance(x, list), 'input must be a list'\n out = []\n\n for el in x:\n if isinstance(el, list):\n out.append(aa_str_to_embed_input(el))\n elif isinstance(el, str):\n out.append((None, el))\n else:\n raise TypeError('type must be either list or string')\n\n return out\n\ndef apc(x):\n a1 = x.sum(-1, keepdims=True)\n a2 = x.sum(-2, keepdims=True)\n a12 = x.sum((-1, -2), keepdims=True)\n avg = a1 * a2\n avg.div_(a12)\n normalized = x - avg\n return normalized\n\ndef symmetrize(x):\n return x + x.transpose(-1, -2)\n\ndef pad_image_to(tensor, size, value = 0.):\n remainder = size - tensor.shape[-1]\n tensor = F.pad(tensor, (0, remainder, 0, remainder), value = value)\n return tensor\n\n# getting a single MSA attention embedding, with caching\n\nCACHE_PATH = default(os.getenv('CACHE_PATH'), os.path.expanduser('~/.cache.ddpm-proteins'))\nFETCH_FROM_CACHE = not exists(os.getenv('CLEAR_CACHE'))\n\nos.makedirs(CACHE_PATH, exist_ok = True)\n\[email protected]_grad()\ndef get_msa_attention_embedding(\n model,\n batch_converter,\n aa_str,\n id,\n fetch_msas_fn = lambda t: [],\n cache = True\n):\n device = next(model.parameters()).device\n\n cache_full_path = os.path.join(CACHE_PATH, f'{id}.pt')\n if cache and FETCH_FROM_CACHE and os.path.exists(cache_full_path):\n try:\n loaded = torch.load(cache_full_path).to(device)\n except:\n loaded = None\n\n if 
exists(loaded):\n return loaded\n\n msas = default(fetch_msas_fn(aa_str), [])\n seq_with_msas = [aa_str, *msas]\n\n embed_inputs = aa_str_to_embed_input(seq_with_msas)\n _, _, msa_batch_tokens = batch_converter(embed_inputs)\n\n results = model(msa_batch_tokens.to(device), need_head_weights = True)\n\n attentions = results['row_attentions']\n attentions = attentions[..., 1:, 1:]\n attentions = rearrange(attentions, 'b l h m n -> b (l h) m n')\n attentions = apc(symmetrize(attentions))\n\n if cache:\n print(f'caching to {cache_full_path}')\n torch.save(attentions, cache_full_path)\n\n return attentions\n\ndef get_msa_attention_embeddings(\n model,\n batch_converter,\n seqs,\n ids,\n fetch_msas_fn = lambda t: [],\n cache = True\n):\n n = seqs.shape[1]\n seqs = rearrange(seqs, 'b n -> b () n')\n aa_strs = ids_to_aa_str(seqs.cpu().tolist())\n embeds_list = [get_msa_attention_embedding(model, batch_converter, aa, seq_id, cache = cache) for aa, seq_id in zip(aa_strs, ids)]\n embeds_list = [pad_image_to(embed, n) for embed in embeds_list]\n embeds = torch.cat(embeds_list, dim = 0)\n return embeds\n\n# training utils\n\ndef cycle(loader, thres = 256):\n while True:\n for data in loader:\n if data.seqs.shape[1] <= thres:\n yield data\n\ndef save_heatmap(tensor, filepath, dpi = 200, return_image = False):\n heatmap = sn.heatmap(tensor.cpu().numpy())\n figure = heatmap.get_figure() \n figure.savefig(filepath, dpi = dpi)\n plt.clf()\n\n if not return_image:\n return\n return Image.open(filepath)\n" ]
[ [ "torch.cat", "torch.load", "matplotlib.pyplot.clf", "torch.no_grad", "torch.hub.load", "torch.nn.functional.pad", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HQDragon/pandas
[ "8713f2d1237a471a4f42f3fb547887bc022a5b94", "8713f2d1237a471a4f42f3fb547887bc022a5b94", "8713f2d1237a471a4f42f3fb547887bc022a5b94", "8713f2d1237a471a4f42f3fb547887bc022a5b94", "8713f2d1237a471a4f42f3fb547887bc022a5b94" ]
[ "pandas/tests/indexes/test_numpy_compat.py", "pandas/core/indexes/timedeltas.py", "pandas/tests/indexes/datetimes/test_datetimelike.py", "pandas/tests/indexes/timedeltas/test_constructors.py", "pandas/tests/indexing/test_loc.py" ]
[ "import numpy as np\nimport pytest\n\nfrom pandas.compat import np_version_under1p17, np_version_under1p18\n\nfrom pandas import (\n DatetimeIndex,\n Float64Index,\n Index,\n Int64Index,\n PeriodIndex,\n TimedeltaIndex,\n UInt64Index,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\n\n\[email protected](\n \"func\",\n [\n np.exp,\n np.exp2,\n np.expm1,\n np.log,\n np.log2,\n np.log10,\n np.log1p,\n np.sqrt,\n np.sin,\n np.cos,\n np.tan,\n np.arcsin,\n np.arccos,\n np.arctan,\n np.sinh,\n np.cosh,\n np.tanh,\n np.arcsinh,\n np.arccosh,\n np.arctanh,\n np.deg2rad,\n np.rad2deg,\n ],\n ids=lambda x: x.__name__,\n)\ndef test_numpy_ufuncs_basic(index, func):\n # test ufuncs of numpy, see:\n # https://numpy.org/doc/stable/reference/ufuncs.html\n\n if isinstance(index, DatetimeIndexOpsMixin):\n with tm.external_error_raised((TypeError, AttributeError)):\n with np.errstate(all=\"ignore\"):\n func(index)\n elif isinstance(index, (Float64Index, Int64Index, UInt64Index)):\n # coerces to float (e.g. np.sin)\n with np.errstate(all=\"ignore\"):\n result = func(index)\n exp = Index(func(index.values), name=index.name)\n\n tm.assert_index_equal(result, exp)\n assert isinstance(result, Float64Index)\n else:\n # raise AttributeError or TypeError\n if len(index) == 0:\n pass\n else:\n with tm.external_error_raised((TypeError, AttributeError)):\n with np.errstate(all=\"ignore\"):\n func(index)\n\n\[email protected](\n \"func\", [np.isfinite, np.isinf, np.isnan, np.signbit], ids=lambda x: x.__name__\n)\ndef test_numpy_ufuncs_other(index, func, request):\n # test ufuncs of numpy, see:\n # https://numpy.org/doc/stable/reference/ufuncs.html\n if isinstance(index, (DatetimeIndex, TimedeltaIndex)):\n if isinstance(index, DatetimeIndex) and index.tz is not None:\n if func in [np.isfinite, np.isnan, np.isinf]:\n if not np_version_under1p17:\n mark = pytest.mark.xfail(reason=\"__array_ufunc__ is not defined\")\n request.node.add_marker(mark)\n\n if not np_version_under1p18 and func in [np.isfinite, np.isinf, np.isnan]:\n # numpy 1.18(dev) changed isinf and isnan to not raise on dt64/tfd64\n result = func(index)\n assert isinstance(result, np.ndarray)\n\n elif not np_version_under1p17 and func in [np.isfinite]:\n # ok under numpy >= 1.17\n # Results in bool array\n result = func(index)\n assert isinstance(result, np.ndarray)\n else:\n with tm.external_error_raised(TypeError):\n func(index)\n\n elif isinstance(index, PeriodIndex):\n with tm.external_error_raised(TypeError):\n func(index)\n\n elif isinstance(index, (Float64Index, Int64Index, UInt64Index)):\n # Results in bool array\n result = func(index)\n assert isinstance(result, np.ndarray)\n assert not isinstance(result, Index)\n else:\n if len(index) == 0:\n pass\n else:\n with tm.external_error_raised(TypeError):\n func(index)\n", "\"\"\" implement the TimedeltaIndex \"\"\"\n\nfrom pandas._libs import index as libindex, lib\nfrom pandas._libs.tslibs import Timedelta, to_offset\nfrom pandas._typing import DtypeObj, Optional\nfrom pandas.errors import InvalidIndexError\n\nfrom pandas.core.dtypes.common import TD64NS_DTYPE, is_scalar, is_timedelta64_dtype\n\nfrom pandas.core.arrays import datetimelike as dtl\nfrom pandas.core.arrays.timedeltas import TimedeltaArray\nimport pandas.core.common as com\nfrom pandas.core.indexes.base import Index, maybe_extract_name\nfrom pandas.core.indexes.datetimelike import DatetimeTimedeltaMixin\nfrom pandas.core.indexes.extension import inherit_names\n\n\n@inherit_names(\n 
[\"__neg__\", \"__pos__\", \"__abs__\", \"total_seconds\", \"round\", \"floor\", \"ceil\"]\n + TimedeltaArray._field_ops,\n TimedeltaArray,\n wrap=True,\n)\n@inherit_names(\n [\n \"_bool_ops\",\n \"_object_ops\",\n \"_field_ops\",\n \"_datetimelike_ops\",\n \"_datetimelike_methods\",\n \"_other_ops\",\n \"components\",\n \"to_pytimedelta\",\n \"sum\",\n \"std\",\n \"median\",\n \"_format_native_types\",\n ],\n TimedeltaArray,\n)\nclass TimedeltaIndex(DatetimeTimedeltaMixin):\n \"\"\"\n Immutable ndarray of timedelta64 data, represented internally as int64, and\n which can be boxed to timedelta objects.\n\n Parameters\n ----------\n data : array-like (1-dimensional), optional\n Optional timedelta-like data to construct index with.\n unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional\n Which is an integer/float number.\n freq : str or pandas offset object, optional\n One of pandas date offset strings or corresponding objects. The string\n 'infer' can be passed in order to set the frequency of the index as the\n inferred frequency upon creation.\n copy : bool\n Make a copy of input ndarray.\n name : object\n Name to be stored in the index.\n\n Attributes\n ----------\n days\n seconds\n microseconds\n nanoseconds\n components\n inferred_freq\n\n Methods\n -------\n to_pytimedelta\n to_series\n round\n floor\n ceil\n to_frame\n mean\n\n See Also\n --------\n Index : The base pandas Index type.\n Timedelta : Represents a duration between two dates or times.\n DatetimeIndex : Index of datetime64 data.\n PeriodIndex : Index of Period data.\n timedelta_range : Create a fixed-frequency TimedeltaIndex.\n\n Notes\n -----\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n \"\"\"\n\n _typ = \"timedeltaindex\"\n\n _data_cls = TimedeltaArray\n _engine_type = libindex.TimedeltaEngine\n\n _comparables = [\"name\", \"freq\"]\n _attributes = [\"name\", \"freq\"]\n _is_numeric_dtype = False\n\n _data: TimedeltaArray\n\n # -------------------------------------------------------------------\n # Constructors\n\n def __new__(\n cls,\n data=None,\n unit=None,\n freq=lib.no_default,\n closed=None,\n dtype=TD64NS_DTYPE,\n copy=False,\n name=None,\n ):\n name = maybe_extract_name(name, data, cls)\n\n if is_scalar(data):\n raise TypeError(\n f\"{cls.__name__}() must be called with a \"\n f\"collection of some kind, {repr(data)} was passed\"\n )\n\n if unit in {\"Y\", \"y\", \"M\"}:\n raise ValueError(\n \"Units 'M', 'Y', and 'y' are no longer supported, as they do not \"\n \"represent unambiguous timedelta values durations.\"\n )\n\n if isinstance(data, TimedeltaArray) and freq is lib.no_default:\n if copy:\n data = data.copy()\n return cls._simple_new(data, name=name)\n\n if isinstance(data, TimedeltaIndex) and freq is lib.no_default and name is None:\n if copy:\n return data.copy()\n else:\n return data._view()\n\n # - Cases checked above all return/raise before reaching here - #\n\n tdarr = TimedeltaArray._from_sequence_not_strict(\n data, freq=freq, unit=unit, dtype=dtype, copy=copy\n )\n return cls._simple_new(tdarr, name=name)\n\n # -------------------------------------------------------------------\n\n def _is_comparable_dtype(self, dtype: DtypeObj) -> bool:\n \"\"\"\n Can we compare values of the given dtype to our own?\n \"\"\"\n return is_timedelta64_dtype(dtype)\n\n # -------------------------------------------------------------------\n # Indexing Methods\n\n def get_loc(self, 
key, method=None, tolerance=None):\n \"\"\"\n Get integer location for requested label\n\n Returns\n -------\n loc : int, slice, or ndarray[int]\n \"\"\"\n if not is_scalar(key):\n raise InvalidIndexError(key)\n\n try:\n key = self._data._validate_scalar(key, unbox=False)\n except TypeError as err:\n raise KeyError(key) from err\n\n return Index.get_loc(self, key, method, tolerance)\n\n def _maybe_cast_slice_bound(self, label, side: str, kind):\n \"\"\"\n If label is a string, cast it to timedelta according to resolution.\n\n Parameters\n ----------\n label : object\n side : {'left', 'right'}\n kind : {'loc', 'getitem'} or None\n\n Returns\n -------\n label : object\n \"\"\"\n assert kind in [\"loc\", \"getitem\", None]\n\n if isinstance(label, str):\n parsed = Timedelta(label)\n lbound = parsed.round(parsed.resolution_string)\n if side == \"left\":\n return lbound\n else:\n return lbound + to_offset(parsed.resolution_string) - Timedelta(1, \"ns\")\n elif not isinstance(label, self._data._recognized_scalars):\n raise self._invalid_indexer(\"slice\", label)\n\n return label\n\n # -------------------------------------------------------------------\n\n @property\n def inferred_type(self) -> str:\n return \"timedelta64\"\n\n\ndef timedelta_range(\n start=None,\n end=None,\n periods: Optional[int] = None,\n freq=None,\n name=None,\n closed=None,\n) -> TimedeltaIndex:\n \"\"\"\n Return a fixed frequency TimedeltaIndex, with day as the default\n frequency.\n\n Parameters\n ----------\n start : str or timedelta-like, default None\n Left bound for generating timedeltas.\n end : str or timedelta-like, default None\n Right bound for generating timedeltas.\n periods : int, default None\n Number of periods to generate.\n freq : str or DateOffset, default 'D'\n Frequency strings can have multiples, e.g. '5H'.\n name : str, default None\n Name of the resulting TimedeltaIndex.\n closed : str, default None\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None).\n\n Returns\n -------\n TimedeltaIndex\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end`` (closed on both sides).\n\n To learn more about the frequency strings, please see `this link\n <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.\n\n Examples\n --------\n >>> pd.timedelta_range(start='1 day', periods=4)\n TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq='D')\n\n The ``closed`` parameter specifies which endpoint is included. 
The default\n behavior is to include both endpoints.\n\n >>> pd.timedelta_range(start='1 day', periods=4, closed='right')\n TimedeltaIndex(['2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq='D')\n\n The ``freq`` parameter specifies the frequency of the TimedeltaIndex.\n Only fixed frequencies can be passed, non-fixed frequencies such as\n 'M' (month end) will raise.\n\n >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')\n TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',\n '1 days 18:00:00', '2 days 00:00:00'],\n dtype='timedelta64[ns]', freq='6H')\n\n Specify ``start``, ``end``, and ``periods``; the frequency is generated\n automatically (linearly spaced).\n\n >>> pd.timedelta_range(start='1 day', end='5 days', periods=4)\n TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',\n '5 days 00:00:00'],\n dtype='timedelta64[ns]', freq=None)\n \"\"\"\n if freq is None and com.any_none(periods, start, end):\n freq = \"D\"\n\n freq, _ = dtl.maybe_infer_freq(freq)\n tdarr = TimedeltaArray._generate_range(start, end, periods, freq, closed=closed)\n return TimedeltaIndex._simple_new(tdarr, name=name)\n", "\"\"\" generic tests from the Datetimelike class \"\"\"\nimport pytest\n\nfrom pandas import DatetimeIndex, date_range\nimport pandas._testing as tm\nfrom pandas.tests.indexes.datetimelike import DatetimeLike\n\n\nclass TestDatetimeIndex(DatetimeLike):\n _holder = DatetimeIndex\n\n @pytest.fixture(\n params=[tm.makeDateIndex(10), date_range(\"20130110\", periods=10, freq=\"-1D\")],\n ids=[\"index_inc\", \"index_dec\"],\n )\n def index(self, request):\n return request.param\n\n def create_index(self) -> DatetimeIndex:\n return date_range(\"20130101\", periods=5)\n\n def test_format(self):\n # GH35439\n idx = self.create_index()\n expected = [f\"{x:%Y-%m-%d}\" for x in idx]\n assert idx.format() == expected\n\n def test_shift(self):\n pass # handled in test_ops\n\n def test_pickle_compat_construction(self):\n pass\n\n def test_intersection(self):\n pass # handled in test_setops\n\n def test_union(self):\n pass # handled in test_setops\n", "from datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Timedelta, TimedeltaIndex, timedelta_range, to_timedelta\nimport pandas._testing as tm\nfrom pandas.core.arrays.timedeltas import TimedeltaArray, sequence_to_td64ns\n\n\nclass TestTimedeltaIndex:\n def test_array_of_dt64_nat_raises(self):\n # GH#39462\n nat = np.datetime64(\"NaT\", \"ns\")\n arr = np.array([nat], dtype=object)\n\n # TODO: should be TypeError?\n msg = \"Invalid type for timedelta scalar\"\n with pytest.raises(ValueError, match=msg):\n TimedeltaIndex(arr)\n\n with pytest.raises(ValueError, match=msg):\n TimedeltaArray._from_sequence(arr)\n\n with pytest.raises(ValueError, match=msg):\n sequence_to_td64ns(arr)\n\n @pytest.mark.parametrize(\"unit\", [\"Y\", \"y\", \"M\"])\n def test_unit_m_y_raises(self, unit):\n msg = \"Units 'M', 'Y', and 'y' are no longer supported\"\n with pytest.raises(ValueError, match=msg):\n TimedeltaIndex([1, 3, 7], unit)\n\n def test_int64_nocopy(self):\n # GH#23539 check that a copy isn't made when we pass int64 data\n # and copy=False\n arr = np.arange(10, dtype=np.int64)\n tdi = TimedeltaIndex(arr, copy=False)\n assert tdi._data._data.base is arr\n\n def test_infer_from_tdi(self):\n # GH#23539\n # fast-path for inferring a frequency if the passed data already\n # has one\n tdi = timedelta_range(\"1 second\", periods=10 ** 7, freq=\"1s\")\n\n result 
= TimedeltaIndex(tdi, freq=\"infer\")\n assert result.freq == tdi.freq\n\n # check that inferred_freq was not called by checking that the\n # value has not been cached\n assert \"inferred_freq\" not in getattr(result, \"_cache\", {})\n\n def test_infer_from_tdi_mismatch(self):\n # GH#23539\n # fast-path for invalidating a frequency if the passed data already\n # has one and it does not match the `freq` input\n tdi = timedelta_range(\"1 second\", periods=100, freq=\"1s\")\n\n msg = (\n \"Inferred frequency .* from passed values does \"\n \"not conform to passed frequency\"\n )\n with pytest.raises(ValueError, match=msg):\n TimedeltaIndex(tdi, freq=\"D\")\n\n with pytest.raises(ValueError, match=msg):\n # GH#23789\n TimedeltaArray(tdi, freq=\"D\")\n\n with pytest.raises(ValueError, match=msg):\n TimedeltaIndex(tdi._data, freq=\"D\")\n\n with pytest.raises(ValueError, match=msg):\n TimedeltaArray(tdi._data, freq=\"D\")\n\n def test_dt64_data_invalid(self):\n # GH#23539\n # passing tz-aware DatetimeIndex raises, naive or ndarray[datetime64]\n # raise as of GH#29794\n dti = pd.date_range(\"2016-01-01\", periods=3)\n\n msg = \"cannot be converted to timedelta64\"\n with pytest.raises(TypeError, match=msg):\n TimedeltaIndex(dti.tz_localize(\"Europe/Brussels\"))\n\n with pytest.raises(TypeError, match=msg):\n TimedeltaIndex(dti)\n\n with pytest.raises(TypeError, match=msg):\n TimedeltaIndex(np.asarray(dti))\n\n def test_float64_ns_rounded(self):\n # GH#23539 without specifying a unit, floats are regarded as nanos,\n # and fractional portions are truncated\n tdi = TimedeltaIndex([2.3, 9.7])\n expected = TimedeltaIndex([2, 9])\n tm.assert_index_equal(tdi, expected)\n\n # integral floats are non-lossy\n tdi = TimedeltaIndex([2.0, 9.0])\n expected = TimedeltaIndex([2, 9])\n tm.assert_index_equal(tdi, expected)\n\n # NaNs get converted to NaT\n tdi = TimedeltaIndex([2.0, np.nan])\n expected = TimedeltaIndex([Timedelta(nanoseconds=2), pd.NaT])\n tm.assert_index_equal(tdi, expected)\n\n def test_float64_unit_conversion(self):\n # GH#23539\n tdi = TimedeltaIndex([1.5, 2.25], unit=\"D\")\n expected = TimedeltaIndex([Timedelta(days=1.5), Timedelta(days=2.25)])\n tm.assert_index_equal(tdi, expected)\n\n def test_construction_base_constructor(self):\n arr = [Timedelta(\"1 days\"), pd.NaT, Timedelta(\"3 days\")]\n tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr))\n tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr)))\n\n arr = [np.nan, pd.NaT, Timedelta(\"1 days\")]\n tm.assert_index_equal(pd.Index(arr), TimedeltaIndex(arr))\n tm.assert_index_equal(pd.Index(np.array(arr)), TimedeltaIndex(np.array(arr)))\n\n def test_constructor(self):\n expected = TimedeltaIndex(\n [\n \"1 days\",\n \"1 days 00:00:05\",\n \"2 days\",\n \"2 days 00:00:02\",\n \"0 days 00:00:03\",\n ]\n )\n result = TimedeltaIndex(\n [\n \"1 days\",\n \"1 days, 00:00:05\",\n np.timedelta64(2, \"D\"),\n timedelta(days=2, seconds=2),\n pd.offsets.Second(3),\n ]\n )\n tm.assert_index_equal(result, expected)\n\n # unicode\n result = TimedeltaIndex(\n [\n \"1 days\",\n \"1 days, 00:00:05\",\n np.timedelta64(2, \"D\"),\n timedelta(days=2, seconds=2),\n pd.offsets.Second(3),\n ]\n )\n\n expected = TimedeltaIndex(\n [\"0 days 00:00:00\", \"0 days 00:00:01\", \"0 days 00:00:02\"]\n )\n tm.assert_index_equal(TimedeltaIndex(range(3), unit=\"s\"), expected)\n expected = TimedeltaIndex(\n [\"0 days 00:00:00\", \"0 days 00:00:05\", \"0 days 00:00:09\"]\n )\n tm.assert_index_equal(TimedeltaIndex([0, 5, 9], unit=\"s\"), 
expected)\n expected = TimedeltaIndex(\n [\"0 days 00:00:00.400\", \"0 days 00:00:00.450\", \"0 days 00:00:01.200\"]\n )\n tm.assert_index_equal(TimedeltaIndex([400, 450, 1200], unit=\"ms\"), expected)\n\n def test_constructor_iso(self):\n # GH #21877\n expected = timedelta_range(\"1s\", periods=9, freq=\"s\")\n durations = [f\"P0DT0H0M{i}S\" for i in range(1, 10)]\n result = to_timedelta(durations)\n tm.assert_index_equal(result, expected)\n\n def test_constructor_coverage(self):\n rng = timedelta_range(\"1 days\", periods=10.5)\n exp = timedelta_range(\"1 days\", periods=10)\n tm.assert_index_equal(rng, exp)\n\n msg = \"periods must be a number, got foo\"\n with pytest.raises(TypeError, match=msg):\n timedelta_range(start=\"1 days\", periods=\"foo\", freq=\"D\")\n\n msg = (\n r\"TimedeltaIndex\\(\\) must be called with a collection of some kind, \"\n \"'1 days' was passed\"\n )\n with pytest.raises(TypeError, match=msg):\n TimedeltaIndex(\"1 days\")\n\n # generator expression\n gen = (timedelta(i) for i in range(10))\n result = TimedeltaIndex(gen)\n expected = TimedeltaIndex([timedelta(i) for i in range(10)])\n tm.assert_index_equal(result, expected)\n\n # NumPy string array\n strings = np.array([\"1 days\", \"2 days\", \"3 days\"])\n result = TimedeltaIndex(strings)\n expected = to_timedelta([1, 2, 3], unit=\"d\")\n tm.assert_index_equal(result, expected)\n\n from_ints = TimedeltaIndex(expected.asi8)\n tm.assert_index_equal(from_ints, expected)\n\n # non-conforming freq\n msg = (\n \"Inferred frequency None from passed values does not conform to \"\n \"passed frequency D\"\n )\n with pytest.raises(ValueError, match=msg):\n TimedeltaIndex([\"1 days\", \"2 days\", \"4 days\"], freq=\"D\")\n\n msg = (\n \"Of the four parameters: start, end, periods, and freq, exactly \"\n \"three must be specified\"\n )\n with pytest.raises(ValueError, match=msg):\n timedelta_range(periods=10, freq=\"D\")\n\n def test_constructor_name(self):\n idx = timedelta_range(start=\"1 days\", periods=1, freq=\"D\", name=\"TEST\")\n assert idx.name == \"TEST\"\n\n # GH10025\n idx2 = TimedeltaIndex(idx, name=\"something else\")\n assert idx2.name == \"something else\"\n\n def test_constructor_no_precision_raises(self):\n # GH-24753, GH-24739\n\n msg = \"with no precision is not allowed\"\n with pytest.raises(ValueError, match=msg):\n TimedeltaIndex([\"2000\"], dtype=\"timedelta64\")\n\n with pytest.raises(ValueError, match=msg):\n pd.Index([\"2000\"], dtype=\"timedelta64\")\n\n def test_constructor_wrong_precision_raises(self):\n msg = r\"dtype timedelta64\\[us\\] cannot be converted to timedelta64\\[ns\\]\"\n with pytest.raises(ValueError, match=msg):\n TimedeltaIndex([\"2000\"], dtype=\"timedelta64[us]\")\n\n def test_explicit_none_freq(self):\n # Explicitly passing freq=None is respected\n tdi = timedelta_range(1, periods=5)\n assert tdi.freq is not None\n\n result = TimedeltaIndex(tdi, freq=None)\n assert result.freq is None\n\n result = TimedeltaIndex(tdi._data, freq=None)\n assert result.freq is None\n\n def test_from_categorical(self):\n tdi = timedelta_range(1, periods=5)\n\n cat = pd.Categorical(tdi)\n\n result = TimedeltaIndex(cat)\n tm.assert_index_equal(result, tdi)\n\n ci = pd.CategoricalIndex(tdi)\n result = TimedeltaIndex(ci)\n tm.assert_index_equal(result, tdi)\n", "\"\"\" test label based indexing with loc \"\"\"\nfrom datetime import datetime, time, timedelta\nfrom io import StringIO\nimport re\n\nfrom dateutil.tz import gettz\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators 
as td\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n DatetimeIndex,\n Index,\n IndexSlice,\n MultiIndex,\n Series,\n SparseDtype,\n Timedelta,\n Timestamp,\n date_range,\n timedelta_range,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\nfrom pandas.api.types import is_scalar\nfrom pandas.tests.indexing.common import Base\n\n\nclass TestLoc(Base):\n def test_loc_getitem_int(self):\n\n # int label\n self.check_result(\"loc\", 2, typs=[\"labels\"], fails=KeyError)\n\n def test_loc_getitem_label(self):\n\n # label\n self.check_result(\"loc\", \"c\", typs=[\"empty\"], fails=KeyError)\n\n def test_loc_getitem_label_out_of_range(self):\n\n # out of range label\n self.check_result(\n \"loc\", \"f\", typs=[\"ints\", \"uints\", \"labels\", \"mixed\", \"ts\"], fails=KeyError\n )\n self.check_result(\"loc\", \"f\", typs=[\"floats\"], fails=KeyError)\n self.check_result(\"loc\", \"f\", typs=[\"floats\"], fails=KeyError)\n self.check_result(\"loc\", 20, typs=[\"ints\", \"uints\", \"mixed\"], fails=KeyError)\n self.check_result(\"loc\", 20, typs=[\"labels\"], fails=KeyError)\n self.check_result(\"loc\", 20, typs=[\"ts\"], axes=0, fails=KeyError)\n self.check_result(\"loc\", 20, typs=[\"floats\"], axes=0, fails=KeyError)\n\n def test_loc_getitem_label_list(self):\n # list of labels\n self.check_result(\n \"loc\", [0, 1, 2], typs=[\"ints\", \"uints\", \"floats\"], fails=KeyError\n )\n self.check_result(\n \"loc\", [1, 3.0, \"A\"], typs=[\"ints\", \"uints\", \"floats\"], fails=KeyError\n )\n\n def test_loc_getitem_label_list_with_missing(self):\n self.check_result(\"loc\", [0, 1, 2], typs=[\"empty\"], fails=KeyError)\n self.check_result(\n \"loc\", [0, 2, 10], typs=[\"ints\", \"uints\", \"floats\"], axes=0, fails=KeyError\n )\n\n self.check_result(\n \"loc\", [3, 6, 7], typs=[\"ints\", \"uints\", \"floats\"], axes=1, fails=KeyError\n )\n\n # GH 17758 - MultiIndex and missing keys\n self.check_result(\n \"loc\", [(1, 3), (1, 4), (2, 5)], typs=[\"multi\"], axes=0, fails=KeyError\n )\n\n def test_loc_getitem_label_list_fails(self):\n # fails\n self.check_result(\n \"loc\", [20, 30, 40], typs=[\"ints\", \"uints\"], axes=1, fails=KeyError\n )\n\n def test_loc_getitem_label_array_like(self):\n # TODO: test something?\n # array like\n pass\n\n def test_loc_getitem_bool(self):\n # boolean indexers\n b = [True, False, True, False]\n\n self.check_result(\"loc\", b, typs=[\"empty\"], fails=IndexError)\n\n def test_loc_getitem_label_slice(self):\n\n # label slices (with ints)\n\n # real label slices\n\n # GH 14316\n\n self.check_result(\n \"loc\",\n slice(1, 3),\n typs=[\"labels\", \"mixed\", \"empty\", \"ts\", \"floats\"],\n fails=TypeError,\n )\n\n self.check_result(\n \"loc\", slice(\"20130102\", \"20130104\"), typs=[\"ts\"], axes=1, fails=TypeError\n )\n\n self.check_result(\"loc\", slice(2, 8), typs=[\"mixed\"], axes=0, fails=TypeError)\n self.check_result(\"loc\", slice(2, 8), typs=[\"mixed\"], axes=1, fails=KeyError)\n\n self.check_result(\n \"loc\", slice(2, 4, 2), typs=[\"mixed\"], axes=0, fails=TypeError\n )\n\n def test_setitem_from_duplicate_axis(self):\n # GH#34034\n df = DataFrame(\n [[20, \"a\"], [200, \"a\"], [200, \"a\"]],\n columns=[\"col1\", \"col2\"],\n index=[10, 1, 1],\n )\n df.loc[1, \"col1\"] = np.arange(2)\n expected = DataFrame(\n [[20, \"a\"], [0, \"a\"], [1, \"a\"]], columns=[\"col1\", \"col2\"], index=[10, 1, 1]\n )\n tm.assert_frame_equal(df, expected)\n\n\nclass TestLoc2:\n # TODO: better name, just separating out things 
that rely on base class\n\n def test_loc_getitem_missing_unicode_key(self):\n df = DataFrame({\"a\": [1]})\n with pytest.raises(KeyError, match=\"\\u05d0\"):\n df.loc[:, \"\\u05d0\"] # should not raise UnicodeEncodeError\n\n def test_loc_getitem_dups(self):\n # GH 5678\n # repeated getitems on a dup index returning a ndarray\n df = DataFrame(\n np.random.random_sample((20, 5)), index=[\"ABCDE\"[x % 5] for x in range(20)]\n )\n expected = df.loc[\"A\", 0]\n result = df.loc[:, 0].loc[\"A\"]\n tm.assert_series_equal(result, expected)\n\n def test_loc_getitem_dups2(self):\n\n # GH4726\n # dup indexing with iloc/loc\n df = DataFrame(\n [[1, 2, \"foo\", \"bar\", Timestamp(\"20130101\")]],\n columns=[\"a\", \"a\", \"a\", \"a\", \"a\"],\n index=[1],\n )\n expected = Series(\n [1, 2, \"foo\", \"bar\", Timestamp(\"20130101\")],\n index=[\"a\", \"a\", \"a\", \"a\", \"a\"],\n name=1,\n )\n\n result = df.iloc[0]\n tm.assert_series_equal(result, expected)\n\n result = df.loc[1]\n tm.assert_series_equal(result, expected)\n\n def test_loc_setitem_dups(self):\n\n # GH 6541\n df_orig = DataFrame(\n {\n \"me\": list(\"rttti\"),\n \"foo\": list(\"aaade\"),\n \"bar\": np.arange(5, dtype=\"float64\") * 1.34 + 2,\n \"bar2\": np.arange(5, dtype=\"float64\") * -0.34 + 2,\n }\n ).set_index(\"me\")\n\n indexer = (\n \"r\",\n [\"bar\", \"bar2\"],\n )\n df = df_orig.copy()\n df.loc[indexer] *= 2.0\n tm.assert_series_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])\n\n indexer = (\n \"r\",\n \"bar\",\n )\n df = df_orig.copy()\n df.loc[indexer] *= 2.0\n assert df.loc[indexer] == 2.0 * df_orig.loc[indexer]\n\n indexer = (\n \"t\",\n [\"bar\", \"bar2\"],\n )\n df = df_orig.copy()\n df.loc[indexer] *= 2.0\n tm.assert_frame_equal(df.loc[indexer], 2.0 * df_orig.loc[indexer])\n\n def test_loc_setitem_slice(self):\n # GH10503\n\n # assigning the same type should not change the type\n df1 = DataFrame({\"a\": [0, 1, 1], \"b\": Series([100, 200, 300], dtype=\"uint32\")})\n ix = df1[\"a\"] == 1\n newb1 = df1.loc[ix, \"b\"] + 1\n df1.loc[ix, \"b\"] = newb1\n expected = DataFrame(\n {\"a\": [0, 1, 1], \"b\": Series([100, 201, 301], dtype=\"uint32\")}\n )\n tm.assert_frame_equal(df1, expected)\n\n # assigning a new type should get the inferred type\n df2 = DataFrame({\"a\": [0, 1, 1], \"b\": [100, 200, 300]}, dtype=\"uint64\")\n ix = df1[\"a\"] == 1\n newb2 = df2.loc[ix, \"b\"]\n df1.loc[ix, \"b\"] = newb2\n expected = DataFrame({\"a\": [0, 1, 1], \"b\": [100, 200, 300]}, dtype=\"uint64\")\n tm.assert_frame_equal(df2, expected)\n\n def test_loc_setitem_dtype(self):\n # GH31340\n df = DataFrame({\"id\": [\"A\"], \"a\": [1.2], \"b\": [0.0], \"c\": [-2.5]})\n cols = [\"a\", \"b\", \"c\"]\n df.loc[:, cols] = df.loc[:, cols].astype(\"float32\")\n\n expected = DataFrame(\n {\"id\": [\"A\"], \"a\": [1.2], \"b\": [0.0], \"c\": [-2.5]}, dtype=\"float32\"\n ) # id is inferred as object\n\n tm.assert_frame_equal(df, expected)\n\n def test_getitem_label_list_with_missing(self):\n s = Series(range(3), index=[\"a\", \"b\", \"c\"])\n\n # consistency\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s[[\"a\", \"d\"]]\n\n s = Series(range(3))\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s[[0, 3]]\n\n @pytest.mark.parametrize(\"index\", [[True, False], [True, False, True, False]])\n def test_loc_getitem_bool_diff_len(self, index):\n # GH26658\n s = Series([1, 2, 3])\n msg = f\"Boolean index has wrong length: {len(index)} instead of {len(s)}\"\n with pytest.raises(IndexError, match=msg):\n _ = 
s.loc[index]\n\n def test_loc_getitem_int_slice(self):\n # TODO: test something here?\n pass\n\n def test_loc_to_fail(self):\n\n # GH3449\n df = DataFrame(\n np.random.random((3, 3)), index=[\"a\", \"b\", \"c\"], columns=[\"e\", \"f\", \"g\"]\n )\n\n # raise a KeyError?\n msg = (\n r\"\\\"None of \\[Int64Index\\(\\[1, 2\\], dtype='int64'\\)\\] are \"\n r\"in the \\[index\\]\\\"\"\n )\n with pytest.raises(KeyError, match=msg):\n df.loc[[1, 2], [1, 2]]\n\n # GH 7496\n # loc should not fallback\n\n s = Series(dtype=object)\n s.loc[1] = 1\n s.loc[\"a\"] = 2\n\n with pytest.raises(KeyError, match=r\"^-1$\"):\n s.loc[-1]\n\n msg = (\n r\"\\\"None of \\[Int64Index\\(\\[-1, -2\\], dtype='int64'\\)\\] are \"\n r\"in the \\[index\\]\\\"\"\n )\n with pytest.raises(KeyError, match=msg):\n s.loc[[-1, -2]]\n\n msg = r\"\\\"None of \\[Index\\(\\['4'\\], dtype='object'\\)\\] are in the \\[index\\]\\\"\"\n with pytest.raises(KeyError, match=msg):\n s.loc[[\"4\"]]\n\n s.loc[-1] = 3\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s.loc[[-1, -2]]\n\n s[\"a\"] = 2\n msg = (\n r\"\\\"None of \\[Int64Index\\(\\[-2\\], dtype='int64'\\)\\] are \"\n r\"in the \\[index\\]\\\"\"\n )\n with pytest.raises(KeyError, match=msg):\n s.loc[[-2]]\n\n del s[\"a\"]\n\n with pytest.raises(KeyError, match=msg):\n s.loc[[-2]] = 0\n\n # inconsistency between .loc[values] and .loc[values,:]\n # GH 7999\n df = DataFrame([[\"a\"], [\"b\"]], index=[1, 2], columns=[\"value\"])\n\n msg = (\n r\"\\\"None of \\[Int64Index\\(\\[3\\], dtype='int64'\\)\\] are \"\n r\"in the \\[index\\]\\\"\"\n )\n with pytest.raises(KeyError, match=msg):\n df.loc[[3], :]\n\n with pytest.raises(KeyError, match=msg):\n df.loc[[3]]\n\n def test_loc_getitem_list_with_fail(self):\n # 15747\n # should KeyError if *any* missing labels\n\n s = Series([1, 2, 3])\n\n s.loc[[2]]\n\n with pytest.raises(\n KeyError,\n match=re.escape(\n \"\\\"None of [Int64Index([3], dtype='int64')] are in the [index]\\\"\"\n ),\n ):\n s.loc[[3]]\n\n # a non-match and a match\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s.loc[[2, 3]]\n\n def test_loc_index(self):\n # gh-17131\n # a boolean index should index like a boolean numpy array\n\n df = DataFrame(\n np.random.random(size=(5, 10)),\n index=[\"alpha_0\", \"alpha_1\", \"alpha_2\", \"beta_0\", \"beta_1\"],\n )\n\n mask = df.index.map(lambda x: \"alpha\" in x)\n expected = df.loc[np.array(mask)]\n\n result = df.loc[mask]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[mask.values]\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[pd.array(mask, dtype=\"boolean\")]\n tm.assert_frame_equal(result, expected)\n\n def test_loc_general(self):\n\n df = DataFrame(\n np.random.rand(4, 4),\n columns=[\"A\", \"B\", \"C\", \"D\"],\n index=[\"A\", \"B\", \"C\", \"D\"],\n )\n\n # want this to work\n result = df.loc[:, \"A\":\"B\"].iloc[0:2, :]\n assert (result.columns == [\"A\", \"B\"]).all()\n assert (result.index == [\"A\", \"B\"]).all()\n\n # mixed type\n result = DataFrame({\"a\": [Timestamp(\"20130101\")], \"b\": [1]}).iloc[0]\n expected = Series([Timestamp(\"20130101\"), 1], index=[\"a\", \"b\"], name=0)\n tm.assert_series_equal(result, expected)\n assert result.dtype == object\n\n @pytest.fixture\n def frame_for_consistency(self):\n return DataFrame(\n {\n \"date\": date_range(\"2000-01-01\", \"2000-01-5\"),\n \"val\": Series(range(5), dtype=np.int64),\n }\n )\n\n def test_loc_setitem_consistency(self, frame_for_consistency):\n # GH 6149\n # coerce similarly for setitem 
and loc when rows have a null-slice\n expected = DataFrame(\n {\n \"date\": Series(0, index=range(5), dtype=np.int64),\n \"val\": Series(range(5), dtype=np.int64),\n }\n )\n df = frame_for_consistency.copy()\n df.loc[:, \"date\"] = 0\n tm.assert_frame_equal(df, expected)\n\n df = frame_for_consistency.copy()\n df.loc[:, \"date\"] = np.array(0, dtype=np.int64)\n tm.assert_frame_equal(df, expected)\n\n df = frame_for_consistency.copy()\n df.loc[:, \"date\"] = np.array([0, 0, 0, 0, 0], dtype=np.int64)\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_consistency_dt64_to_str(self, frame_for_consistency):\n # GH 6149\n # coerce similarly for setitem and loc when rows have a null-slice\n\n expected = DataFrame(\n {\n \"date\": Series(\"foo\", index=range(5)),\n \"val\": Series(range(5), dtype=np.int64),\n }\n )\n df = frame_for_consistency.copy()\n df.loc[:, \"date\"] = \"foo\"\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_consistency_dt64_to_float(self, frame_for_consistency):\n # GH 6149\n # coerce similarly for setitem and loc when rows have a null-slice\n expected = DataFrame(\n {\n \"date\": Series(1.0, index=range(5)),\n \"val\": Series(range(5), dtype=np.int64),\n }\n )\n df = frame_for_consistency.copy()\n df.loc[:, \"date\"] = 1.0\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_consistency_single_row(self):\n # GH 15494\n # setting on frame with single row\n df = DataFrame({\"date\": Series([Timestamp(\"20180101\")])})\n df.loc[:, \"date\"] = \"string\"\n expected = DataFrame({\"date\": Series([\"string\"])})\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_consistency_empty(self):\n # empty (essentially noops)\n expected = DataFrame(columns=[\"x\", \"y\"])\n expected[\"x\"] = expected[\"x\"].astype(np.int64)\n df = DataFrame(columns=[\"x\", \"y\"])\n df.loc[:, \"x\"] = 1\n tm.assert_frame_equal(df, expected)\n\n df = DataFrame(columns=[\"x\", \"y\"])\n df[\"x\"] = 1\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_consistency_slice_column_len(self):\n # .loc[:,column] setting with slice == len of the column\n # GH10408\n data = \"\"\"Level_0,,,Respondent,Respondent,Respondent,OtherCat,OtherCat\nLevel_1,,,Something,StartDate,EndDate,Yes/No,SomethingElse\nRegion,Site,RespondentID,,,,,\nRegion_1,Site_1,3987227376,A,5/25/2015 10:59,5/25/2015 11:22,Yes,\nRegion_1,Site_1,3980680971,A,5/21/2015 9:40,5/21/2015 9:52,Yes,Yes\nRegion_1,Site_2,3977723249,A,5/20/2015 8:27,5/20/2015 8:41,Yes,\nRegion_1,Site_2,3977723089,A,5/20/2015 8:33,5/20/2015 9:09,Yes,No\"\"\"\n\n df = pd.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1, 2])\n df.loc[:, (\"Respondent\", \"StartDate\")] = pd.to_datetime(\n df.loc[:, (\"Respondent\", \"StartDate\")]\n )\n df.loc[:, (\"Respondent\", \"EndDate\")] = pd.to_datetime(\n df.loc[:, (\"Respondent\", \"EndDate\")]\n )\n df.loc[:, (\"Respondent\", \"Duration\")] = (\n df.loc[:, (\"Respondent\", \"EndDate\")]\n - df.loc[:, (\"Respondent\", \"StartDate\")]\n )\n\n df.loc[:, (\"Respondent\", \"Duration\")] = df.loc[\n :, (\"Respondent\", \"Duration\")\n ].astype(\"timedelta64[s]\")\n expected = Series(\n [1380, 720, 840, 2160.0], index=df.index, name=(\"Respondent\", \"Duration\")\n )\n tm.assert_series_equal(df[(\"Respondent\", \"Duration\")], expected)\n\n @pytest.mark.parametrize(\"unit\", [\"Y\", \"M\", \"D\", \"h\", \"m\", \"s\", \"ms\", \"us\"])\n def test_loc_assign_non_ns_datetime(self, unit):\n # GH 27395, non-ns dtype assignment via .loc should work\n # and return the same result when 
using simple assignment\n df = DataFrame(\n {\n \"timestamp\": [\n np.datetime64(\"2017-02-11 12:41:29\"),\n np.datetime64(\"1991-11-07 04:22:37\"),\n ]\n }\n )\n\n df.loc[:, unit] = df.loc[:, \"timestamp\"].values.astype(f\"datetime64[{unit}]\")\n df[\"expected\"] = df.loc[:, \"timestamp\"].values.astype(f\"datetime64[{unit}]\")\n expected = Series(df.loc[:, \"expected\"], name=unit)\n tm.assert_series_equal(df.loc[:, unit], expected)\n\n def test_loc_modify_datetime(self):\n # see gh-28837\n df = DataFrame.from_dict(\n {\"date\": [1485264372711, 1485265925110, 1540215845888, 1540282121025]}\n )\n\n df[\"date_dt\"] = pd.to_datetime(df[\"date\"], unit=\"ms\", cache=True)\n\n df.loc[:, \"date_dt_cp\"] = df.loc[:, \"date_dt\"]\n df.loc[[2, 3], \"date_dt_cp\"] = df.loc[[2, 3], \"date_dt\"]\n\n expected = DataFrame(\n [\n [1485264372711, \"2017-01-24 13:26:12.711\", \"2017-01-24 13:26:12.711\"],\n [1485265925110, \"2017-01-24 13:52:05.110\", \"2017-01-24 13:52:05.110\"],\n [1540215845888, \"2018-10-22 13:44:05.888\", \"2018-10-22 13:44:05.888\"],\n [1540282121025, \"2018-10-23 08:08:41.025\", \"2018-10-23 08:08:41.025\"],\n ],\n columns=[\"date\", \"date_dt\", \"date_dt_cp\"],\n )\n\n columns = [\"date_dt\", \"date_dt_cp\"]\n expected[columns] = expected[columns].apply(pd.to_datetime)\n\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_frame(self):\n df = DataFrame(np.random.randn(4, 4), index=list(\"abcd\"), columns=list(\"ABCD\"))\n\n result = df.iloc[0, 0]\n\n df.loc[\"a\", \"A\"] = 1\n result = df.loc[\"a\", \"A\"]\n assert result == 1\n\n result = df.iloc[0, 0]\n assert result == 1\n\n df.loc[:, \"B\":\"D\"] = 0\n expected = df.loc[:, \"B\":\"D\"]\n result = df.iloc[:, 1:]\n tm.assert_frame_equal(result, expected)\n\n # GH 6254\n # setting issue\n df = DataFrame(index=[3, 5, 4], columns=[\"A\"])\n df.loc[[4, 3, 5], \"A\"] = np.array([1, 2, 3], dtype=\"int64\")\n expected = DataFrame({\"A\": Series([1, 2, 3], index=[4, 3, 5])}).reindex(\n index=[3, 5, 4]\n )\n tm.assert_frame_equal(df, expected)\n\n # GH 6252\n # setting with an empty frame\n keys1 = [\"@\" + str(i) for i in range(5)]\n val1 = np.arange(5, dtype=\"int64\")\n\n keys2 = [\"@\" + str(i) for i in range(4)]\n val2 = np.arange(4, dtype=\"int64\")\n\n index = list(set(keys1).union(keys2))\n df = DataFrame(index=index)\n df[\"A\"] = np.nan\n df.loc[keys1, \"A\"] = val1\n\n df[\"B\"] = np.nan\n df.loc[keys2, \"B\"] = val2\n\n expected = DataFrame(\n {\"A\": Series(val1, index=keys1), \"B\": Series(val2, index=keys2)}\n ).reindex(index=index)\n tm.assert_frame_equal(df, expected)\n\n # GH 8669\n # invalid coercion of nan -> int\n df = DataFrame({\"A\": [1, 2, 3], \"B\": np.nan})\n df.loc[df.B > df.A, \"B\"] = df.A\n expected = DataFrame({\"A\": [1, 2, 3], \"B\": np.nan})\n tm.assert_frame_equal(df, expected)\n\n # GH 6546\n # setting with mixed labels\n df = DataFrame({1: [1, 2], 2: [3, 4], \"a\": [\"a\", \"b\"]})\n\n result = df.loc[0, [1, 2]]\n expected = Series([1, 3], index=[1, 2], dtype=object, name=0)\n tm.assert_series_equal(result, expected)\n\n expected = DataFrame({1: [5, 2], 2: [6, 4], \"a\": [\"a\", \"b\"]})\n df.loc[0, [1, 2]] = [5, 6]\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_frame_multiples(self):\n # multiple setting\n df = DataFrame(\n {\"A\": [\"foo\", \"bar\", \"baz\"], \"B\": Series(range(3), dtype=np.int64)}\n )\n rhs = df.loc[1:2]\n rhs.index = df.index[0:2]\n df.loc[0:1] = rhs\n expected = DataFrame(\n {\"A\": [\"bar\", \"baz\", \"baz\"], \"B\": Series([1, 2, 2], 
dtype=np.int64)}\n )\n tm.assert_frame_equal(df, expected)\n\n # multiple setting with frame on rhs (with M8)\n df = DataFrame(\n {\n \"date\": date_range(\"2000-01-01\", \"2000-01-5\"),\n \"val\": Series(range(5), dtype=np.int64),\n }\n )\n expected = DataFrame(\n {\n \"date\": [\n Timestamp(\"20000101\"),\n Timestamp(\"20000102\"),\n Timestamp(\"20000101\"),\n Timestamp(\"20000102\"),\n Timestamp(\"20000103\"),\n ],\n \"val\": Series([0, 1, 0, 1, 2], dtype=np.int64),\n }\n )\n rhs = df.loc[0:2]\n rhs.index = df.index[2:5]\n df.loc[2:4] = rhs\n tm.assert_frame_equal(df, expected)\n\n @pytest.mark.parametrize(\n \"indexer\", [[\"A\"], slice(None, \"A\", None), np.array([\"A\"])]\n )\n @pytest.mark.parametrize(\"value\", [[\"Z\"], np.array([\"Z\"])])\n def test_loc_setitem_with_scalar_index(self, indexer, value):\n # GH #19474\n # assigning like \"df.loc[0, ['A']] = ['Z']\" should be evaluated\n # elementwisely, not using \"setter('A', ['Z'])\".\n\n df = DataFrame([[1, 2], [3, 4]], columns=[\"A\", \"B\"])\n df.loc[0, indexer] = value\n result = df.loc[0, \"A\"]\n\n assert is_scalar(result) and result == \"Z\"\n\n @pytest.mark.parametrize(\n \"index,box,expected\",\n [\n (\n ([0, 2], [\"A\", \"B\", \"C\", \"D\"]),\n 7,\n DataFrame(\n [[7, 7, 7, 7], [3, 4, np.nan, np.nan], [7, 7, 7, 7]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n ),\n ),\n (\n (1, [\"C\", \"D\"]),\n [7, 8],\n DataFrame(\n [[1, 2, np.nan, np.nan], [3, 4, 7, 8], [5, 6, np.nan, np.nan]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n ),\n ),\n (\n (1, [\"A\", \"B\", \"C\"]),\n np.array([7, 8, 9], dtype=np.int64),\n DataFrame(\n [[1, 2, np.nan], [7, 8, 9], [5, 6, np.nan]], columns=[\"A\", \"B\", \"C\"]\n ),\n ),\n (\n (slice(1, 3, None), [\"B\", \"C\", \"D\"]),\n [[7, 8, 9], [10, 11, 12]],\n DataFrame(\n [[1, 2, np.nan, np.nan], [3, 7, 8, 9], [5, 10, 11, 12]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n ),\n ),\n (\n (slice(1, 3, None), [\"C\", \"A\", \"D\"]),\n np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int64),\n DataFrame(\n [[1, 2, np.nan, np.nan], [8, 4, 7, 9], [11, 6, 10, 12]],\n columns=[\"A\", \"B\", \"C\", \"D\"],\n ),\n ),\n (\n (slice(None, None, None), [\"A\", \"C\"]),\n DataFrame([[7, 8], [9, 10], [11, 12]], columns=[\"A\", \"C\"]),\n DataFrame(\n [[7, 2, 8], [9, 4, 10], [11, 6, 12]], columns=[\"A\", \"B\", \"C\"]\n ),\n ),\n ],\n )\n def test_loc_setitem_missing_columns(self, index, box, expected):\n # GH 29334\n df = DataFrame([[1, 2], [3, 4], [5, 6]], columns=[\"A\", \"B\"])\n df.loc[index] = box\n tm.assert_frame_equal(df, expected)\n\n def test_loc_coercion(self):\n\n # 12411\n df = DataFrame({\"date\": [Timestamp(\"20130101\").tz_localize(\"UTC\"), pd.NaT]})\n expected = df.dtypes\n\n result = df.iloc[[0]]\n tm.assert_series_equal(result.dtypes, expected)\n\n result = df.iloc[[1]]\n tm.assert_series_equal(result.dtypes, expected)\n\n # 12045\n import datetime\n\n df = DataFrame(\n {\"date\": [datetime.datetime(2012, 1, 1), datetime.datetime(1012, 1, 2)]}\n )\n expected = df.dtypes\n\n result = df.iloc[[0]]\n tm.assert_series_equal(result.dtypes, expected)\n\n result = df.iloc[[1]]\n tm.assert_series_equal(result.dtypes, expected)\n\n # 11594\n df = DataFrame({\"text\": [\"some words\"] + [None] * 9})\n expected = df.dtypes\n\n result = df.iloc[0:2]\n tm.assert_series_equal(result.dtypes, expected)\n\n result = df.iloc[3:]\n tm.assert_series_equal(result.dtypes, expected)\n\n def test_setitem_new_key_tz(self):\n # GH#12862 should not raise on assigning the second value\n vals = [\n 
pd.to_datetime(42).tz_localize(\"UTC\"),\n pd.to_datetime(666).tz_localize(\"UTC\"),\n ]\n expected = Series(vals, index=[\"foo\", \"bar\"])\n\n ser = Series(dtype=object)\n ser[\"foo\"] = vals[0]\n ser[\"bar\"] = vals[1]\n\n tm.assert_series_equal(ser, expected)\n\n ser = Series(dtype=object)\n ser.loc[\"foo\"] = vals[0]\n ser.loc[\"bar\"] = vals[1]\n\n tm.assert_series_equal(ser, expected)\n\n def test_loc_non_unique(self):\n # GH3659\n # non-unique indexer with loc slice\n # https://groups.google.com/forum/?fromgroups#!topic/pydata/zTm2No0crYs\n\n # these are going to raise because the we are non monotonic\n df = DataFrame(\n {\"A\": [1, 2, 3, 4, 5, 6], \"B\": [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]\n )\n msg = \"'Cannot get left slice bound for non-unique label: 1'\"\n with pytest.raises(KeyError, match=msg):\n df.loc[1:]\n msg = \"'Cannot get left slice bound for non-unique label: 0'\"\n with pytest.raises(KeyError, match=msg):\n df.loc[0:]\n msg = \"'Cannot get left slice bound for non-unique label: 1'\"\n with pytest.raises(KeyError, match=msg):\n df.loc[1:2]\n\n # monotonic are ok\n df = DataFrame(\n {\"A\": [1, 2, 3, 4, 5, 6], \"B\": [3, 4, 5, 6, 7, 8]}, index=[0, 1, 0, 1, 2, 3]\n ).sort_index(axis=0)\n result = df.loc[1:]\n expected = DataFrame({\"A\": [2, 4, 5, 6], \"B\": [4, 6, 7, 8]}, index=[1, 1, 2, 3])\n tm.assert_frame_equal(result, expected)\n\n result = df.loc[0:]\n tm.assert_frame_equal(result, df)\n\n result = df.loc[1:2]\n expected = DataFrame({\"A\": [2, 4, 5], \"B\": [4, 6, 7]}, index=[1, 1, 2])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.arm_slow\n def test_loc_non_unique_memory_error(self):\n\n # GH 4280\n # non_unique index with a large selection triggers a memory error\n\n columns = list(\"ABCDEFG\")\n\n def gen_test(length, l2):\n return pd.concat(\n [\n DataFrame(\n np.random.randn(length, len(columns)),\n index=np.arange(length),\n columns=columns,\n ),\n DataFrame(\n np.ones((l2, len(columns))), index=[0] * l2, columns=columns\n ),\n ]\n )\n\n def gen_expected(df, mask):\n len_mask = len(mask)\n return pd.concat(\n [\n df.take([0]),\n DataFrame(\n np.ones((len_mask, len(columns))),\n index=[0] * len_mask,\n columns=columns,\n ),\n df.take(mask[1:]),\n ]\n )\n\n df = gen_test(900, 100)\n assert df.index.is_unique is False\n\n mask = np.arange(100)\n result = df.loc[mask]\n expected = gen_expected(df, mask)\n tm.assert_frame_equal(result, expected)\n\n df = gen_test(900000, 100000)\n assert df.index.is_unique is False\n\n mask = np.arange(100000)\n result = df.loc[mask]\n expected = gen_expected(df, mask)\n tm.assert_frame_equal(result, expected)\n\n def test_loc_name(self):\n # GH 3880\n df = DataFrame([[1, 1], [1, 1]])\n df.index.name = \"index_name\"\n result = df.iloc[[0, 1]].index.name\n assert result == \"index_name\"\n\n result = df.loc[[0, 1]].index.name\n assert result == \"index_name\"\n\n def test_loc_empty_list_indexer_is_ok(self):\n\n df = tm.makeCustomDataframe(5, 2)\n # vertical empty\n tm.assert_frame_equal(\n df.loc[:, []], df.iloc[:, :0], check_index_type=True, check_column_type=True\n )\n # horizontal empty\n tm.assert_frame_equal(\n df.loc[[], :], df.iloc[:0, :], check_index_type=True, check_column_type=True\n )\n # horizontal empty\n tm.assert_frame_equal(\n df.loc[[]], df.iloc[:0, :], check_index_type=True, check_column_type=True\n )\n\n def test_identity_slice_returns_new_object(self):\n # GH13873\n original_df = DataFrame({\"a\": [1, 2, 3]})\n sliced_df = original_df.loc[:]\n assert sliced_df is not original_df\n 
assert original_df[:] is not original_df\n\n # should be a shallow copy\n original_df[\"a\"] = [4, 4, 4]\n assert (sliced_df[\"a\"] == 4).all()\n\n # These should not return copies\n assert original_df is original_df.loc[:, :]\n df = DataFrame(np.random.randn(10, 4))\n assert df[0] is df.loc[:, 0]\n\n # Same tests for Series\n original_series = Series([1, 2, 3, 4, 5, 6])\n sliced_series = original_series.loc[:]\n assert sliced_series is not original_series\n assert original_series[:] is not original_series\n\n original_series[:3] = [7, 8, 9]\n assert all(sliced_series[:3] == [7, 8, 9])\n\n @pytest.mark.xfail(reason=\"accidental fix reverted - GH37497\")\n def test_loc_copy_vs_view(self):\n # GH 15631\n x = DataFrame(zip(range(3), range(3)), columns=[\"a\", \"b\"])\n\n y = x.copy()\n q = y.loc[:, \"a\"]\n q += 2\n\n tm.assert_frame_equal(x, y)\n\n z = x.copy()\n q = z.loc[x.index, \"a\"]\n q += 2\n\n tm.assert_frame_equal(x, z)\n\n def test_loc_uint64(self):\n # GH20722\n # Test whether loc accept uint64 max value as index.\n s = Series([1, 2], index=[np.iinfo(\"uint64\").max - 1, np.iinfo(\"uint64\").max])\n\n result = s.loc[np.iinfo(\"uint64\").max - 1]\n expected = s.iloc[0]\n assert result == expected\n\n result = s.loc[[np.iinfo(\"uint64\").max - 1]]\n expected = s.iloc[[0]]\n tm.assert_series_equal(result, expected)\n\n result = s.loc[[np.iinfo(\"uint64\").max - 1, np.iinfo(\"uint64\").max]]\n tm.assert_series_equal(result, s)\n\n def test_loc_setitem_empty_append_expands_rows(self):\n # GH6173, various appends to an empty dataframe\n\n data = [1, 2, 3]\n expected = DataFrame({\"x\": data, \"y\": [None] * len(data)})\n\n # appends to fit length of data\n df = DataFrame(columns=[\"x\", \"y\"])\n df.loc[:, \"x\"] = data\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_empty_append_expands_rows_mixed_dtype(self):\n # GH#37932 same as test_loc_setitem_empty_append_expands_rows\n # but with mixed dtype so we go through take_split_path\n data = [1, 2, 3]\n expected = DataFrame({\"x\": data, \"y\": [None] * len(data)})\n\n df = DataFrame(columns=[\"x\", \"y\"])\n df[\"x\"] = df[\"x\"].astype(np.int64)\n df.loc[:, \"x\"] = data\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_empty_append_single_value(self):\n # only appends one value\n expected = DataFrame({\"x\": [1.0], \"y\": [np.nan]})\n df = DataFrame(columns=[\"x\", \"y\"], dtype=float)\n df.loc[0, \"x\"] = expected.loc[0, \"x\"]\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_empty_append_raises(self):\n # GH6173, various appends to an empty dataframe\n\n data = [1, 2]\n df = DataFrame(columns=[\"x\", \"y\"])\n df.index = df.index.astype(np.int64)\n msg = (\n r\"None of \\[Int64Index\\(\\[0, 1\\], dtype='int64'\\)\\] \"\n r\"are in the \\[index\\]\"\n )\n with pytest.raises(KeyError, match=msg):\n df.loc[[0, 1], \"x\"] = data\n\n msg = \"|\".join(\n [\n \"cannot copy sequence with size 2 to array axis with dimension 0\",\n r\"could not broadcast input array from shape \\(2,\\) into shape \\(0,\\)\",\n ]\n )\n with pytest.raises(ValueError, match=msg):\n df.loc[0:2, \"x\"] = data\n\n def test_indexing_zerodim_np_array(self):\n # GH24924\n df = DataFrame([[1, 2], [3, 4]])\n result = df.loc[np.array(0)]\n s = Series([1, 2], name=0)\n tm.assert_series_equal(result, s)\n\n def test_series_indexing_zerodim_np_array(self):\n # GH24924\n s = Series([1, 2])\n result = s.loc[np.array(0)]\n assert result == 1\n\n def test_loc_reverse_assignment(self):\n # GH26939\n data = [1, 2, 3, 4, 5, 6] + [None] * 
4\n expected = Series(data, index=range(2010, 2020))\n\n result = Series(index=range(2010, 2020), dtype=np.float64)\n result.loc[2015:2010:-1] = [6, 5, 4, 3, 2, 1]\n\n tm.assert_series_equal(result, expected)\n\n def test_loc_setitem_str_to_small_float_conversion_type(self):\n # GH#20388\n np.random.seed(13)\n col_data = [str(np.random.random() * 1e-12) for _ in range(5)]\n result = DataFrame(col_data, columns=[\"A\"])\n expected = DataFrame(col_data, columns=[\"A\"], dtype=object)\n tm.assert_frame_equal(result, expected)\n\n # change the dtype of the elements from object to float one by one\n result.loc[result.index, \"A\"] = [float(x) for x in col_data]\n expected = DataFrame(col_data, columns=[\"A\"], dtype=float)\n tm.assert_frame_equal(result, expected)\n\n def test_loc_getitem_time_object(self, frame_or_series):\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\n mask = (rng.hour == 9) & (rng.minute == 30)\n\n obj = DataFrame(np.random.randn(len(rng), 3), index=rng)\n if frame_or_series is Series:\n obj = obj[0]\n\n result = obj.loc[time(9, 30)]\n exp = obj.loc[mask]\n tm.assert_equal(result, exp)\n\n chunk = obj.loc[\"1/4/2000\":]\n result = chunk.loc[time(9, 30)]\n expected = result[-1:]\n\n # Without resetting the freqs, these are 5 min and 1440 min, respectively\n result.index = result.index._with_freq(None)\n expected.index = expected.index._with_freq(None)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\"spmatrix_t\", [\"coo_matrix\", \"csc_matrix\", \"csr_matrix\"])\n @pytest.mark.parametrize(\"dtype\", [np.int64, np.float64, complex])\n @td.skip_if_no_scipy\n def test_loc_getitem_range_from_spmatrix(self, spmatrix_t, dtype):\n import scipy.sparse\n\n spmatrix_t = getattr(scipy.sparse, spmatrix_t)\n\n # The bug is triggered by a sparse matrix with purely sparse columns. 
So the\n # recipe below generates a rectangular matrix of dimension (5, 7) where all the\n # diagonal cells are ones, meaning the last two columns are purely sparse.\n rows, cols = 5, 7\n spmatrix = spmatrix_t(np.eye(rows, cols, dtype=dtype), dtype=dtype)\n df = DataFrame.sparse.from_spmatrix(spmatrix)\n\n # regression test for GH#34526\n itr_idx = range(2, rows)\n result = df.loc[itr_idx].values\n expected = spmatrix.toarray()[itr_idx]\n tm.assert_numpy_array_equal(result, expected)\n\n # regression test for GH#34540\n result = df.loc[itr_idx].dtypes.values\n expected = np.full(cols, SparseDtype(dtype, fill_value=0))\n tm.assert_numpy_array_equal(result, expected)\n\n def test_loc_getitem_listlike_all_retains_sparse(self):\n df = DataFrame({\"A\": pd.array([0, 0], dtype=SparseDtype(\"int64\"))})\n result = df.loc[[0, 1]]\n tm.assert_frame_equal(result, df)\n\n @pytest.mark.parametrize(\"key_type\", [iter, np.array, Series, Index])\n def test_loc_getitem_iterable(self, float_frame, key_type):\n idx = key_type([\"A\", \"B\", \"C\"])\n result = float_frame.loc[:, idx]\n expected = float_frame.loc[:, [\"A\", \"B\", \"C\"]]\n tm.assert_frame_equal(result, expected)\n\n def test_loc_getitem_timedelta_0seconds(self):\n # GH#10583\n df = DataFrame(np.random.normal(size=(10, 4)))\n df.index = timedelta_range(start=\"0s\", periods=10, freq=\"s\")\n expected = df.loc[Timedelta(\"0s\") :, :]\n result = df.loc[\"0s\":, :]\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.parametrize(\n \"val,expected\", [(2 ** 63 - 1, Series([1])), (2 ** 63, Series([2]))]\n )\n def test_loc_getitem_uint64_scalar(self, val, expected):\n # see GH#19399\n df = DataFrame([1, 2], index=[2 ** 63 - 1, 2 ** 63])\n result = df.loc[val]\n\n expected.name = val\n tm.assert_series_equal(result, expected)\n\n def test_loc_setitem_int_label_with_float64index(self):\n # note labels are floats\n ser = Series([\"a\", \"b\", \"c\"], index=[0, 0.5, 1])\n tmp = ser.copy()\n\n ser.loc[1] = \"zoo\"\n tmp.iloc[2] = \"zoo\"\n\n tm.assert_series_equal(ser, tmp)\n\n @pytest.mark.parametrize(\n \"indexer, expected\",\n [\n # The test name is a misnomer in the 0 case as df.index[indexer]\n # is a scalar.\n (0, [20, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n (slice(4, 8), [0, 1, 2, 3, 20, 20, 20, 20, 8, 9]),\n ([3, 5], [0, 1, 2, 20, 4, 20, 6, 7, 8, 9]),\n ],\n )\n def test_loc_setitem_listlike_with_timedelta64index(self, indexer, expected):\n # GH#16637\n tdi = to_timedelta(range(10), unit=\"s\")\n df = DataFrame({\"x\": range(10)}, dtype=\"int64\", index=tdi)\n\n df.loc[df.index[indexer], \"x\"] = 20\n\n expected = DataFrame(\n expected,\n index=tdi,\n columns=[\"x\"],\n dtype=\"int64\",\n )\n\n tm.assert_frame_equal(expected, df)\n\n\nclass TestLocWithMultiIndex:\n @pytest.mark.parametrize(\n \"keys, expected\",\n [\n ([\"b\", \"a\"], [[\"b\", \"b\", \"a\", \"a\"], [1, 2, 1, 2]]),\n ([\"a\", \"b\"], [[\"a\", \"a\", \"b\", \"b\"], [1, 2, 1, 2]]),\n (([\"a\", \"b\"], [1, 2]), [[\"a\", \"a\", \"b\", \"b\"], [1, 2, 1, 2]]),\n (([\"a\", \"b\"], [2, 1]), [[\"a\", \"a\", \"b\", \"b\"], [2, 1, 2, 1]]),\n (([\"b\", \"a\"], [2, 1]), [[\"b\", \"b\", \"a\", \"a\"], [2, 1, 2, 1]]),\n (([\"b\", \"a\"], [1, 2]), [[\"b\", \"b\", \"a\", \"a\"], [1, 2, 1, 2]]),\n (([\"c\", \"a\"], [2, 1]), [[\"c\", \"a\", \"a\"], [1, 2, 1]]),\n ],\n )\n @pytest.mark.parametrize(\"dim\", [\"index\", \"columns\"])\n def test_loc_getitem_multilevel_index_order(self, dim, keys, expected):\n # GH#22797\n # Try to respect order of keys given for MultiIndex.loc\n kwargs = {dim: [[\"c\", 
\"a\", \"a\", \"b\", \"b\"], [1, 1, 2, 1, 2]]}\n df = DataFrame(np.arange(25).reshape(5, 5), **kwargs)\n exp_index = MultiIndex.from_arrays(expected)\n if dim == \"index\":\n res = df.loc[keys, :]\n tm.assert_index_equal(res.index, exp_index)\n elif dim == \"columns\":\n res = df.loc[:, keys]\n tm.assert_index_equal(res.columns, exp_index)\n\n def test_loc_preserve_names(self, multiindex_year_month_day_dataframe_random_data):\n ymd = multiindex_year_month_day_dataframe_random_data\n\n result = ymd.loc[2000]\n result2 = ymd[\"A\"].loc[2000]\n assert result.index.names == ymd.index.names[1:]\n assert result2.index.names == ymd.index.names[1:]\n\n result = ymd.loc[2000, 2]\n result2 = ymd[\"A\"].loc[2000, 2]\n assert result.index.name == ymd.index.names[2]\n assert result2.index.name == ymd.index.names[2]\n\n def test_loc_getitem_multiindex_nonunique_len_zero(self):\n # GH#13691\n mi = MultiIndex.from_product([[0], [1, 1]])\n ser = Series(0, index=mi)\n\n res = ser.loc[[]]\n\n expected = ser[:0]\n tm.assert_series_equal(res, expected)\n\n res2 = ser.loc[ser.iloc[0:0]]\n tm.assert_series_equal(res2, expected)\n\n def test_loc_getitem_access_none_value_in_multiindex(self):\n # GH#34318: test that you can access a None value using .loc\n # through a Multiindex\n\n ser = Series([None], pd.MultiIndex.from_arrays([[\"Level1\"], [\"Level2\"]]))\n result = ser.loc[(\"Level1\", \"Level2\")]\n assert result is None\n\n midx = MultiIndex.from_product([[\"Level1\"], [\"Level2_a\", \"Level2_b\"]])\n ser = Series([None] * len(midx), dtype=object, index=midx)\n result = ser.loc[(\"Level1\", \"Level2_a\")]\n assert result is None\n\n ser = Series([1] * len(midx), dtype=object, index=midx)\n result = ser.loc[(\"Level1\", \"Level2_a\")]\n assert result == 1\n\n def test_loc_setitem_multiindex_slice(self):\n # GH 34870\n\n index = pd.MultiIndex.from_tuples(\n zip(\n [\"bar\", \"bar\", \"baz\", \"baz\", \"foo\", \"foo\", \"qux\", \"qux\"],\n [\"one\", \"two\", \"one\", \"two\", \"one\", \"two\", \"one\", \"two\"],\n ),\n names=[\"first\", \"second\"],\n )\n\n result = Series([1, 1, 1, 1, 1, 1, 1, 1], index=index)\n result.loc[(\"baz\", \"one\"):(\"foo\", \"two\")] = 100\n\n expected = Series([1, 1, 100, 100, 100, 100, 1, 1], index=index)\n\n tm.assert_series_equal(result, expected)\n\n def test_loc_getitem_slice_datetime_objs_with_datetimeindex(self):\n times = date_range(\"2000-01-01\", freq=\"10min\", periods=100000)\n ser = Series(range(100000), times)\n result = ser.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]\n tm.assert_series_equal(result, ser)\n\n def test_loc_getitem_sorted_index_level_with_duplicates(self):\n # GH#4516 sorting a MultiIndex with duplicates and multiple dtypes\n mi = MultiIndex.from_tuples(\n [\n (\"foo\", \"bar\"),\n (\"foo\", \"bar\"),\n (\"bah\", \"bam\"),\n (\"bah\", \"bam\"),\n (\"foo\", \"bar\"),\n (\"bah\", \"bam\"),\n ],\n names=[\"A\", \"B\"],\n )\n df = DataFrame(\n [\n [1.0, 1],\n [2.0, 2],\n [3.0, 3],\n [4.0, 4],\n [5.0, 5],\n [6.0, 6],\n ],\n index=mi,\n columns=[\"C\", \"D\"],\n )\n df = df.sort_index(level=0)\n\n expected = DataFrame(\n [[1.0, 1], [2.0, 2], [5.0, 5]], columns=[\"C\", \"D\"], index=mi.take([0, 1, 4])\n )\n\n result = df.loc[(\"foo\", \"bar\")]\n tm.assert_frame_equal(result, expected)\n\n\nclass TestLocSetitemWithExpansion:\n @pytest.mark.slow\n def test_loc_setitem_with_expansion_large_dataframe(self):\n # GH#10692\n result = DataFrame({\"x\": range(10 ** 6)}, dtype=\"int64\")\n result.loc[len(result)] = len(result) + 1\n expected = DataFrame({\"x\": 
range(10 ** 6 + 1)}, dtype=\"int64\")\n tm.assert_frame_equal(result, expected)\n\n def test_loc_setitem_empty_series(self):\n # GH#5226\n\n # partially set with an empty object series\n ser = Series(dtype=object)\n ser.loc[1] = 1\n tm.assert_series_equal(ser, Series([1], index=[1]))\n ser.loc[3] = 3\n tm.assert_series_equal(ser, Series([1, 3], index=[1, 3]))\n\n ser = Series(dtype=object)\n ser.loc[1] = 1.0\n tm.assert_series_equal(ser, Series([1.0], index=[1]))\n ser.loc[3] = 3.0\n tm.assert_series_equal(ser, Series([1.0, 3.0], index=[1, 3]))\n\n ser = Series(dtype=object)\n ser.loc[\"foo\"] = 1\n tm.assert_series_equal(ser, Series([1], index=[\"foo\"]))\n ser.loc[\"bar\"] = 3\n tm.assert_series_equal(ser, Series([1, 3], index=[\"foo\", \"bar\"]))\n ser.loc[3] = 4\n tm.assert_series_equal(ser, Series([1, 3, 4], index=[\"foo\", \"bar\", 3]))\n\n def test_loc_setitem_incremental_with_dst(self):\n # GH#20724\n base = datetime(2015, 11, 1, tzinfo=gettz(\"US/Pacific\"))\n idxs = [base + timedelta(seconds=i * 900) for i in range(16)]\n result = Series([0], index=[idxs[0]])\n for ts in idxs:\n result.loc[ts] = 1\n expected = Series(1, index=idxs)\n tm.assert_series_equal(result, expected)\n\n def test_loc_setitem_datetime_keys_cast(self):\n # GH#9516\n dt1 = Timestamp(\"20130101 09:00:00\")\n dt2 = Timestamp(\"20130101 10:00:00\")\n\n for conv in [\n lambda x: x,\n lambda x: x.to_datetime64(),\n lambda x: x.to_pydatetime(),\n lambda x: np.datetime64(x),\n ]:\n\n df = DataFrame()\n df.loc[conv(dt1), \"one\"] = 100\n df.loc[conv(dt2), \"one\"] = 200\n\n expected = DataFrame({\"one\": [100.0, 200.0]}, index=[dt1, dt2])\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_categorical_column_retains_dtype(self, ordered):\n # GH16360\n result = DataFrame({\"A\": [1]})\n result.loc[:, \"B\"] = Categorical([\"b\"], ordered=ordered)\n expected = DataFrame({\"A\": [1], \"B\": Categorical([\"b\"], ordered=ordered)})\n tm.assert_frame_equal(result, expected)\n\n\nclass TestLocCallable:\n def test_frame_loc_getitem_callable(self):\n # GH#11485\n df = DataFrame({\"A\": [1, 2, 3, 4], \"B\": list(\"aabb\"), \"C\": [1, 2, 3, 4]})\n # iloc cannot use boolean Series (see GH3635)\n\n # return bool indexer\n res = df.loc[lambda x: x.A > 2]\n tm.assert_frame_equal(res, df.loc[df.A > 2])\n\n res = df.loc[lambda x: x.A > 2]\n tm.assert_frame_equal(res, df.loc[df.A > 2])\n\n res = df.loc[lambda x: x.A > 2]\n tm.assert_frame_equal(res, df.loc[df.A > 2])\n\n res = df.loc[lambda x: x.A > 2]\n tm.assert_frame_equal(res, df.loc[df.A > 2])\n\n res = df.loc[lambda x: x.B == \"b\", :]\n tm.assert_frame_equal(res, df.loc[df.B == \"b\", :])\n\n res = df.loc[lambda x: x.B == \"b\", :]\n tm.assert_frame_equal(res, df.loc[df.B == \"b\", :])\n\n res = df.loc[lambda x: x.A > 2, lambda x: x.columns == \"B\"]\n tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])\n\n res = df.loc[lambda x: x.A > 2, lambda x: x.columns == \"B\"]\n tm.assert_frame_equal(res, df.loc[df.A > 2, [False, True, False]])\n\n res = df.loc[lambda x: x.A > 2, lambda x: \"B\"]\n tm.assert_series_equal(res, df.loc[df.A > 2, \"B\"])\n\n res = df.loc[lambda x: x.A > 2, lambda x: \"B\"]\n tm.assert_series_equal(res, df.loc[df.A > 2, \"B\"])\n\n res = df.loc[lambda x: x.A > 2, lambda x: [\"A\", \"B\"]]\n tm.assert_frame_equal(res, df.loc[df.A > 2, [\"A\", \"B\"]])\n\n res = df.loc[lambda x: x.A > 2, lambda x: [\"A\", \"B\"]]\n tm.assert_frame_equal(res, df.loc[df.A > 2, [\"A\", \"B\"]])\n\n res = df.loc[lambda x: x.A == 2, lambda x: [\"A\", 
\"B\"]]\n tm.assert_frame_equal(res, df.loc[df.A == 2, [\"A\", \"B\"]])\n\n res = df.loc[lambda x: x.A == 2, lambda x: [\"A\", \"B\"]]\n tm.assert_frame_equal(res, df.loc[df.A == 2, [\"A\", \"B\"]])\n\n # scalar\n res = df.loc[lambda x: 1, lambda x: \"A\"]\n assert res == df.loc[1, \"A\"]\n\n res = df.loc[lambda x: 1, lambda x: \"A\"]\n assert res == df.loc[1, \"A\"]\n\n def test_frame_loc_getitem_callable_mixture(self):\n # GH#11485\n df = DataFrame({\"A\": [1, 2, 3, 4], \"B\": list(\"aabb\"), \"C\": [1, 2, 3, 4]})\n\n res = df.loc[lambda x: x.A > 2, [\"A\", \"B\"]]\n tm.assert_frame_equal(res, df.loc[df.A > 2, [\"A\", \"B\"]])\n\n res = df.loc[lambda x: x.A > 2, [\"A\", \"B\"]]\n tm.assert_frame_equal(res, df.loc[df.A > 2, [\"A\", \"B\"]])\n\n res = df.loc[[2, 3], lambda x: [\"A\", \"B\"]]\n tm.assert_frame_equal(res, df.loc[[2, 3], [\"A\", \"B\"]])\n\n res = df.loc[[2, 3], lambda x: [\"A\", \"B\"]]\n tm.assert_frame_equal(res, df.loc[[2, 3], [\"A\", \"B\"]])\n\n res = df.loc[3, lambda x: [\"A\", \"B\"]]\n tm.assert_series_equal(res, df.loc[3, [\"A\", \"B\"]])\n\n res = df.loc[3, lambda x: [\"A\", \"B\"]]\n tm.assert_series_equal(res, df.loc[3, [\"A\", \"B\"]])\n\n def test_frame_loc_getitem_callable_labels(self):\n # GH#11485\n df = DataFrame({\"X\": [1, 2, 3, 4], \"Y\": list(\"aabb\")}, index=list(\"ABCD\"))\n\n # return label\n res = df.loc[lambda x: [\"A\", \"C\"]]\n tm.assert_frame_equal(res, df.loc[[\"A\", \"C\"]])\n\n res = df.loc[lambda x: [\"A\", \"C\"]]\n tm.assert_frame_equal(res, df.loc[[\"A\", \"C\"]])\n\n res = df.loc[lambda x: [\"A\", \"C\"], :]\n tm.assert_frame_equal(res, df.loc[[\"A\", \"C\"], :])\n\n res = df.loc[lambda x: [\"A\", \"C\"], lambda x: \"X\"]\n tm.assert_series_equal(res, df.loc[[\"A\", \"C\"], \"X\"])\n\n res = df.loc[lambda x: [\"A\", \"C\"], lambda x: [\"X\"]]\n tm.assert_frame_equal(res, df.loc[[\"A\", \"C\"], [\"X\"]])\n\n # mixture\n res = df.loc[[\"A\", \"C\"], lambda x: \"X\"]\n tm.assert_series_equal(res, df.loc[[\"A\", \"C\"], \"X\"])\n\n res = df.loc[[\"A\", \"C\"], lambda x: [\"X\"]]\n tm.assert_frame_equal(res, df.loc[[\"A\", \"C\"], [\"X\"]])\n\n res = df.loc[lambda x: [\"A\", \"C\"], \"X\"]\n tm.assert_series_equal(res, df.loc[[\"A\", \"C\"], \"X\"])\n\n res = df.loc[lambda x: [\"A\", \"C\"], [\"X\"]]\n tm.assert_frame_equal(res, df.loc[[\"A\", \"C\"], [\"X\"]])\n\n def test_frame_loc_setitem_callable(self):\n # GH#11485\n df = DataFrame({\"X\": [1, 2, 3, 4], \"Y\": list(\"aabb\")}, index=list(\"ABCD\"))\n\n # return label\n res = df.copy()\n res.loc[lambda x: [\"A\", \"C\"]] = -20\n exp = df.copy()\n exp.loc[[\"A\", \"C\"]] = -20\n tm.assert_frame_equal(res, exp)\n\n res = df.copy()\n res.loc[lambda x: [\"A\", \"C\"], :] = 20\n exp = df.copy()\n exp.loc[[\"A\", \"C\"], :] = 20\n tm.assert_frame_equal(res, exp)\n\n res = df.copy()\n res.loc[lambda x: [\"A\", \"C\"], lambda x: \"X\"] = -1\n exp = df.copy()\n exp.loc[[\"A\", \"C\"], \"X\"] = -1\n tm.assert_frame_equal(res, exp)\n\n res = df.copy()\n res.loc[lambda x: [\"A\", \"C\"], lambda x: [\"X\"]] = [5, 10]\n exp = df.copy()\n exp.loc[[\"A\", \"C\"], [\"X\"]] = [5, 10]\n tm.assert_frame_equal(res, exp)\n\n # mixture\n res = df.copy()\n res.loc[[\"A\", \"C\"], lambda x: \"X\"] = np.array([-1, -2])\n exp = df.copy()\n exp.loc[[\"A\", \"C\"], \"X\"] = np.array([-1, -2])\n tm.assert_frame_equal(res, exp)\n\n res = df.copy()\n res.loc[[\"A\", \"C\"], lambda x: [\"X\"]] = 10\n exp = df.copy()\n exp.loc[[\"A\", \"C\"], [\"X\"]] = 10\n tm.assert_frame_equal(res, exp)\n\n res = df.copy()\n 
res.loc[lambda x: [\"A\", \"C\"], \"X\"] = -2\n exp = df.copy()\n exp.loc[[\"A\", \"C\"], \"X\"] = -2\n tm.assert_frame_equal(res, exp)\n\n res = df.copy()\n res.loc[lambda x: [\"A\", \"C\"], [\"X\"]] = -4\n exp = df.copy()\n exp.loc[[\"A\", \"C\"], [\"X\"]] = -4\n tm.assert_frame_equal(res, exp)\n\n\nclass TestPartialStringSlicing:\n def test_loc_getitem_partial_string_slicing_datetimeindex(self):\n # GH#35509\n df = DataFrame(\n {\"col1\": [\"a\", \"b\", \"c\"], \"col2\": [1, 2, 3]},\n index=to_datetime([\"2020-08-01\", \"2020-07-02\", \"2020-08-05\"]),\n )\n expected = DataFrame(\n {\"col1\": [\"a\", \"c\"], \"col2\": [1, 3]},\n index=to_datetime([\"2020-08-01\", \"2020-08-05\"]),\n )\n result = df.loc[\"2020-08\"]\n tm.assert_frame_equal(result, expected)\n\n def test_loc_getitem_partial_string_slicing_with_periodindex(self):\n pi = pd.period_range(start=\"2017-01-01\", end=\"2018-01-01\", freq=\"M\")\n ser = pi.to_series()\n result = ser.loc[:\"2017-12\"]\n expected = ser.iloc[:-1]\n\n tm.assert_series_equal(result, expected)\n\n def test_loc_getitem_partial_string_slicing_with_timedeltaindex(self):\n ix = timedelta_range(start=\"1 day\", end=\"2 days\", freq=\"1H\")\n ser = ix.to_series()\n result = ser.loc[:\"1 days\"]\n expected = ser.iloc[:-1]\n\n tm.assert_series_equal(result, expected)\n\n def test_loc_getitem_str_timedeltaindex(self):\n # GH#16896\n df = DataFrame({\"x\": range(3)}, index=to_timedelta(range(3), unit=\"days\"))\n expected = df.iloc[0]\n sliced = df.loc[\"0 days\"]\n tm.assert_series_equal(sliced, expected)\n\n @pytest.mark.parametrize(\"indexer_end\", [None, \"2020-01-02 23:59:59.999999999\"])\n def test_loc_getitem_partial_slice_non_monotonicity(\n self, tz_aware_fixture, indexer_end, frame_or_series\n ):\n # GH#33146\n obj = frame_or_series(\n [1] * 5,\n index=DatetimeIndex(\n [\n Timestamp(\"2019-12-30\"),\n Timestamp(\"2020-01-01\"),\n Timestamp(\"2019-12-25\"),\n Timestamp(\"2020-01-02 23:59:59.999999999\"),\n Timestamp(\"2019-12-19\"),\n ],\n tz=tz_aware_fixture,\n ),\n )\n expected = frame_or_series(\n [1] * 2,\n index=DatetimeIndex(\n [\n Timestamp(\"2020-01-01\"),\n Timestamp(\"2020-01-02 23:59:59.999999999\"),\n ],\n tz=tz_aware_fixture,\n ),\n )\n indexer = slice(\"2020-01-01\", indexer_end)\n\n result = obj[indexer]\n tm.assert_equal(result, expected)\n\n result = obj.loc[indexer]\n tm.assert_equal(result, expected)\n\n\nclass TestLabelSlicing:\n def test_loc_getitem_label_slice_across_dst(self):\n # GH#21846\n idx = date_range(\n \"2017-10-29 01:30:00\", tz=\"Europe/Berlin\", periods=5, freq=\"30 min\"\n )\n series2 = Series([0, 1, 2, 3, 4], index=idx)\n\n t_1 = Timestamp(\"2017-10-29 02:30:00+02:00\", tz=\"Europe/Berlin\", freq=\"30min\")\n t_2 = Timestamp(\"2017-10-29 02:00:00+01:00\", tz=\"Europe/Berlin\", freq=\"30min\")\n result = series2.loc[t_1:t_2]\n expected = Series([2, 3], index=idx[2:4])\n tm.assert_series_equal(result, expected)\n\n result = series2[t_1]\n expected = 2\n assert result == expected\n\n def test_loc_getitem_label_slice_period(self):\n ix = pd.period_range(start=\"2017-01-01\", end=\"2018-01-01\", freq=\"M\")\n ser = ix.to_series()\n result = ser.loc[: ix[-2]]\n expected = ser.iloc[:-1]\n\n tm.assert_series_equal(result, expected)\n\n def test_loc_getitem_label_slice_timedelta64(self):\n ix = timedelta_range(start=\"1 day\", end=\"2 days\", freq=\"1H\")\n ser = ix.to_series()\n result = ser.loc[: ix[-2]]\n expected = ser.iloc[:-1]\n\n tm.assert_series_equal(result, expected)\n\n def 
test_loc_getitem_slice_floats_inexact(self):\n index = [52195.504153, 52196.303147, 52198.369883]\n df = DataFrame(np.random.rand(3, 2), index=index)\n\n s1 = df.loc[52195.1:52196.5]\n assert len(s1) == 2\n\n s1 = df.loc[52195.1:52196.6]\n assert len(s1) == 2\n\n s1 = df.loc[52195.1:52198.9]\n assert len(s1) == 3\n\n def test_loc_getitem_float_slice_float64index(self):\n ser = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))\n\n assert len(ser.loc[12.0:]) == 8\n assert len(ser.loc[12.5:]) == 7\n\n idx = np.arange(10, 20, dtype=float)\n idx[2] = 12.2\n ser.index = idx\n assert len(ser.loc[12.0:]) == 8\n assert len(ser.loc[12.5:]) == 7\n\n @pytest.mark.parametrize(\n \"start,stop, expected_slice\",\n [\n [np.timedelta64(0, \"ns\"), None, slice(0, 11)],\n [np.timedelta64(1, \"D\"), np.timedelta64(6, \"D\"), slice(1, 7)],\n [None, np.timedelta64(4, \"D\"), slice(0, 5)],\n ],\n )\n def test_loc_getitem_slice_label_td64obj(self, start, stop, expected_slice):\n # GH#20393\n ser = Series(range(11), timedelta_range(\"0 days\", \"10 days\"))\n result = ser.loc[slice(start, stop)]\n expected = ser.iloc[expected_slice]\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"start\", [\"2018\", \"2020\"])\n def test_loc_getitem_slice_unordered_dt_index(self, frame_or_series, start):\n obj = frame_or_series(\n [1, 2, 3],\n index=[Timestamp(\"2016\"), Timestamp(\"2019\"), Timestamp(\"2017\")],\n )\n with tm.assert_produces_warning(FutureWarning):\n obj.loc[start:\"2022\"]\n\n @pytest.mark.parametrize(\"value\", [1, 1.5])\n def test_loc_getitem_slice_labels_int_in_object_index(self, frame_or_series, value):\n # GH: 26491\n obj = frame_or_series(range(4), index=[value, \"first\", 2, \"third\"])\n result = obj.loc[value:\"third\"]\n expected = frame_or_series(range(4), index=[value, \"first\", 2, \"third\"])\n tm.assert_equal(result, expected)\n\n def test_loc_getitem_slice_columns_mixed_dtype(self):\n # GH: 20975\n df = DataFrame({\"test\": 1, 1: 2, 2: 3}, index=[0])\n expected = DataFrame(\n data=[[2, 3]], index=[0], columns=Index([1, 2], dtype=object)\n )\n tm.assert_frame_equal(df.loc[:, 1:], expected)\n\n\nclass TestLocBooleanMask:\n def test_loc_setitem_bool_mask_timedeltaindex(self):\n # GH#14946\n df = DataFrame({\"x\": range(10)})\n df.index = to_timedelta(range(10), unit=\"s\")\n conditions = [df[\"x\"] > 3, df[\"x\"] == 3, df[\"x\"] < 3]\n expected_data = [\n [0, 1, 2, 3, 10, 10, 10, 10, 10, 10],\n [0, 1, 2, 10, 4, 5, 6, 7, 8, 9],\n [10, 10, 10, 3, 4, 5, 6, 7, 8, 9],\n ]\n for cond, data in zip(conditions, expected_data):\n result = df.copy()\n result.loc[cond, \"x\"] = 10\n\n expected = DataFrame(\n data,\n index=to_timedelta(range(10), unit=\"s\"),\n columns=[\"x\"],\n dtype=\"int64\",\n )\n tm.assert_frame_equal(expected, result)\n\n def test_loc_setitem_mask_with_datetimeindex_tz(self):\n # GH#16889\n # support .loc with alignment and tz-aware DatetimeIndex\n mask = np.array([True, False, True, False])\n\n idx = date_range(\"20010101\", periods=4, tz=\"UTC\")\n df = DataFrame({\"a\": np.arange(4)}, index=idx).astype(\"float64\")\n\n result = df.copy()\n result.loc[mask, :] = df.loc[mask, :]\n tm.assert_frame_equal(result, df)\n\n result = df.copy()\n result.loc[mask] = df.loc[mask]\n tm.assert_frame_equal(result, df)\n\n idx = date_range(\"20010101\", periods=4)\n df = DataFrame({\"a\": np.arange(4)}, index=idx).astype(\"float64\")\n\n result = df.copy()\n result.loc[mask, :] = df.loc[mask, :]\n tm.assert_frame_equal(result, df)\n\n result = df.copy()\n 
result.loc[mask] = df.loc[mask]\n tm.assert_frame_equal(result, df)\n\n def test_loc_setitem_mask_and_label_with_datetimeindex(self):\n # GH#9478\n # a datetimeindex alignment issue with partial setting\n df = DataFrame(\n np.arange(6.0).reshape(3, 2),\n columns=list(\"AB\"),\n index=date_range(\"1/1/2000\", periods=3, freq=\"1H\"),\n )\n expected = df.copy()\n expected[\"C\"] = [expected.index[0]] + [pd.NaT, pd.NaT]\n\n mask = df.A < 1\n df.loc[mask, \"C\"] = df.loc[mask].index\n tm.assert_frame_equal(df, expected)\n\n def test_loc_setitem_mask_td64_series_value(self):\n # GH#23462 key list of bools, value is a Series\n td1 = Timedelta(0)\n td2 = Timedelta(28767471428571405)\n df = DataFrame({\"col\": Series([td1, td2])})\n df_copy = df.copy()\n ser = Series([td1])\n\n expected = df[\"col\"].iloc[1].value\n df.loc[[True, False]] = ser\n result = df[\"col\"].iloc[1].value\n\n assert expected == result\n tm.assert_frame_equal(df, df_copy)\n\n\nclass TestLocListlike:\n @pytest.mark.parametrize(\"box\", [lambda x: x, np.asarray, list])\n def test_loc_getitem_list_of_labels_categoricalindex_with_na(self, box):\n # passing a list can include valid categories _or_ NA values\n ci = CategoricalIndex([\"A\", \"B\", np.nan])\n ser = Series(range(3), index=ci)\n\n result = ser.loc[box(ci)]\n tm.assert_series_equal(result, ser)\n\n result = ser[box(ci)]\n tm.assert_series_equal(result, ser)\n\n result = ser.to_frame().loc[box(ci)]\n tm.assert_frame_equal(result, ser.to_frame())\n\n ser2 = ser[:-1]\n ci2 = ci[1:]\n # but if there are no NAs present, this should raise KeyError\n msg = (\n r\"Passing list-likes to .loc or \\[\\] with any missing labels is no \"\n \"longer supported. The following labels were missing: \"\n r\"(Categorical)?Index\\(\\[nan\\], .*\\). 
\"\n \"See https\"\n )\n with pytest.raises(KeyError, match=msg):\n ser2.loc[box(ci2)]\n\n with pytest.raises(KeyError, match=msg):\n ser2[box(ci2)]\n\n with pytest.raises(KeyError, match=msg):\n ser2.to_frame().loc[box(ci2)]\n\n\ndef test_series_loc_getitem_label_list_missing_values():\n # gh-11428\n key = np.array(\n [\"2001-01-04\", \"2001-01-02\", \"2001-01-04\", \"2001-01-14\"], dtype=\"datetime64\"\n )\n s = Series([2, 5, 8, 11], date_range(\"2001-01-01\", freq=\"D\", periods=4))\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s.loc[key]\n\n\ndef test_series_getitem_label_list_missing_integer_values():\n # GH: 25927\n s = Series(\n index=np.array([9730701000001104, 10049011000001109]),\n data=np.array([999000011000001104, 999000011000001104]),\n )\n with pytest.raises(KeyError, match=\"with any missing labels\"):\n s.loc[np.array([9730701000001104, 10047311000001102])]\n\n\[email protected](\n \"columns, column_key, expected_columns\",\n [\n ([2011, 2012, 2013], [2011, 2012], [0, 1]),\n ([2011, 2012, \"All\"], [2011, 2012], [0, 1]),\n ([2011, 2012, \"All\"], [2011, \"All\"], [0, 2]),\n ],\n)\ndef test_loc_getitem_label_list_integer_labels(columns, column_key, expected_columns):\n # gh-14836\n df = DataFrame(np.random.rand(3, 3), columns=columns, index=list(\"ABC\"))\n expected = df.iloc[:, expected_columns]\n result = df.loc[[\"A\", \"B\", \"C\"], column_key]\n\n if df.columns.is_object() and all(isinstance(x, int) for x in column_key):\n expected.columns = expected.columns.astype(int)\n\n tm.assert_frame_equal(result, expected, check_column_type=True)\n\n\ndef test_loc_setitem_float_intindex():\n # GH 8720\n rand_data = np.random.randn(8, 4)\n result = DataFrame(rand_data)\n result.loc[:, 0.5] = np.nan\n expected_data = np.hstack((rand_data, np.array([np.nan] * 8).reshape(8, 1)))\n expected = DataFrame(expected_data, columns=[0.0, 1.0, 2.0, 3.0, 0.5])\n tm.assert_frame_equal(result, expected)\n\n result = DataFrame(rand_data)\n result.loc[:, 0.5] = np.nan\n tm.assert_frame_equal(result, expected)\n\n\ndef test_loc_axis_1_slice():\n # GH 10586\n cols = [(yr, m) for yr in [2014, 2015] for m in [7, 8, 9, 10]]\n df = DataFrame(\n np.ones((10, 8)),\n index=tuple(\"ABCDEFGHIJ\"),\n columns=pd.MultiIndex.from_tuples(cols),\n )\n result = df.loc(axis=1)[(2014, 9):(2015, 8)]\n expected = DataFrame(\n np.ones((10, 4)),\n index=tuple(\"ABCDEFGHIJ\"),\n columns=pd.MultiIndex.from_tuples(\n [(2014, 9), (2014, 10), (2015, 7), (2015, 8)]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_loc_set_dataframe_multiindex():\n # GH 14592\n expected = DataFrame(\n \"a\", index=range(2), columns=pd.MultiIndex.from_product([range(2), range(2)])\n )\n result = expected.copy()\n result.loc[0, [(0, 1)]] = result.loc[0, [(0, 1)]]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_loc_mixed_int_float():\n # GH#19456\n ser = Series(range(2), Index([1, 2.0], dtype=object))\n\n result = ser.loc[1]\n assert result == 0\n\n\ndef test_loc_with_positional_slice_deprecation():\n # GH#31840\n ser = Series(range(4), index=[\"A\", \"B\", \"C\", \"D\"])\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n ser.loc[:3] = 2\n\n expected = Series([2, 2, 2, 3], index=[\"A\", \"B\", \"C\", \"D\"])\n tm.assert_series_equal(ser, expected)\n\n\ndef test_loc_slice_disallows_positional():\n # GH#16121, GH#24612, GH#31810\n dti = pd.date_range(\"2016-01-01\", periods=3)\n df = DataFrame(np.random.random((3, 2)), index=dti)\n\n ser = df[0]\n\n msg = (\n \"cannot do slice 
indexing on DatetimeIndex with these \"\n r\"indexers \\[1\\] of type int\"\n )\n\n for obj in [df, ser]:\n with pytest.raises(TypeError, match=msg):\n obj.loc[1:3]\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n # GH#31840 deprecated incorrect behavior\n obj.loc[1:3] = 1\n\n with pytest.raises(TypeError, match=msg):\n df.loc[1:3, 1]\n\n with tm.assert_produces_warning(FutureWarning):\n # GH#31840 deprecated incorrect behavior\n df.loc[1:3, 1] = 2\n\n\ndef test_loc_datetimelike_mismatched_dtypes():\n # GH#32650 dont mix and match datetime/timedelta/period dtypes\n\n df = DataFrame(\n np.random.randn(5, 3),\n columns=[\"a\", \"b\", \"c\"],\n index=pd.date_range(\"2012\", freq=\"H\", periods=5),\n )\n # create dataframe with non-unique DatetimeIndex\n df = df.iloc[[0, 2, 2, 3]].copy()\n\n dti = df.index\n tdi = pd.TimedeltaIndex(dti.asi8) # matching i8 values\n\n msg = r\"None of \\[TimedeltaIndex.* are in the \\[index\\]\"\n with pytest.raises(KeyError, match=msg):\n df.loc[tdi]\n\n with pytest.raises(KeyError, match=msg):\n df[\"a\"].loc[tdi]\n\n\ndef test_loc_with_period_index_indexer():\n # GH#4125\n idx = pd.period_range(\"2002-01\", \"2003-12\", freq=\"M\")\n df = DataFrame(np.random.randn(24, 10), index=idx)\n tm.assert_frame_equal(df, df.loc[idx])\n tm.assert_frame_equal(df, df.loc[list(idx)])\n tm.assert_frame_equal(df, df.loc[list(idx)])\n tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])\n tm.assert_frame_equal(df, df.loc[list(idx)])\n\n\nclass TestLocSeries:\n @pytest.mark.parametrize(\"val,expected\", [(2 ** 63 - 1, 3), (2 ** 63, 4)])\n def test_loc_uint64(self, val, expected):\n # see GH#19399\n ser = Series({2 ** 63 - 1: 3, 2 ** 63: 4})\n assert ser.loc[val] == expected\n\n def test_loc_getitem(self, string_series, datetime_series):\n inds = string_series.index[[3, 4, 7]]\n tm.assert_series_equal(string_series.loc[inds], string_series.reindex(inds))\n tm.assert_series_equal(string_series.iloc[5::2], string_series[5::2])\n\n # slice with indices\n d1, d2 = datetime_series.index[[5, 15]]\n result = datetime_series.loc[d1:d2]\n expected = datetime_series.truncate(d1, d2)\n tm.assert_series_equal(result, expected)\n\n # boolean\n mask = string_series > string_series.median()\n tm.assert_series_equal(string_series.loc[mask], string_series[mask])\n\n # ask for index value\n assert datetime_series.loc[d1] == datetime_series[d1]\n assert datetime_series.loc[d2] == datetime_series[d2]\n\n def test_loc_getitem_not_monotonic(self, datetime_series):\n d1, d2 = datetime_series.index[[5, 15]]\n\n ts2 = datetime_series[::2][[1, 2, 0]]\n\n msg = r\"Timestamp\\('2000-01-10 00:00:00'\\)\"\n with pytest.raises(KeyError, match=msg):\n ts2.loc[d1:d2]\n with pytest.raises(KeyError, match=msg):\n ts2.loc[d1:d2] = 0\n\n def test_loc_getitem_setitem_integer_slice_keyerrors(self):\n ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))\n\n # this is OK\n cp = ser.copy()\n cp.iloc[4:10] = 0\n assert (cp.iloc[4:10] == 0).all()\n\n # so is this\n cp = ser.copy()\n cp.iloc[3:11] = 0\n assert (cp.iloc[3:11] == 0).values.all()\n\n result = ser.iloc[2:6]\n result2 = ser.loc[3:11]\n expected = ser.reindex([4, 6, 8, 10])\n\n tm.assert_series_equal(result, expected)\n tm.assert_series_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n s2 = ser.iloc[list(range(5)) + list(range(9, 4, -1))]\n with pytest.raises(KeyError, match=r\"^3$\"):\n s2.loc[3:11]\n with pytest.raises(KeyError, match=r\"^3$\"):\n s2.loc[3:11] = 0\n\n def test_loc_getitem_iterator(self, 
string_series):\n idx = iter(string_series.index[:10])\n result = string_series.loc[idx]\n tm.assert_series_equal(result, string_series[:10])\n\n def test_loc_setitem_boolean(self, string_series):\n mask = string_series > string_series.median()\n\n result = string_series.copy()\n result.loc[mask] = 0\n expected = string_series\n expected[mask] = 0\n tm.assert_series_equal(result, expected)\n\n def test_loc_setitem_corner(self, string_series):\n inds = list(string_series.index[[5, 8, 12]])\n string_series.loc[inds] = 5\n msg = r\"\\['foo'\\] not in index\"\n with pytest.raises(KeyError, match=msg):\n string_series.loc[inds + [\"foo\"]] = 5\n\n def test_basic_setitem_with_labels(self, datetime_series):\n indices = datetime_series.index[[5, 10, 15]]\n\n cp = datetime_series.copy()\n exp = datetime_series.copy()\n cp[indices] = 0\n exp.loc[indices] = 0\n tm.assert_series_equal(cp, exp)\n\n cp = datetime_series.copy()\n exp = datetime_series.copy()\n cp[indices[0] : indices[2]] = 0\n exp.loc[indices[0] : indices[2]] = 0\n tm.assert_series_equal(cp, exp)\n\n def test_loc_setitem_listlike_of_ints(self):\n\n # integer indexes, be careful\n ser = Series(np.random.randn(10), index=list(range(0, 20, 2)))\n inds = [0, 4, 6]\n arr_inds = np.array([0, 4, 6])\n\n cp = ser.copy()\n exp = ser.copy()\n ser[inds] = 0\n ser.loc[inds] = 0\n tm.assert_series_equal(cp, exp)\n\n cp = ser.copy()\n exp = ser.copy()\n ser[arr_inds] = 0\n ser.loc[arr_inds] = 0\n tm.assert_series_equal(cp, exp)\n\n inds_notfound = [0, 4, 5, 6]\n arr_inds_notfound = np.array([0, 4, 5, 6])\n msg = r\"\\[5\\] not in index\"\n with pytest.raises(KeyError, match=msg):\n ser[inds_notfound] = 0\n with pytest.raises(Exception, match=msg):\n ser[arr_inds_notfound] = 0\n\n def test_loc_setitem_dt64tz_values(self):\n # GH#12089\n ser = Series(\n date_range(\"2011-01-01\", periods=3, tz=\"US/Eastern\"),\n index=[\"a\", \"b\", \"c\"],\n )\n s2 = ser.copy()\n expected = Timestamp(\"2011-01-03\", tz=\"US/Eastern\")\n s2.loc[\"a\"] = expected\n result = s2.loc[\"a\"]\n assert result == expected\n\n s2 = ser.copy()\n s2.iloc[0] = expected\n result = s2.iloc[0]\n assert result == expected\n\n s2 = ser.copy()\n s2[\"a\"] = expected\n result = s2[\"a\"]\n assert result == expected\n\n @pytest.mark.parametrize(\"array_fn\", [np.array, pd.array, list, tuple])\n @pytest.mark.parametrize(\"size\", [0, 4, 5, 6])\n def test_loc_iloc_setitem_with_listlike(self, size, array_fn):\n # GH37748\n # testing insertion, in a Series of size N (here 5), of a listlike object\n # of size 0, N-1, N, N+1\n\n arr = array_fn([0] * size)\n expected = Series([arr, 0, 0, 0, 0], index=list(\"abcde\"), dtype=object)\n\n ser = Series(0, index=list(\"abcde\"), dtype=object)\n ser.loc[\"a\"] = arr\n tm.assert_series_equal(ser, expected)\n\n ser = Series(0, index=list(\"abcde\"), dtype=object)\n ser.iloc[0] = arr\n tm.assert_series_equal(ser, expected)\n\n @pytest.mark.parametrize(\"indexer\", [IndexSlice[\"A\", :], (\"A\", slice(None))])\n def test_loc_series_getitem_too_many_dimensions(self, indexer):\n # GH#35349\n ser = Series(\n index=MultiIndex.from_tuples([(\"A\", \"0\"), (\"A\", \"1\"), (\"B\", \"0\")]),\n data=[21, 22, 23],\n )\n msg = \"Too many indices\"\n with pytest.raises(ValueError, match=msg):\n ser.loc[indexer, :]\n\n with pytest.raises(ValueError, match=msg):\n ser.loc[indexer, :] = 1\n" ]
[ [ "numpy.errstate", "pandas._testing.external_error_raised", "pandas._testing.assert_index_equal" ], [ "pandas.errors.InvalidIndexError", "pandas.core.indexes.extension.inherit_names", "pandas._libs.tslibs.Timedelta", "pandas._libs.tslibs.to_offset", "pandas.core.dtypes.common.is_timedelta64_dtype", "pandas.core.dtypes.common.is_scalar", "pandas.core.indexes.base.Index.get_loc", "pandas.core.arrays.timedeltas.TimedeltaArray._generate_range", "pandas.core.common.any_none", "pandas.core.indexes.base.maybe_extract_name", "pandas.core.arrays.timedeltas.TimedeltaArray._from_sequence_not_strict", "pandas.core.arrays.datetimelike.maybe_infer_freq" ], [ "pandas._testing.makeDateIndex", "pandas.date_range" ], [ "pandas.timedelta_range", "pandas.CategoricalIndex", "pandas.core.arrays.timedeltas.sequence_to_td64ns", "pandas.core.arrays.timedeltas.TimedeltaArray", "pandas.TimedeltaIndex", "numpy.asarray", "numpy.arange", "pandas.Categorical", "pandas.Index", "pandas.Timedelta", "numpy.datetime64", "numpy.timedelta64", "pandas.date_range", "pandas.to_timedelta", "pandas.offsets.Second", "numpy.array", "pandas.core.arrays.timedeltas.TimedeltaArray._from_sequence", "pandas._testing.assert_index_equal" ], [ "pandas.to_datetime", "pandas.Series", "pandas.api.types.is_scalar", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "numpy.random.random_sample", "numpy.random.randn", "pandas.DataFrame.sparse.from_spmatrix", "numpy.iinfo", "pandas._testing.assert_frame_equal", "pandas._testing.assert_numpy_array_equal", "numpy.arange", "numpy.eye", "pandas.Index", "pandas._testing.makeCustomDataframe", "pandas._testing.assert_series_equal", "pandas._testing.assert_index_equal", "pandas._testing.assert_produces_warning", "pandas.Categorical", "pandas.array", "pandas.Timedelta", "numpy.timedelta64", "pandas.MultiIndex.from_product", "numpy.random.rand", "pandas.date_range", "pandas.DataFrame.from_dict", "numpy.array", "pandas.SparseDtype", "pandas.timedelta_range", "pandas.CategoricalIndex", "numpy.random.random", "pandas.TimedeltaIndex", "numpy.random.seed", "pandas.period_range", "pandas._testing.assert_equal", "pandas.MultiIndex.from_arrays", "numpy.ones", "numpy.datetime64", "numpy.random.normal", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
maxwellzh/ST-NAS
[ "67d1b91cdc42a30a38fb540922b49c02a3b9c74e" ]
[ "ASR/scripts/toolkit/dataset_pickle.py" ]
[ "'''\nCopyright 2020 SPMI-CAT\nModified by Zheng Huahuan\n'''\nfrom torch.utils.data import Dataset\nimport kaldi_io\nimport numpy as np\nimport torch\nimport sys\nimport pickle\nsys.path.append('./ctc-crf')\n\n\nclass SpeechDataset(Dataset):\n def __init__(self, pickle_path):\n with open(pickle_path, 'rb') as f:\n # for python2\n # self.dataset = pickle.load(f)\n # for python3\n self.dataset = pickle.load(f, encoding='latin1')\n\n def __len__(self):\n return len(self.dataset)\n\n def __getitem__(self, idx):\n key, feature_path, label, weight = self.dataset[idx]\n mat = np.asarray(kaldi_io.read_mat(feature_path))\n return torch.FloatTensor(mat), torch.IntTensor(label), torch.FloatTensor(weight)\n\n\nclass SpeechDatasetMem(Dataset):\n def __init__(self, pickle_path):\n with open(pickle_path) as f:\n self.dataset = pickle.load(f)\n\n self.data_batch = []\n\n for data in self.dataset:\n key, feature_path, label, weight = data\n mat = np.asarray(kaldi_io.read_mat(feature_path))\n self.data_batch.append(\n [torch.FloatTensor(mat), torch.IntTensor(label), torch.FloatTensor(weight)])\n print(\"read all data into memory\")\n\n def __len__(self):\n return len(self.data_batch)\n\n def __getitem__(self, idx):\n return self.data_batch[idx]\n\n\ndef pad_tensor(t, pad_to_length, dim):\n pad_size = list(t.shape)\n pad_size[dim] = pad_to_length - t.size(dim)\n return torch.cat([t, torch.zeros(*pad_size).type_as(t)], dim=dim)\n\n\nclass PadCollate:\n def __init__(self):\n pass\n\n def __call__(self, batch):\n # batch: list of (mat, label, weight)\n # return: logits, input_lengths, label_padded, label_lengths, weights\n input_lengths = map(lambda x: x[0].size(0), batch)\n if sys.version > '3':\n input_lengths = list(input_lengths)\n max_input_length = max(input_lengths)\n label_lengths = map(lambda x: x[1].size(0), batch)\n if sys.version > '3':\n label_lengths = list(label_lengths)\n max_label_length = max(label_lengths)\n input_batch = map(lambda x: pad_tensor(\n x[0], max_input_length, 0), batch)\n label_batch = map(lambda x: pad_tensor(\n x[1], max_label_length, 0), batch)\n if sys.version > '3':\n input_batch = list(input_batch)\n label_batch = list(label_batch)\n logits = torch.stack(input_batch, dim=0)\n label_padded = torch.stack(label_batch, dim=0)\n input_lengths = torch.IntTensor(input_lengths)\n label_lengths = torch.IntTensor(label_lengths)\n weights = torch.FloatTensor([x[2] for x in batch])\n return logits, input_lengths, label_padded, label_lengths, weights\n" ]
[ [ "torch.stack", "torch.zeros", "torch.FloatTensor", "torch.IntTensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DiegoHidalgoS/Kats
[ "48f95d141a6241f26f2fdac84418dba430407fb6" ]
[ "kats/detectors/prophet_detector.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nThis module contains code to implement the Prophet algorithm\nas a Detector Model.\n\"\"\"\n\nfrom enum import Enum\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\n\ntry:\n from fbprophet import Prophet\n from fbprophet.serialize import model_from_json, model_to_json\n\n _no_prophet = False\nexcept ImportError:\n _no_prophet = True\n\nfrom kats.consts import TimeSeriesData\nfrom kats.detectors.detector import DetectorModel\nfrom kats.detectors.detector_consts import (\n AnomalyResponse,\n ConfidenceBand,\n)\n\nPROPHET_TIME_COLUMN = \"ds\"\nPROPHET_VALUE_COLUMN = \"y\"\nPROPHET_YHAT_COLUMN = \"yhat\"\nPROPHET_YHAT_LOWER_COLUMN = \"yhat_lower\"\nPROPHET_YHAT_UPPER_COLUMN = \"yhat_upper\"\n\nMIN_STDEV = 1e-9\n\n\ndef timeseries_to_prophet_df(ts_data: TimeSeriesData) -> pd.DataFrame:\n \"\"\"Converts a object of TimeSeriesData to a dataframe, as expected by Prophet.\n\n Args:\n ts_data: object of class TimeSeriesData.\n\n Returns:\n pandas DataFrame expected by Prophet.\n \"\"\"\n\n if not ts_data.is_univariate():\n raise ValueError(\"ProphetModel only works with univariate data\")\n\n return pd.DataFrame(\n {\n PROPHET_TIME_COLUMN: ts_data.time,\n PROPHET_VALUE_COLUMN: ts_data.value,\n }\n )\n\n\ndef deviation_from_predicted_val(\n data: TimeSeriesData,\n predict_df: pd.DataFrame,\n ci_threshold: Optional[float] = None,\n uncertainty_samples: Optional[float] = None,\n):\n return (data.value - predict_df[PROPHET_YHAT_COLUMN]) / predict_df[\n PROPHET_YHAT_COLUMN\n ].abs()\n\n\ndef z_score(\n data: TimeSeriesData,\n predict_df: pd.DataFrame,\n ci_threshold: float = 0.8,\n uncertainty_samples: float = 50,\n):\n # asymmetric confidence band => points above the prediction use upper bound in calculation, points below the prediction use lower bound\n\n actual_upper_std = (\n (uncertainty_samples ** 0.5)\n * (predict_df[PROPHET_YHAT_UPPER_COLUMN] - predict_df[PROPHET_YHAT_COLUMN])\n / ci_threshold\n )\n actual_lower_std = (\n (uncertainty_samples ** 0.5)\n * (predict_df[PROPHET_YHAT_COLUMN] - predict_df[PROPHET_YHAT_LOWER_COLUMN])\n / ci_threshold\n )\n\n # if std is 0, set it to a very small value to prevent division by zero in next step\n upper_std = np.maximum(actual_upper_std, MIN_STDEV)\n lower_std = np.maximum(actual_lower_std, MIN_STDEV)\n\n upper_score = (\n (data.value > predict_df[PROPHET_YHAT_COLUMN])\n * (data.value - predict_df[PROPHET_YHAT_COLUMN])\n / upper_std\n )\n lower_score = (\n (data.value < predict_df[PROPHET_YHAT_COLUMN])\n * (data.value - predict_df[PROPHET_YHAT_COLUMN])\n / lower_std\n )\n\n return upper_score + lower_score\n\n\nclass ProphetScoreFunction(Enum):\n deviation_from_predicted_val = \"deviation_from_predicted_val\"\n z_score = \"z_score\"\n\n\nSCORE_FUNC_DICT = {\n ProphetScoreFunction.deviation_from_predicted_val.value: deviation_from_predicted_val,\n ProphetScoreFunction.z_score.value: z_score,\n}\n\n\nclass ProphetDetectorModel(DetectorModel):\n \"\"\"Prophet based anomaly detection model.\n\n A Detector Model that does anomaly detection, by first using the Prophet\n library to forecast the interval for the next point, and comparing this\n to the actually observed data point.\n\n Attributes:\n scoring_confidence_interval: interval_width as required by Prophet.\n Confidence interval is used by some scoring strategies to compute\n anomaly scores.\n 
uncertainty_samples: Number of samples required by Prophet to\n calculate uncertainty.\n serialized_model: json, representing data from a previously\n serialized model.\n \"\"\"\n\n def __init__(\n self,\n serialized_model: Optional[bytes] = None,\n score_func: ProphetScoreFunction = ProphetScoreFunction.deviation_from_predicted_val,\n scoring_confidence_interval: float = 0.8,\n remove_outliers=False,\n outlier_threshold: float = 0.99,\n uncertainty_samples: float = 50,\n ) -> None:\n if _no_prophet:\n raise RuntimeError(\"requires fbprophet to be installed\")\n if serialized_model:\n self.model = model_from_json(serialized_model)\n else:\n self.model = None\n\n self.score_func = score_func\n self.scoring_confidence_interval = scoring_confidence_interval\n self.remove_outliers = remove_outliers\n self.outlier_threshold = outlier_threshold\n self.uncertainty_samples = uncertainty_samples\n\n def serialize(self) -> bytes:\n \"\"\"Serialize the model into a json.\n\n So it can be loaded later.\n\n Returns:\n json containing information of the model.\n \"\"\"\n return str.encode(model_to_json(self.model))\n\n # pyre-fixme[14]: `fit_predict` overrides method defined in `DetectorModel`\n # inconsistently.\n def fit_predict(\n self, data: TimeSeriesData, historical_data: Optional[TimeSeriesData] = None\n ) -> AnomalyResponse:\n \"\"\"Trains a model, and returns the anomaly scores.\n\n Returns the AnomalyResponse, when data is passed to it.\n\n Args:\n data: TimeSeriesData on which detection is run.\n historical_data: TimeSeriesData corresponding to history. History ends exactly where\n the data begins.\n\n Returns:\n AnomalyResponse object. The length of this object is same as data. The score property\n gives the score for anomaly.\n \"\"\"\n\n # train on historical, then predict on all data.\n # pyre-fixme[6]: Expected `TimeSeriesData` for 1st param but got\n # `Optional[TimeSeriesData]`.\n self.fit(data=historical_data, historical_data=None)\n return self.predict(data)\n\n # pyre-fixme[14]: `fit` overrides method defined in `DetectorModel` inconsistently.\n def fit(\n self, data: TimeSeriesData, historical_data: Optional[TimeSeriesData] = None\n ) -> None:\n \"\"\"Used to train a model.\n\n fit can be called during priming. We train a model using all the data passed in.\n\n Args:\n data: TimeSeriesData on which detection is run.\n historical_data: TimeSeriesData corresponding to history. History ends exactly where\n the data begins.\n\n Returns:\n None.\n \"\"\"\n\n if historical_data is None:\n total_data = data\n else:\n historical_data.extend(data)\n total_data = historical_data\n\n # No incremental training. Create a model and train from scratch\n self.model = Prophet(\n interval_width=self.scoring_confidence_interval,\n uncertainty_samples=self.uncertainty_samples,\n )\n\n data_df = timeseries_to_prophet_df(total_data)\n\n if self.remove_outliers:\n data_df = self._remove_outliers(data_df, self.outlier_threshold)\n\n self.model.fit(data_df)\n\n # pyre-fixme[14]: `predict` overrides method defined in `DetectorModel`\n # inconsistently.\n def predict(\n self,\n data: TimeSeriesData,\n historical_data: Optional[TimeSeriesData] = None,\n ) -> AnomalyResponse:\n \"\"\"Predicts anomaly score for future data.\n\n Predict only expects anomaly score for data. Prophet doesn't need historical_data.\n\n Args:\n data: TimeSeriesData on which detection is run\n historical_data: TimeSeriesData corresponding to history. History ends exactly where\n the data begins.\n\n Returns:\n AnomalyResponse object. 
The length of this obj.ect is same as data. The score property\n gives the score for anomaly.\n \"\"\"\n time_df = pd.DataFrame({PROPHET_TIME_COLUMN: data.time})\n predict_df = self.model.predict(time_df)\n zeros = np.zeros(len(data))\n response = AnomalyResponse(\n scores=TimeSeriesData(\n time=data.time,\n value=SCORE_FUNC_DICT[self.score_func.value](\n data=data,\n predict_df=predict_df,\n ci_threshold=self.model.interval_width,\n uncertainty_samples=self.uncertainty_samples,\n ),\n ),\n confidence_band=ConfidenceBand(\n upper=TimeSeriesData(\n time=data.time, value=predict_df[PROPHET_YHAT_UPPER_COLUMN]\n ),\n lower=TimeSeriesData(\n time=data.time, value=predict_df[PROPHET_YHAT_LOWER_COLUMN]\n ),\n ),\n predicted_ts=TimeSeriesData(\n time=data.time, value=predict_df[PROPHET_YHAT_COLUMN]\n ),\n anomaly_magnitude_ts=TimeSeriesData(time=data.time, value=pd.Series(zeros)),\n stat_sig_ts=TimeSeriesData(time=data.time, value=pd.Series(zeros)),\n )\n return response\n\n @staticmethod\n def _remove_outliers(\n ts_df: pd.DataFrame,\n outlier_ci_threshold: float = 0.99,\n uncertainty_samples: float = 50,\n ) -> pd.DataFrame:\n \"\"\"\n Remove outliers from the time series by fitting a Prophet model to the time series\n and stripping all points that fall outside the confidence interval of the predictions\n of the model.\n \"\"\"\n\n ts_dates_df = pd.DataFrame({PROPHET_TIME_COLUMN: ts_df.iloc[:, 0]})\n\n model = Prophet(\n interval_width=outlier_ci_threshold, uncertainty_samples=uncertainty_samples\n )\n model_pass1 = model.fit(ts_df)\n\n forecast = model_pass1.predict(ts_dates_df)\n\n is_outlier = (\n ts_df[PROPHET_VALUE_COLUMN] < forecast[PROPHET_YHAT_LOWER_COLUMN]\n ) | (ts_df[PROPHET_VALUE_COLUMN] > forecast[PROPHET_YHAT_UPPER_COLUMN])\n\n ts_df = ts_df[~is_outlier]\n\n return ts_df\n" ]
[ [ "numpy.maximum", "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
MrHakimov/toy-store
[ "89b944d86273fe41215f4667d4ac6e9ea4566322" ]
[ "app.py" ]
[ "import datetime, json, re, numpy as np, simplejson, time\nfrom collections import defaultdict\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.ext.mutable import Mutable\nfrom flask import Flask, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nfrom decimal import *\n\n# Init app\napp = Flask(__name__)\n\n# Database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://yandex:apidbpassword@localhost/api'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['JSONIFY_PRETTYPRINT_REGULAR'] = True\n\n# Init db\ndb = SQLAlchemy(app)\n\n# Init ma\nma = Marshmallow(app)\n\n\n# Check a non-empty string containing at least one letter or number, no more than 256\ndef noEmpty(field):\n return re.search(r'^[\\w\\d]+', field) and (0 < len(str(field)) < 257)\n\n\n# Check \"appartment\" field\ndef checkAparment(rs):\n try:\n if rs.__contains__('appartement') and rs['appartement'] > 0 and type(rs['appartement']) == int:\n return rs['appartement']\n elif rs.__contains__('apartment') and rs['apartment'] > 0 and type(rs['apartment']) == int:\n return rs['apartment']\n else:\n return False\n except:\n return False\n\n\n# Check Date format and validated\ndef checkDate(date_string):\n try:\n match = re.fullmatch(r'\\d\\d\\.\\d\\d\\.\\d{4}', date_string)\n if match:\n bDate = datetime.datetime.strptime(date_string, '%d.%m.%Y')\n nDate = datetime.datetime.utcnow()\n if bDate < nDate:\n return bDate.date()\n else:\n return False\n else:\n return False\n except:\n return False\n\n\n# Check relatives\ndef checkRelatives(rlt_dict):\n if len(rlt_dict) == 0: return True\n for key, val in rlt_dict.items():\n for i in val:\n if (key == i or rlt_dict.__contains__(i) == False or key not in rlt_dict[i]):\n return False\n rlt_dict[i].remove(key)\n return True\n\n\n# Check fields from POST/PATCH/GET\ndef checkFields(field):\n fields = ['town', 'street', 'building', 'apartment',\n 'appartement', 'name', 'birth_date', 'gender', 'relatives']\n for key in field:\n if key not in fields:\n return False\n return True\n\n\n# Check key\ndef checkKey(rs, key):\n if rs.__contains__(key):\n return True\n return False\n\n\n# Calculate age\ndef get_age(b_day):\n utc_n = datetime.datetime.utcnow()\n return utc_n.year - b_day.year - ((utc_n.month, utc_n.day) < (b_day.month, b_day.day))\n\n\n# Timeout Checker\ndef time_out(counter, limit=10000, timeout=10):\n counter += 1\n if counter > limit:\n time.sleep(timeout)\n counter = 0\n return counter\n\n\n# Class Mutable\nclass MutableList(Mutable, list):\n def append(self, value):\n list.append(self, value)\n self.changed()\n\n @classmethod\n def coerce(cls, key, value):\n if not isinstance(value, MutableList):\n if isinstance(value, list):\n return MutableList(value)\n return Mutable.coerce(key, value)\n else:\n return value\n\n\n# Import Class/Model\nclass Import(db.Model):\n import_id = db.Column(db.Integer, primary_key=True)\n created_date = db.Column(db.DateTime, default=db.func.current_timestamp())\n citizens = db.relationship('Citizen', backref='_import_id')\n\n\n# Import Schema\nclass ImportSchema(ma.Schema):\n class Meta:\n fields = ('import_id',)\n\n\n# Citizen Class/Model\nclass Citizen(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n import_id = db.Column(db.Integer, db.ForeignKey('import.import_id'))\n citizen_id = db.Column(db.Integer, nullable=False)\n town = db.Column(db.VARCHAR(256), nullable=False)\n street = db.Column(db.VARCHAR(256), nullable=False)\n building = 
db.Column(db.VARCHAR(256), nullable=False)\n apartment = db.Column(db.Integer, nullable=False)\n name = db.Column(db.VARCHAR(256), nullable=False)\n birth_date = db.Column(db.DATE, nullable=False)\n gender = db.Column(db.VARCHAR(6), nullable=False)\n relatives = db.Column(MutableList.as_mutable(db.ARRAY(db.Integer)))\n\n\n# Citizen Schema\nclass CitizenSchema(ma.Schema):\n class Meta:\n fields = ('citizen_id', 'town', 'street', 'building', 'apartment', 'name', 'birth_date', 'gender', 'relatives')\n\n\n# Init Schema\nimport_schema = ImportSchema()\ncitizen_schema = CitizenSchema()\ncitizens_schema = CitizenSchema(many=True)\n\n\n# Create a Import/Citizen\[email protected]('/imports', methods=['POST'])\ndef add_imports():\n try:\n new_import = Import()\n db.session.add(new_import)\n relatives_dict = {}\n cit_id, counter = [], 0\n for rs in request.json['citizens']:\n rs['apartment'] = checkAparment(rs)\n if (len(rs) == 9 and (rs['citizen_id'] > 0 and type(rs['citizen_id']) == int) and noEmpty(rs['town'])\n and noEmpty(rs['street']) and noEmpty(rs['building'])\n and rs['apartment'] and (\n re.fullmatch(r'^[\\D]+', rs['name'].strip()) and (0 < len(rs['name']) < 257))\n and (re.fullmatch(r'^female|male', rs['gender']))\n and (rs['relatives'] == [] or len(rs['relatives']) > 0)\n and (len(set(rs['relatives'])) == len(list(rs['relatives'])))\n and rs['citizen_id'] not in cit_id):\n\n if len(rs['relatives']) > 0: relatives_dict[rs['citizen_id']] = rs['relatives']\n\n db.session.add(Citizen(_import_id=new_import, citizen_id=rs['citizen_id'], town=rs['town'],\n street=rs['street'], building=rs['building'], apartment=rs['apartment'],\n name=rs['name'], birth_date=checkDate(rs['birth_date']), gender=rs['gender'],\n relatives=rs['relatives']))\n cit_id.append(rs['citizen_id'])\n counter = time_out(counter)\n else:\n return {}, 400\n\n del cit_id # delete citizen_id list\n\n if checkRelatives(relatives_dict):\n db.session.commit()\n return jsonify(data=import_schema.dump(new_import)), 201\n else:\n return {}, 400\n\n except:\n return {}, 400\n\n\n# PATCH - modified information about the specified dataset\[email protected]('/imports/<import_id>/citizens/<citizen_id>', methods=['PATCH'])\ndef update_citizen(import_id, citizen_id):\n try:\n rj = request.json\n if len(rj) > 0 and checkFields(rj):\n citizen = Citizen.query.filter_by(import_id=import_id, citizen_id=citizen_id).first()\n if citizen is None: return {}, 404\n checkS = True\n if checkS and checkKey(rj, 'town'):\n if noEmpty(rj['town']):\n citizen.town = rj['town']\n else:\n checkS = False\n\n if checkS and checkKey(rj, 'street'):\n if noEmpty(rj['street']):\n citizen.street = rj['street']\n else:\n checkS = False\n\n if checkS and checkKey(rj, 'building'):\n if noEmpty(rj['building']):\n citizen.building = rj['building']\n else:\n checkS = False\n\n if checkS and (checkKey(rj, 'appartement') or checkKey(rj, 'apartment')):\n apart = checkAparment(rj)\n if apart:\n citizen.apartment = apart\n else:\n checkS = False\n\n if checkS and checkKey(rj, 'name'):\n if re.fullmatch(r'^[\\D]+', rj['name'].strip()) and (0 < len(rj['name']) < 257):\n citizen.name = rj['name']\n else:\n checkS = False\n\n if checkS and checkKey(rj, 'birth_date'):\n birth_date = checkDate(rj['birth_date'])\n if bool(birth_date):\n citizen.birth_date = birth_date\n else:\n checkS = False\n\n if checkS and checkKey(rj, 'gender'):\n if re.fullmatch(r'^female|male', rj['gender']):\n citizen.gender = rj['gender']\n else:\n checkS = False\n\n if checkS and checkKey(rj, 'relatives'):\n 
if ((rj['relatives'] == [] or len(rj['relatives']) > 0)\n and len(set(rj['relatives'])) == len(rj['relatives'])\n and (citizen.citizen_id not in list(rj['relatives']))\n ):\n if rj['relatives'] != list(citizen.relatives):\n if rj['relatives'] == '[]':\n # remove citizen.citizen_id from relatives\n for v in citizen.relatives:\n cUpdt = Citizen.query.filter_by(import_id=import_id, citizen_id=v).first()\n l = list(cUpdt.relatives)\n l.remove(citizen.citizen_id)\n cUpdt.relatives = l\n # change citizen.relatives\n citizen.relatives = rj['relatives']\n else:\n # remove citizen.citizen_id from relatives\n for v in citizen.relatives:\n cUpdt = Citizen.query.filter_by(import_id=import_id, citizen_id=v).first()\n if cUpdt.citizen_id not in rj['relatives']:\n l = list(cUpdt.relatives)\n l.remove(citizen.citizen_id)\n cUpdt.relatives = l\n\n # add citizen.citizen_id to relatives\n for v in rj['relatives']:\n cUpdt = Citizen.query.filter_by(import_id=import_id, citizen_id=v).first()\n l = list(cUpdt.relatives)\n if citizen.citizen_id not in l:\n l.append(citizen.citizen_id)\n cUpdt.relatives = l\n\n # change citizen.relatives\n citizen.relatives = rj['relatives']\n\n else:\n checkS = False\n\n if checkS:\n db.session.commit()\n citizen.relatives = [int(i) for i in citizen.relatives]\n citizen.birth_date = citizen.birth_date.strftime('%d.%m.%Y')\n citizen.gender = citizen.gender.strip()\n return jsonify(data=citizen_schema.dump(citizen)), 200\n else:\n return {}, 400\n else:\n return {}, 400\n except:\n return {}, 400\n\n\n# GET - returns a list of all residents for the specified dataset\[email protected]('/imports/<import_id>/citizens', methods=['GET'])\ndef get_citizen(import_id):\n try:\n all_citizens = Citizen.query.filter_by(import_id=import_id).all()\n if all_citizens:\n counter = 0\n result = []\n for row in all_citizens:\n result.append(dict(citizen_id=row.citizen_id, town=row.town, street=row.street, building=row.building,\n apartment=row.apartment, name=row.name,\n birth_date=row.birth_date.strftime('%d.%m.%Y'),\n gender=row.gender.strip(), relatives=[int(i) for i in row.relatives]))\n counter = time_out(counter)\n return jsonify(data=result), 200\n return {}, 404\n except:\n return {}, 400\n\n\n# GET - /imports/$import_id/citizens/birthdays\[email protected]('/imports/<import_id>/citizens/birthdays')\ndef get_birthdays(import_id):\n try:\n sql = \"SELECT citizen_id AS id, date_part('month', birth_date) AS m_int, relatives \" \\\n \"FROM citizen WHERE import_id = %d AND array_length(relatives, 1) > 0;\" % int(import_id)\n result = db.session.execute(sql)\n\n rs, cnt = {}, {}\n counter = 0\n for row in result:\n rs[row[\"id\"]] = [[int(i) for i in row[\"relatives\"]], int(row[\"m_int\"])]\n cnt[row[\"id\"]] = [0 for i in range(12)]\n counter = time_out(counter)\n\n if (len(rs)):\n del result\n for k, v in rs.items():\n for i in v[0]:\n cnt[k][rs[i][1] - 1] += 1\n cnt[i][v[1] - 1] += 1\n rs[i][0].remove(k)\n rs[k][0].remove(i)\n d_moth = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [],\n 7: [], 8: [], 9: [], 10: [], 11: [], 12: []}\n for k, v in cnt.items():\n for i in range(12):\n if v[i] > 0:\n d_moth[i + 1].append({\"citizen_id\": k, \"presents\": v[i]})\n return jsonify(data=d_moth), 200\n\n return {}, 404\n\n except:\n return {}, 400\n\n\n# GET - /imports/$import_id/towns/stat/percentile/age\[email protected]('/imports/<import_id>/towns/stat/percentile/age')\ndef get_percentile(import_id):\n try:\n sql = \"SELECT town, birth_date FROM citizen WHERE import_id = %d;\" % int(import_id)\n result = 
db.session.execute(sql)\n rs = defaultdict(list)\n counter = 0\n for row in result:\n rs[row['town']].append(get_age(row['birth_date']))\n counter = time_out(counter)\n\n if len(rs):\n percentile_town = [] # percentile by town\n for t, al in rs.items():\n current_dict = {'town': t}\n for i in [50, 75, 99]:\n current_dict[f'p{i}'] = float(\"%0.2f\" % (np.percentile(al, i, interpolation='linear')))\n percentile_town.append(current_dict)\n return jsonify(data=percentile_town), 200\n\n return {}, 404\n except:\n return {}, 400\n\n\n# Run Server\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=True)\n" ]
[ [ "numpy.percentile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JungHunOh/liif
[ "c4445bf4b4adf25cae66d760fc590b80e627f0d7" ]
[ "train_liif.py" ]
[ "\"\"\" Train for generating LIIF, from image to implicit representation.\r\n\r\n Config:\r\n train_dataset:\r\n dataset: $spec; wrapper: $spec; batch_size:\r\n val_dataset:\r\n dataset: $spec; wrapper: $spec; batch_size:\r\n (data_norm):\r\n inp: {sub: []; div: []}\r\n gt: {sub: []; div: []}\r\n (eval_type):\r\n (eval_bsize):\r\n\r\n model: $spec\r\n optimizer: $spec\r\n epoch_max:\r\n (multi_step_lr):\r\n milestones: []; gamma: 0.5\r\n (resume): *.pth\r\n\r\n (epoch_val): ; (epoch_save):\r\n\"\"\"\r\n\r\nimport argparse\r\nimport os\r\n\r\nimport yaml\r\nimport torch\r\nimport torch.nn as nn\r\nfrom tqdm import tqdm\r\nfrom torch.utils.data import DataLoader\r\nfrom torch.optim.lr_scheduler import MultiStepLR\r\n\r\nimport datasets\r\nimport models\r\nimport utils\r\nfrom test import eval_psnr\r\n\r\n\r\ndef make_data_loader(spec, tag=''):\r\n if spec is None:\r\n return None\r\n\r\n dataset = datasets.make(spec['dataset'])\r\n dataset = datasets.make(spec['wrapper'], args={'dataset': dataset})\r\n\r\n log('{} dataset: size={}'.format(tag, len(dataset)))\r\n for k, v in dataset[0].items():\r\n log(' {}: shape={}'.format(k, tuple(v.shape)))\r\n\r\n loader = DataLoader(dataset, batch_size=spec['batch_size'],\r\n shuffle=(tag == 'train'), num_workers=8, pin_memory=True)\r\n return loader\r\n\r\n\r\ndef make_data_loaders():\r\n train_loader = make_data_loader(config.get('train_dataset'), tag='train')\r\n val_loader = make_data_loader(config.get('val_dataset'), tag='val')\r\n return train_loader, val_loader\r\n\r\n\r\ndef prepare_training():\r\n if config.get('resume') is not None:\r\n sv_file = torch.load(config['resume'])\r\n model = models.make(sv_file['model'], load_sd=True).cuda()\r\n optimizer = utils.make_optimizer(\r\n model.parameters(), sv_file['optimizer'], load_sd=True)\r\n epoch_start = sv_file['epoch'] + 1\r\n if config.get('multi_step_lr') is None:\r\n lr_scheduler = None\r\n else:\r\n lr_scheduler = MultiStepLR(optimizer, **config['multi_step_lr'])\r\n for _ in range(epoch_start - 1):\r\n lr_scheduler.step()\r\n else:\r\n model = models.make(config['model']).cuda()\r\n optimizer = utils.make_optimizer(\r\n model.parameters(), config['optimizer'])\r\n epoch_start = 1\r\n if config.get('multi_step_lr') is None:\r\n lr_scheduler = None\r\n else:\r\n lr_scheduler = MultiStepLR(optimizer, **config['multi_step_lr'])\r\n\r\n log('model: #params={}'.format(utils.compute_num_params(model, text=True)))\r\n return model, optimizer, epoch_start, lr_scheduler\r\n\r\n\r\ndef train(train_loader, model, optimizer):\r\n model.train()\r\n loss_fn = nn.L1Loss()\r\n train_loss = utils.Averager()\r\n\r\n data_norm = config['data_norm']\r\n t = data_norm['inp']\r\n inp_sub = torch.FloatTensor(t['sub']).view(1, -1, 1, 1).cuda()\r\n inp_div = torch.FloatTensor(t['div']).view(1, -1, 1, 1).cuda()\r\n t = data_norm['gt']\r\n gt_sub = torch.FloatTensor(t['sub']).view(1, 1, -1).cuda()\r\n gt_div = torch.FloatTensor(t['div']).view(1, 1, -1).cuda()\r\n\r\n for batch in tqdm(train_loader, leave=False, desc='train'):\r\n for k, v in batch.items():\r\n batch[k] = v.cuda()\r\n\r\n inp = (batch['inp'] - inp_sub) / inp_div\r\n pred = model(inp, batch['coord'], batch['cell'])\r\n\r\n gt = (batch['gt'] - gt_sub) / gt_div\r\n loss = loss_fn(pred, gt)\r\n\r\n train_loss.add(loss.item())\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n pred = None; loss = None\r\n\r\n return train_loss.item()\r\n\r\n\r\ndef main(config_, save_path):\r\n global config, log, writer\r\n config = 
config_\r\n log, writer = utils.set_save_path(save_path)\r\n with open(os.path.join(save_path, 'config.yaml'), 'w') as f:\r\n yaml.dump(config, f, sort_keys=False)\r\n\r\n train_loader, val_loader = make_data_loaders()\r\n if config.get('data_norm') is None:\r\n config['data_norm'] = {\r\n 'inp': {'sub': [0], 'div': [1]},\r\n 'gt': {'sub': [0], 'div': [1]}\r\n }\r\n\r\n model, optimizer, epoch_start, lr_scheduler = prepare_training()\r\n\r\n n_gpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))\r\n if n_gpus > 1:\r\n model = nn.parallel.DataParallel(model)\r\n\r\n epoch_max = config['epoch_max']\r\n epoch_val = config.get('epoch_val')\r\n epoch_save = config.get('epoch_save')\r\n max_val_v = -1e18\r\n\r\n timer = utils.Timer()\r\n\r\n for epoch in range(epoch_start, epoch_max + 1):\r\n t_epoch_start = timer.t()\r\n log_info = ['epoch {}/{}'.format(epoch, epoch_max)]\r\n\r\n writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)\r\n\r\n train_loss = train(train_loader, model, optimizer)\r\n if lr_scheduler is not None:\r\n lr_scheduler.step()\r\n\r\n log_info.append('train: loss={:.4f}'.format(train_loss))\r\n writer.add_scalars('loss', {'train': train_loss}, epoch)\r\n\r\n if n_gpus > 1:\r\n model_ = model.module\r\n else:\r\n model_ = model\r\n model_spec = config['model']\r\n model_spec['sd'] = model_.state_dict()\r\n optimizer_spec = config['optimizer']\r\n optimizer_spec['sd'] = optimizer.state_dict()\r\n sv_file = {\r\n 'model': model_spec,\r\n 'optimizer': optimizer_spec,\r\n 'epoch': epoch\r\n }\r\n\r\n torch.save(sv_file, os.path.join(save_path, 'epoch-last.pth'))\r\n\r\n if (epoch_save is not None) and (epoch % epoch_save == 0):\r\n torch.save(sv_file,\r\n os.path.join(save_path, 'epoch-{}.pth'.format(epoch)))\r\n\r\n if (epoch_val is not None) and (epoch % epoch_val == 0):\r\n if n_gpus > 1 and (config.get('eval_bsize') is not None):\r\n model_ = model.module\r\n else:\r\n model_ = model\r\n val_res = eval_psnr(val_loader, model_,\r\n data_norm=config['data_norm'],\r\n eval_type=config.get('eval_type'),\r\n eval_bsize=config.get('eval_bsize'))\r\n\r\n log_info.append('val: psnr={:.4f}'.format(val_res))\r\n writer.add_scalars('psnr', {'val': val_res}, epoch)\r\n if val_res > max_val_v:\r\n max_val_v = val_res\r\n torch.save(sv_file, os.path.join(save_path, 'epoch-best.pth'))\r\n\r\n t = timer.t()\r\n prog = (epoch - epoch_start + 1) / (epoch_max - epoch_start + 1)\r\n t_epoch = utils.time_text(t - t_epoch_start)\r\n t_elapsed, t_all = utils.time_text(t), utils.time_text(t / prog)\r\n log_info.append('{} {}/{}'.format(t_epoch, t_elapsed, t_all))\r\n\r\n log(', '.join(log_info))\r\n writer.flush()\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--config')\r\n parser.add_argument('--name', default=None)\r\n parser.add_argument('--tag', default=None)\r\n parser.add_argument('--gpu', default='0')\r\n args = parser.parse_args()\r\n\r\n os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu\r\n\r\n with open(args.config, 'r') as f:\r\n config = yaml.load(f, Loader=yaml.FullLoader)\r\n print('config loaded.')\r\n\r\n save_name = args.name\r\n if save_name is None:\r\n save_name = '_' + args.config.split('/')[-1][:-len('.yaml')]\r\n if args.tag is not None:\r\n save_name += '_' + args.tag\r\n save_path = os.path.join('./save', save_name)\r\n\r\n main(config, save_path)\r\n" ]
[ [ "torch.optim.lr_scheduler.MultiStepLR", "torch.nn.parallel.DataParallel", "torch.load", "torch.utils.data.DataLoader", "torch.FloatTensor", "torch.nn.L1Loss" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vandbt/pyblur
[ "5a31b6ced59a4cdef30eafebf0f7c27135d19d34" ]
[ "LinearMotionBlur.py" ]
[ "# -*- coding: utf-8 -*-\r\nimport math\r\nimport numpy as np\r\nfrom PIL import Image\r\nfrom scipy.signal import convolve2d\r\nfrom skimage.draw import line\r\n\r\nfrom LineDictionary import LineDictionary\r\n\r\nlineLengths =[3,5,7,9]\r\nlineTypes = [\"full\", \"right\", \"left\"]\r\n\r\nlineDict = LineDictionary()\r\n\r\ndef LinearMotionBlur_random(img):\r\n lineLengthIdx = np.random.randint(0, len(lineLengths))\r\n lineTypeIdx = np.random.randint(0, len(lineTypes)) \r\n lineLength = lineLengths[lineLengthIdx]\r\n lineType = lineTypes[lineTypeIdx]\r\n lineAngle = randomAngle(lineLength)\r\n return LinearMotionBlur(img, lineLength, lineAngle, lineType)\r\n\r\ndef LinearMotionBlur(img, dim, angle, linetype):\r\n imgarray = np.array(img, dtype=\"float32\")\r\n kernel = LineKernel(dim, angle, linetype)\r\n if imgarray.ndim==3 and imgarray.shape[-1]==3:\r\n convolved = np.stack([convolve2d(imgarray[...,channel_id], \r\n kernel, mode='same', \r\n fillvalue=255.0).astype(\"uint8\") \r\n for channel_id in range(3)], axis=2)\r\n else:\r\n convolved = convolve2d(imgarray, kernel, mode='same', fillvalue=255.0).astype(\"uint8\")\r\n img = Image.fromarray(convolved)\r\n return img\r\n\r\ndef LineKernel(dim, angle, linetype):\r\n kernelwidth = dim\r\n kernelCenter = int(math.floor(dim/2))\r\n angle = SanitizeAngleValue(kernelCenter, angle)\r\n kernel = np.zeros((kernelwidth, kernelwidth), dtype=np.float32)\r\n lineAnchors = lineDict.lines[dim][angle]\r\n if(linetype == 'right'):\r\n lineAnchors[0] = kernelCenter\r\n lineAnchors[1] = kernelCenter\r\n if(linetype == 'left'):\r\n lineAnchors[2] = kernelCenter\r\n lineAnchors[3] = kernelCenter\r\n rr,cc = line(lineAnchors[0], lineAnchors[1], lineAnchors[2], lineAnchors[3])\r\n kernel[rr,cc]=1\r\n normalizationFactor = np.count_nonzero(kernel)\r\n kernel = kernel / normalizationFactor \r\n return kernel\r\n\r\ndef SanitizeAngleValue(kernelCenter, angle):\r\n numDistinctLines = kernelCenter * 4\r\n angle = math.fmod(angle, 180.0)\r\n validLineAngles = np.linspace(0,180, numDistinctLines, endpoint = False)\r\n angle = nearestValue(angle, validLineAngles)\r\n return angle\r\n\r\ndef nearestValue(theta, validAngles):\r\n idx = (np.abs(validAngles-theta)).argmin()\r\n return validAngles[idx]\r\n\r\ndef randomAngle(kerneldim):\r\n kernelCenter = int(math.floor(kerneldim/2))\r\n numDistinctLines = kernelCenter * 4\r\n validLineAngles = np.linspace(0,180, numDistinctLines, endpoint = False)\r\n angleIdx = np.random.randint(0, len(validLineAngles))\r\n return int(validLineAngles[angleIdx])" ]
[ [ "numpy.abs", "numpy.linspace", "scipy.signal.convolve2d", "numpy.count_nonzero", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
ScriptBox99/deepmind-sonnet
[ "5cbfdc356962d9b6198d5b63f0826a80acfdf35b", "5cbfdc356962d9b6198d5b63f0826a80acfdf35b", "5cbfdc356962d9b6198d5b63f0826a80acfdf35b" ]
[ "sonnet/src/conformance/pickle_test.py", "sonnet/src/functional/jax_test.py", "sonnet/src/depthwise_conv_test.py" ]
[ "# Copyright 2019 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests pickling Sonnet modules.\"\"\"\n\nimport pickle\n\nfrom absl.testing import parameterized\nfrom sonnet.src import test_utils\nfrom sonnet.src.conformance import goldens\nimport tensorflow as tf\nimport tree\n\n\nclass PickleTest(test_utils.TestCase, parameterized.TestCase):\n\n # TODO(tomhennigan) Add tests with dill and cloudpickle.\n\n @goldens.all_goldens\n def test_pickle(self, golden):\n m1 = golden.create_module()\n golden.create_all_variables(m1)\n m2 = pickle.loads(pickle.dumps(m1))\n self.assertIsNot(m1, m2)\n\n # Check that module variables are recreated with equivalent properties.\n for v1, v2 in zip(m1.variables, m2.variables):\n self.assertIsNot(v1, v2)\n self.assertEqual(v1.name, v2.name)\n self.assertEqual(v1.device, v2.device)\n self.assertAllEqual(v1.read_value(), v2.read_value())\n\n if golden.deterministic:\n y1 = golden.forward(m1)\n y2 = golden.forward(m2)\n tree.map_structure(self.assertAllEqual, y1, y2)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2020 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for Sonnet JAX interop layer.\"\"\"\n\nfrom absl.testing import parameterized\nfrom sonnet.src import test_utils\nfrom sonnet.src.functional import jax\nimport tensorflow as tf\n\n\nclass JaxTest(test_utils.TestCase, parameterized.TestCase):\n\n def test_jit_copies_to_device(self):\n accelerators = get_accelerators()\n if not accelerators:\n self.skipTest(\"No accelerator.\")\n\n with tf.device(\"CPU\"):\n x = tf.ones([])\n\n self.assertTrue(x.device.endswith(\"CPU:0\"))\n\n for device in accelerators:\n y = jax.jit(lambda x: x, device=device)(x)\n self.assertTrue(y.device, device)\n\n def test_device_put(self):\n accelerators = get_accelerators()\n if not accelerators:\n self.skipTest(\"No accelerator.\")\n\n with tf.device(\"CPU\"):\n x = tf.ones([])\n\n for device in accelerators:\n y = jax.device_put(x, device=device)\n self.assertTrue(y.device.endswith(device))\n\n\nclass GradTest(test_utils.TestCase, parameterized.TestCase):\n\n def test_grad(self):\n f = lambda x: x ** 2\n g = jax.grad(f)\n x = tf.constant(4.)\n self.assertAllClose(g(x).numpy(), (2 * x).numpy())\n\n def test_argnums(self):\n f = lambda x, y: (x 
** 2 + y ** 2)\n g = jax.grad(f, argnums=(0, 1))\n x = tf.constant(4.)\n y = tf.constant(5.)\n gx, gy = g(x, y)\n self.assertAllClose(gx.numpy(), (2 * x).numpy())\n self.assertAllClose(gy.numpy(), (2 * y).numpy(), rtol=1e-3)\n\n def test_has_aux(self):\n f = lambda x: (x ** 2, \"aux\")\n g = jax.grad(f, has_aux=True)\n x = tf.constant(2.)\n gx, aux = g(x)\n self.assertAllClose(gx.numpy(), (2 * x).numpy())\n self.assertEqual(aux, \"aux\")\n\n\ndef get_accelerators():\n gpus = tf.config.experimental.list_logical_devices(\"GPU\")\n tpus = tf.config.experimental.list_logical_devices(\"TPU\")\n return [tf.DeviceSpec.from_string(d.name).to_string() for d in gpus + tpus]\n\nif __name__ == \"__main__\":\n tf.test.main()\n", "# Copyright 2019 The Sonnet Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"Tests for sonnet.v2.src.depthwise_conv.\"\"\"\n\nfrom absl.testing import parameterized\n\nimport numpy as np\nfrom sonnet.src import depthwise_conv\nfrom sonnet.src import initializers\nfrom sonnet.src import test_utils\nimport tensorflow as tf\n\n\ndef create_constant_initializers(w, b, with_bias):\n if with_bias:\n return {\n \"w_init\": initializers.Constant(w),\n \"b_init\": initializers.Constant(b)\n }\n else:\n return {\"w_init\": initializers.Constant(w)}\n\n\nclass DepthwiseConvTest(test_utils.TestCase, parameterized.TestCase):\n\n def testInitializerKeysInvalidWithoutBias(self):\n with self.assertRaisesRegex(ValueError, \"b_init must be None\"):\n depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1,\n kernel_shape=3,\n data_format=\"NHWC\",\n with_bias=False,\n b_init=tf.zeros_initializer())\n\n @parameterized.parameters(tf.float32, tf.float64)\n def testDefaultInitializers(self, dtype):\n if \"TPU\" in self.device_types and dtype == tf.float64:\n self.skipTest(\"Double precision not supported on TPU.\")\n\n conv1 = depthwise_conv.DepthwiseConv2D(\n kernel_shape=16, stride=1, padding=\"VALID\", data_format=\"NHWC\")\n\n out = conv1(tf.random.normal([8, 64, 64, 1], dtype=dtype))\n\n self.assertAllEqual(out.shape, [8, 49, 49, 1])\n self.assertEqual(out.dtype, dtype)\n\n # Note that for unit variance inputs the output is below unit variance\n # because of the use of the truncated normal initalizer\n err = 0.2 if self.primary_device == \"TPU\" else 0.1\n self.assertNear(out.numpy().std(), 0.87, err=err)\n\n @parameterized.named_parameters((\"SamePaddingUseBias\", True, \"SAME\"),\n (\"SamePaddingNoBias\", False, \"SAME\"),\n (\"ValidPaddingNoBias\", False, \"VALID\"),\n (\"ValidPaddingUseBias\", True, \"VALID\"))\n def testFunction(self, with_bias, padding):\n conv1 = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1,\n kernel_shape=3,\n stride=1,\n padding=padding,\n with_bias=with_bias,\n data_format=\"NHWC\",\n **create_constant_initializers(1.0, 1.0, with_bias))\n conv2 = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1,\n kernel_shape=3,\n stride=1,\n 
padding=padding,\n with_bias=with_bias,\n data_format=\"NHWC\",\n **create_constant_initializers(1.0, 1.0, with_bias))\n defun_conv = tf.function(conv2)\n\n iterations = 5\n\n for _ in range(iterations):\n x = tf.random.uniform([1, 5, 5, 1])\n y1 = conv1(x)\n y2 = defun_conv(x)\n\n self.assertAllClose(self.evaluate(y1), self.evaluate(y2), atol=1e-4)\n\n def testUnknownBatchSizeNHWC(self):\n x = tf.TensorSpec([None, 5, 5, 3], dtype=tf.float32)\n\n c = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1, kernel_shape=3, data_format=\"NHWC\")\n defun_conv = tf.function(c).get_concrete_function(x)\n\n out1 = defun_conv(tf.ones([3, 5, 5, 3]))\n self.assertEqual(out1.shape, [3, 5, 5, 3])\n\n out2 = defun_conv(tf.ones([5, 5, 5, 3]))\n self.assertEqual(out2.shape, [5, 5, 5, 3])\n\n def testUnknownBatchSizeNCHW(self):\n if self.primary_device == \"CPU\":\n self.skipTest(\"NCHW not supported on CPU\")\n\n x = tf.TensorSpec([None, 3, 5, 5], dtype=tf.float32)\n c = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1, kernel_shape=3, data_format=\"NCHW\")\n defun_conv = tf.function(c).get_concrete_function(x)\n\n out1 = defun_conv(tf.ones([3, 3, 5, 5]))\n self.assertEqual(out1.shape, [3, 3, 5, 5])\n\n out2 = defun_conv(tf.ones([5, 3, 5, 5]))\n self.assertEqual(out2.shape, [5, 3, 5, 5])\n\n def testUnknownSpatialDims(self):\n x = tf.TensorSpec([3, None, None, 3], dtype=tf.float32)\n\n c = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1, kernel_shape=3, data_format=\"NHWC\")\n defun_conv = tf.function(c).get_concrete_function(x)\n\n out = defun_conv(tf.ones([3, 5, 5, 3]))\n expected_out = c(tf.ones([3, 5, 5, 3]))\n self.assertEqual(out.shape, [3, 5, 5, 3])\n self.assertAllEqual(self.evaluate(out), self.evaluate(expected_out))\n\n out = defun_conv(tf.ones([3, 4, 4, 3]))\n expected_out = c(tf.ones([3, 4, 4, 3]))\n self.assertEqual(out.shape, [3, 4, 4, 3])\n self.assertAllEqual(self.evaluate(out), self.evaluate(expected_out))\n\n @parameterized.parameters(True, False)\n def testUnknownChannels(self, autograph):\n x = tf.TensorSpec([3, 3, 3, None], dtype=tf.float32)\n\n c = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1, kernel_shape=3, data_format=\"NHWC\")\n defun_conv = tf.function(c, autograph=autograph)\n\n with self.assertRaisesRegex(ValueError,\n \"The number of input channels must be known\"):\n defun_conv.get_concrete_function(x)\n\n @parameterized.named_parameters((\"WithBias\", True), (\"WithoutBias\", False))\n def testComputationSame(self, with_bias):\n conv1 = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1,\n kernel_shape=[3, 3],\n stride=1,\n padding=\"SAME\",\n with_bias=with_bias,\n **create_constant_initializers(1.0, 1.0, with_bias))\n\n out = conv1(tf.ones([1, 5, 5, 1]))\n expected_out = np.array([[5, 7, 7, 7, 5], [7, 10, 10, 10, 7],\n [7, 10, 10, 10, 7], [7, 10, 10, 10, 7],\n [5, 7, 7, 7, 5]])\n if not with_bias:\n expected_out -= 1\n\n self.assertEqual(out.shape, [1, 5, 5, 1])\n self.assertAllClose(np.reshape(out.numpy(), [5, 5]), expected_out)\n\n @parameterized.named_parameters((\"WithBias\", True), (\"WithoutBias\", False))\n def testComputationValid(self, with_bias):\n conv1 = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1,\n kernel_shape=[3, 3],\n stride=1,\n padding=\"VALID\",\n with_bias=with_bias,\n **create_constant_initializers(1.0, 1.0, with_bias))\n\n out = conv1(tf.ones([1, 5, 5, 1]))\n expected_out = np.array([[10, 10, 10], [10, 10, 10], [10, 10, 10]])\n if not with_bias:\n expected_out -= 1\n\n self.assertEqual(out.shape, [1, 3, 3, 
1])\n self.assertAllClose(np.reshape(out.numpy(), [3, 3]), expected_out)\n\n @parameterized.named_parameters((\"WithBias\", True), (\"WithoutBias\", False))\n def testComputationValidMultiChannel(self, with_bias):\n conv1 = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=1,\n kernel_shape=[3, 3],\n stride=1,\n padding=\"VALID\",\n with_bias=with_bias,\n **create_constant_initializers(1.0, 1.0, with_bias))\n\n out = conv1(tf.ones([1, 5, 5, 3]))\n expected_out = np.array([[[10] * 3] * 3] * 3)\n if not with_bias:\n expected_out -= 1\n\n self.assertAllClose(np.reshape(out.numpy(), [3, 3, 3]), expected_out)\n\n @parameterized.named_parameters((\"WithBias\", True), (\"WithoutBias\", False))\n def testSharing(self, with_bias):\n \"\"\"Sharing is working.\"\"\"\n conv1 = depthwise_conv.DepthwiseConv2D(\n channel_multiplier=3,\n kernel_shape=3,\n stride=1,\n padding=\"SAME\",\n with_bias=with_bias)\n\n x = np.random.randn(1, 5, 5, 1)\n x1 = tf.constant(x, dtype=np.float32)\n x2 = tf.constant(x, dtype=np.float32)\n\n self.assertAllClose(conv1(x1), conv1(x2))\n\n # Kernel shape was set to 3, which is expandeded to [3, 3, 3].\n # Input channels are 1, output channels := in_channels * multiplier.\n # multiplier is kernel_shape[2] == 3. So weight layout must be:\n # (3, 3, 1, 3).\n w = np.random.randn(3, 3, 1, 3) # Now change the weights.\n conv1.w.assign(w)\n self.assertAllClose(conv1(x1), conv1(x2))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ], [ "tensorflow.device", "tensorflow.constant", "tensorflow.config.experimental.list_logical_devices", "tensorflow.DeviceSpec.from_string", "tensorflow.test.main", "tensorflow.ones" ], [ "tensorflow.constant", "tensorflow.zeros_initializer", "tensorflow.random.uniform", "tensorflow.test.main", "tensorflow.ones", "tensorflow.function", "numpy.random.randn", "numpy.array", "tensorflow.random.normal", "tensorflow.TensorSpec" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "1.12", "2.6", "2.2", "1.13", "2.3", "2.4", "1.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.8", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.2", "1.13", "2.3", "2.4", "2.9", "2.5", "2.8", "2.10" ] } ]
Mandorath/analyze_ais
[ "335a5185a588f99a8151ea77e2406e8cec43ffcb" ]
[ "stats_ais.py" ]
[ "import pandas as pd\nimport numpy as np\n\n\ndef calc_stats(df, col_ais, col_spd, col_zn, unique_col, date, df_out, ship_count):\n '''\n Statistics calculation function.\n '''\n # df = pd.read_csv(file, delimiter=\",\")\n # the percentage of \"True\" in AIS gaps\n df['spd_and_gap'] = pd.np.where(df[['flag_spd_chng',\n 'AIS_G']].eq(True).all(1, skipna=True), True,\n pd.np.where(df[['flag_spd_chng',\n 'AIS_G']].isnull().all(1), None,\n False))\n df['spd_gap_zn'] = pd.np.where(df[['flag_spd_chng',\n 'AIS_G', 'Zn_entry']].eq(True).all(1, skipna=True), True,\n pd.np.where(df[['flag_spd_chng',\n 'AIS_G']].isnull().all(1), None,\n False))\n new_df = df[((df['flag_spd_chng'] == True) & (df['AIS_G'] == True) & (df['Zn_entry'] == True))]\n print(new_df)\n percent_g = df[col_ais].value_counts(normalize=True,\n sort=True,\n ascending=True\n ).mul(100).rename_axis('Gap').reset_index(name='Percentage')\n percentage_gap_true = percent_g.at[0, 'Percentage']\n percentage_gap_false = percent_g.at[1, 'Percentage']\n # the percentage of \"True\" in speed change\n percent_sc = df[col_spd].value_counts(normalize=True,\n sort=True,\n ascending=True\n ).mul(100).rename_axis('SpeedChange').reset_index(name='Percentage')\n percentage_speed_true = percent_sc.at[0, 'Percentage']\n percentage_speed_false = percent_sc.at[1, 'Percentage']\n # the percentage of \"True\" in zone entry with unique MMSI number\n dfc = df[df[col_zn] == True]\n group1 = dfc.groupby(unique_col)['Zn_entry'].unique()\n group2 = df[unique_col].unique()\n percentage_zone_true, percentage_zone_false = ((len(group1)/len(group2)*100), (100-(len(group1)/len(group2)*100)))\n percentage_spd_gap = df['spd_and_gap'].value_counts(normalize=True,\n sort=True,\n ascending=True\n ).mul(100).rename_axis('spd_gap').reset_index(name='Percentage')\n print(percentage_spd_gap)\n percentage_spd_gap_t = percentage_spd_gap.at[0, 'spd_gap']\n if not percentage_spd_gap_t:\n percentage_spd_gap_true = 0.0\n percentage_spd_gap_false = percentage_spd_gap.at[0, 'Percentage']\n else:\n percentage_spd_gap_true = percentage_spd_gap.at[0, 'Percentage']\n percentage_spd_gap_false = percentage_spd_gap.at[1, 'Percentage']\n percentage_all = df['spd_gap_zn'].value_counts(normalize=True,\n sort=True,\n ascending=True,\n dropna=False\n ).mul(100).rename_axis('spd_gap_zn').reset_index(name='Percentage')\n print(percentage_all)\n percentage_all_t = percentage_all.at[0, 'spd_gap_zn']\n print(percentage_all_t)\n if not percentage_all_t:\n percentage_all_true = 0.0\n percentage_all_false = percentage_all.at[0, 'Percentage']\n else:\n percentage_all_true = percentage_all.at[0, 'Percentage']\n percentage_all_false = percentage_all.at[1, 'Percentage']\n stats = {'date': [date],\n 'Gap_true': [percentage_gap_true],\n 'Gap_false': [percentage_gap_false],\n 'Speed_true': [percentage_speed_true],\n 'Speed_false': [percentage_speed_false],\n 'Zone_true': [percentage_zone_true],\n 'Zone_false': [percentage_zone_false],\n 'spd_gap_true': [percentage_spd_gap_true],\n 'spd_gap_false': [percentage_spd_gap_false],\n 'all_true': [percentage_all_true],\n 'all_false': [percentage_all_false],\n 'ship_count': [ship_count]\n }\n\n dfstats = pd.DataFrame(stats)\n df_t = df_out\n df_t = df_t.append(dfstats, ignore_index=True)\n return df_t, new_df\n\n\ndef create_stats_df():\n dfstats = {'date': [],\n 'Gap_true': [],\n 'Gap_false': [],\n 'Speed_true': [],\n 'Speed_false': [],\n 'Zone_true': [],\n 'Zone_false': [],\n 'spd_gap_true': [],\n 'spd_gap_false': [],\n 'all_true': [],\n 'all_false': [],\n 
'ship_count': []\n }\n df = pd.DataFrame(dfstats)\n return df\n# '''\n# fishing\n# '''\n# fish = pd.read_csv('/Users/caseywong/Documents/GitHub/analyze_ais/Fishing2.out', delimiter = \",\")\n#\n# # the percentage of \"True\" in AIS gaps\n# percent_gap = fish['AIS_G'].value_counts(normalize=True,sort=True, ascending=True).mul(100).astype(str)+'%'\n# print(percent_gap)\n#\n# # the percentage of \"True\" in speed change\n# percent_sc = fish['flag_spd_chng'].value_counts(normalize=True,sort=True, ascending=True).mul(100).astype(str)+'%'\n# print(percent_sc)\n#\n# # the percentage of \"True\" in zone entry with unique MMSI number\n# dff = fish[fish['Zn_entry'] == True]\n# group1 = dff.groupby('MMSI')['Zn_entry'].unique()\n# group2 = fish['MMSI'].unique()\n# percentage_zone= len(group1)/len(group2)*100\n# print(\"True:\", percentage_zone,\"%\")\n#\n# fishstats = {'Anomaly': ['Gap','Speed','Zone'],\n# 'Percentage': [percent_gap,percent_sc,percentage_zone]\n# }\n#\n# df = pd.DataFrame(fishstats, columns = ['Anomaly', 'Percentage'])\n# df.to_csv('fishstats.csv')\n# '''\n# Tank\n# '''\n# tank = pd.read_csv('/Users/caseywong/Documents/GitHub/analyze_ais/Tank2.out', delimiter = \",\")\n#\n# # the percentage of \"True\" in AIS gaps\n# percent_gap = tank['AIS_G'].value_counts(normalize=True,sort=True, ascending=True).mul(100).astype(str)+'%'\n# print(percent_gap)\n#\n# # the percentage of \"True\" in speed change\n# percent_sc = tank['flag_spd_chng'].value_counts(normalize=True,sort=True, ascending=True).mul(100).astype(str)+'%'\n# print(percent_sc)\n#\n# # the percentage of \"True\" in zone entry with unique MMSI number\n# dft = tank[tank['Zn_entry'] == True]\n# group1 = dft.groupby('MMSI')['Zn_entry'].unique()\n# group2 = tank['MMSI'].unique()\n# percentage_zone= len(group1)/len(group2)*100\n# print(\"True:\", percentage_zone,\"%\")\n#\n# tankstats = {'Anomaly': ['Gap','Speed','Zone'],\n# 'Percentage': [percent_gap,percent_sc,percentage_zone]\n# }\n#\n# df = pd.DataFrame(tankstats, columns = ['Anomaly', 'Percentage'])\n# df.to_csv('tankstats.csv')\n# '''\n# Passenger\n# '''\n# passenger = pd.read_csv('/Users/caseywong/Documents/GitHub/analyze_ais/Passenger2.out', delimiter = \",\")\n#\n# # the percentage of \"True\" in AIS gaps\n# percent_gap = passenger['AIS_G'].value_counts(normalize=True,sort=True, ascending=True).mul(100).astype(str)+'%'\n# print(percent_gap)\n#\n# # the percentage of \"True\" in speed change\n# percent_sc = passenger['flag_spd_chng'].value_counts(normalize=True,sort=True, ascending=True).mul(100).astype(str)+'%'\n# print(percent_sc)\n#\n# # the percentage of \"True\" in zone entry with unique MMSI number\n# dfp = passenger[passenger['Zn_entry'] == True]\n# group1 = dfp.groupby('MMSI')['Zn_entry'].unique()\n# group2 = passenger['MMSI'].unique()\n# percentage_zone= len(group1)/len(group2)*100\n# print(\"True:\", percentage_zone,\"%\")\n#\n# passstats = {'Anomaly': ['Gap','Speed','Zone'],\n# 'Percentage': [percent_gap,percent_sc,percentage_zone]\n# }\n#\n# df = pd.DataFrame(passstats, columns = ['Anomaly', 'Percentage'])\n# df.to_csv('passstats.csv')\n" ]
[ [ "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
Heron-Repositories/Transfer-Learning-In-Animals
[ "96a49f1ca27c62defaf3ea90f0dd6640034c8541" ]
[ "Transforms/Transfer_Learning/TL_Experiment_Phase_2/tl_experiment_phase_2_worker.py" ]
[ "\r\nimport sys\r\nfrom os import path\r\n\r\ncurrent_dir = path.dirname(path.abspath(__file__))\r\nwhile path.split(current_dir)[-1] != r'Heron':\r\n current_dir = path.dirname(current_dir)\r\nsys.path.insert(0, path.dirname(current_dir))\r\n\r\nimport copy\r\nimport numpy as np\r\nfrom enum import Enum\r\nfrom Heron.communication.socket_for_serialization import Socket\r\nfrom Heron import general_utils as gu\r\nfrom Heron import constants as ct\r\n\r\nno_mtt: bool\r\nreward_on_poke_delay: float\r\nlevers_state: bool\r\ntrap_on: bool\r\nmax_distance_to_target: int\r\nspeed: float\r\nvariable_targets: bool\r\nmust_lift_at_target: bool\r\n\r\nangles_of_visuals: np.empty(4) # 0th number is if any lever is pressed, 1st is the angle of the manipulandum, 2nd and\r\n# 3rd the angles of the target and trap respectively\r\ninitial_angle_of_manipulandum: int\r\nup_or_down: bool\r\n\r\n\r\nclass ExperimentState(Enum):\r\n PokeOut = 1\r\n PokeIn_Running = 2\r\n PokeIn_OnTarget = 3\r\n PokeIn_Finished_Sucess = 4\r\n PokeIn_Finished_Failure = 5\r\n\r\n\r\nexperiment_state = ExperimentState.PokeOut\r\nnumber_of_pellets = 1\r\n\r\n# When an animal takes its snout out of poke then the experiment counts how many time steps\r\n# of 100 ms the animal is out of poke. If that number crosses a threshold then the trial aborts.\r\n# VERY IMPORTANT!! This assumes the TL_Levers is sending commands every 100ms. If this changes (in the arduino .ino)\r\n# then this number and its comparisons should change to keep times the same!!\r\n_100ms_time_steps_counter = 20\r\n\r\n# When the manipulandum reaches the target then the animal must release the lever. This variable holds\r\n# how many time steps of 100 ms the manipulandum has remained unmoving on target and reward is provided only if this\r\n# crosses a threshold\r\non_target = 0\r\n\r\ntime_on_target = 5\r\nerror = 3\r\n\r\n\r\ndef initialise(_worker_object):\r\n global no_mtt\r\n global reward_on_poke_delay\r\n global levers_state\r\n global trap_on\r\n global max_distance_to_target\r\n global speed\r\n global variable_targets\r\n global must_lift_at_target\r\n global number_of_pellets\r\n global worker_object\r\n\r\n try:\r\n parameters = _worker_object.parameters\r\n reward_on_poke = parameters[0]\r\n reward_on_poke_delay = parameters[1]\r\n trap_on = parameters[2]\r\n max_distance_to_target = parameters[3]\r\n speed = parameters[4]\r\n variable_targets = parameters[5]\r\n must_lift_at_target = parameters[6]\r\n number_of_pellets = parameters[7]\r\n except Exception as e:\r\n print(e)\r\n return False\r\n\r\n return True\r\n\r\n\r\ndef initialise_trial():\r\n global variable_targets\r\n global initial_angle_of_manipulandum\r\n global angles_of_visuals\r\n\r\n if variable_targets:\r\n manipulandum, target, trap = initialise_trial_with_variable_target_trap()\r\n else:\r\n manipulandum, target, trap = initialise_trial_with_constant_target_trap()\r\n\r\n initial_angle_of_manipulandum = manipulandum\r\n angles_of_visuals = np.array([0, manipulandum, target, trap])\r\n angles_of_visuals = angles_of_visuals - np.max(angles_of_visuals[1:])\r\n initial_angle_of_manipulandum = copy.copy(angles_of_visuals[1])\r\n angles_of_visuals[0] = 0\r\n\r\n\r\ndef initialise_trial_with_variable_target_trap():\r\n global angles_of_visuals\r\n global max_distance_to_target\r\n global up_or_down\r\n global experiment_state\r\n\r\n manipulandum = np.random.randint(-80, -9)\r\n up_or_down = np.random.binomial(n=1, p=0.5)\r\n\r\n if up_or_down:\r\n target = np.random.randint(manipulandum + 
11, np.min([manipulandum + max_distance_to_target + 12, 0]))\r\n trap = np.random.randint(-90, manipulandum - 9)\r\n else:\r\n trap = np.random.randint(manipulandum + 11, 0)\r\n target = np.random.randint(np.max([manipulandum - max_distance_to_target - 10, -90]), manipulandum - 9)\r\n\r\n return manipulandum, target, trap\r\n\r\n\r\ndef initialise_trial_with_constant_target_trap():\r\n global max_distance_to_target\r\n global up_or_down\r\n global experiment_state\r\n\r\n up_or_down = np.random.binomial(n=1, p=0.5)\r\n\r\n if up_or_down:\r\n target = 0\r\n trap = -90\r\n\r\n manipulandum = np.random.randint(np.max([target - max_distance_to_target - 10, -90]), target - 9)\r\n else:\r\n target = -90\r\n trap = 0\r\n\r\n manipulandum = np.random.randint(target + 11, np.min([target + max_distance_to_target + 12, 0]))\r\n\r\n return manipulandum, target, trap\r\n\r\n\r\ndef update_of_visuals(lever_pressed_time):\r\n global angles_of_visuals\r\n global experiment_state\r\n global on_target\r\n global error\r\n global time_on_target\r\n\r\n new_position = int(initial_angle_of_manipulandum + speed * lever_pressed_time / 1000)\r\n\r\n # If the rat is not pressing a lever\r\n if lever_pressed_time == 0:\r\n\r\n # If the manipulandum has reached the target\r\n if experiment_state == ExperimentState.PokeIn_OnTarget:\r\n on_target += 1\r\n\r\n # If the manipulandum has stayed on the target long enough\r\n if on_target > time_on_target:\r\n\r\n experiment_state = ExperimentState.PokeIn_Finished_Sucess\r\n\r\n # If the manipulandum is not on the target\r\n else:\r\n angles_of_visuals[1] = initial_angle_of_manipulandum\r\n angles_of_visuals[0] = 0\r\n\r\n # If the rat is pressing a lever\r\n else:\r\n angles_of_visuals[0] = 1\r\n\r\n # If the target is over (left) of the rotating (translating) manipulandum\r\n if up_or_down:\r\n\r\n # If the manipulandum still hasn't reached the target\r\n if angles_of_visuals[1] <= angles_of_visuals[2] - error or angles_of_visuals[1] >= angles_of_visuals[2] + error:\r\n experiment_state = ExperimentState.PokeIn_Running\r\n on_target = 0\r\n\r\n # If the correct lever was pressed\r\n if new_position > angles_of_visuals[1]:\r\n angles_of_visuals[1] = new_position\r\n\r\n # If the wrong lever was pressed and the trap is on\r\n if new_position < angles_of_visuals[1] and trap_on:\r\n angles_of_visuals[1] = new_position\r\n\r\n # If the manipulandum has reached the target but hasn't stayed long enough on it\r\n if angles_of_visuals[2] - error <= angles_of_visuals[1] <= angles_of_visuals[2] + error \\\r\n and on_target < time_on_target + 1:\r\n\r\n if must_lift_at_target:\r\n experiment_state = ExperimentState.PokeIn_OnTarget\r\n\r\n # If the rat is till pressing the lever keep moving\r\n if lever_pressed_time > 0:\r\n angles_of_visuals[1] = new_position\r\n else:\r\n experiment_state = ExperimentState.PokeIn_Finished_Sucess\r\n\r\n # If the manipulandum reached the trap\r\n if angles_of_visuals[3] + error > angles_of_visuals[1] > angles_of_visuals[3] - error:\r\n experiment_state = ExperimentState.PokeIn_Finished_Failure\r\n on_target = 0\r\n\r\n else: # If the target is under (right) of the manipulandum do as before with reversed movement\r\n\r\n # If the manipulandum still hasn't reached the target\r\n if angles_of_visuals[1] >= angles_of_visuals[2] + error or angles_of_visuals[1] <= angles_of_visuals[2] - error:\r\n experiment_state = ExperimentState.PokeIn_Running\r\n on_target = 0\r\n\r\n # If the correct lever was pressed\r\n if new_position < angles_of_visuals[1]:\r\n 
angles_of_visuals[1] = new_position\r\n\r\n # If the wrong lever was pressed and the trap is on\r\n if new_position > angles_of_visuals[1] and trap_on:\r\n angles_of_visuals[1] = new_position\r\n\r\n # If the manipulandum has reached the target but hasn't stayed long enough on it\r\n if angles_of_visuals[2] + error >= angles_of_visuals[1] >= angles_of_visuals[2] - error \\\r\n and on_target < time_on_target + 1:\r\n\r\n if must_lift_at_target:\r\n experiment_state = ExperimentState.PokeIn_OnTarget\r\n\r\n # If the rat is till pressing the lever keep moving\r\n if lever_pressed_time < 0:\r\n angles_of_visuals[1] = new_position\r\n else:\r\n experiment_state = ExperimentState.PokeIn_Finished_Sucess\r\n\r\n # If the manipulandum reached the trap\r\n if angles_of_visuals[3] + error > angles_of_visuals[1] > angles_of_visuals[3] - error:\r\n experiment_state = ExperimentState.PokeIn_Finished_Failure\r\n on_target = 0\r\n\r\n\r\ndef experiment(data, parameters):\r\n global no_mtt\r\n global reward_on_poke_delay\r\n global levers_state\r\n global trap_on\r\n global speed\r\n global variable_targets\r\n global number_of_pellets\r\n global experiment_state\r\n global _100ms_time_steps_counter\r\n\r\n try:\r\n reward_on_poke_delay = parameters[1]\r\n speed = parameters[4]\r\n number_of_pellets = parameters[7]\r\n except:\r\n pass\r\n\r\n message = data[1:] # data[0] is the topic\r\n message = Socket.reconstruct_array_from_bytes_message(message)\r\n # The first element of the array is whether the rat is in the poke. The second is the milliseconds it has been\r\n # pressing either the left or the right lever (one is positive the other negative). If it is 0 then the rat is not\r\n # pressing a lever\r\n poke_on = message[0]\r\n lever_press_time = message[1]\r\n\r\n result = [np.array([ct.IGNORE]), np.array([ct.IGNORE])]\r\n max_100ms_steps_for_poke_out = 10\r\n\r\n # 1. If the no_mtt is on then\r\n # 1. a. If the rat is in the poke then tell the monitors to turn on and the poke controller\r\n # to deliver number_of_pellets (assuming the TL Poke Controller Trigger String is set to 'number').\r\n # 1. b. If the rat is not in the poke turn the monitors off and do not send a message to the poke controller\r\n # 2. If the no_mtt is off then:\r\n # 2. a. If the rat i snot in the poke then as above and send the experiment_state to PokeOut\r\n # 2. b. If the rat is in the poke then:\r\n # 2. b. i. If the trial has finished successfully then turn off the monitors and deliver number_of_pellets (if the\r\n # reward poke is in availability mode it will ignore any further commands to deliver reward)\r\n # 2. b. ii. If the trial has finished unsuccessfully then turn off the monitors and do not send anything to the poke\r\n # 2. b. iii. If the trial just started then put the state to running, initialise the visuals and update the monitors.\r\n # 2. b. iv. If the trial was running then update the visuals\r\n # In cases iii. and iv. 
send the visuals\r\n if reward_on_poke:\r\n if poke_on:\r\n _100ms_time_steps_counter += 1\r\n result = [np.array([True]), np.array([ct.IGNORE])]\r\n if _100ms_time_steps_counter >= reward_on_poke_delay * 10:\r\n result = [np.array([True]), np.array([number_of_pellets])]\r\n else:\r\n _100ms_time_steps_counter = 0\r\n result = [np.array([False]), np.array([ct.IGNORE])]\r\n else:\r\n if not poke_on:\r\n experiment_state = ExperimentState.PokeOut\r\n _100ms_time_steps_counter += 1\r\n\r\n if _100ms_time_steps_counter < max_100ms_steps_for_poke_out:\r\n result = [angles_of_visuals, np.array([ct.IGNORE])]\r\n else:\r\n result = [np.array([False]), np.array([ct.IGNORE])]\r\n\r\n else:\r\n if experiment_state == ExperimentState.PokeIn_Finished_Sucess:\r\n result = [np.array([False]), np.array([number_of_pellets])]\r\n _100ms_time_steps_counter = max_100ms_steps_for_poke_out + 1\r\n\r\n elif experiment_state == ExperimentState.PokeIn_Finished_Failure:\r\n result = [np.array([False]), np.array([ct.IGNORE])]\r\n _100ms_time_steps_counter = max_100ms_steps_for_poke_out + 1\r\n\r\n elif experiment_state == ExperimentState.PokeOut and _100ms_time_steps_counter > max_100ms_steps_for_poke_out:\r\n initialise_trial()\r\n experiment_state = ExperimentState.PokeIn_Running\r\n result = [angles_of_visuals, np.array([ct.IGNORE])]\r\n _100ms_time_steps_counter = 0\r\n\r\n elif experiment_state == ExperimentState.PokeIn_Running or \\\r\n (experiment_state == ExperimentState.PokeOut and _100ms_time_steps_counter < max_100ms_steps_for_poke_out)\\\r\n or experiment_state == ExperimentState.PokeIn_OnTarget:\r\n update_of_visuals(lever_press_time)\r\n result = [angles_of_visuals, np.array([ct.IGNORE])]\r\n _100ms_time_steps_counter = 0\r\n\r\n return result\r\n\r\n\r\ndef on_end_of_life():\r\n pass\r\n\r\n\r\nif __name__ == \"__main__\":\r\n worker_object = gu.start_the_transform_worker_process(work_function=experiment,\r\n end_of_life_function=on_end_of_life,\r\n initialisation_function=initialise)\r\n worker_object.start_ioloop()\r\n" ]
[ [ "numpy.min", "numpy.max", "numpy.random.binomial", "numpy.array", "numpy.empty", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iamxiaodong/Kats
[ "31df55acc22797ce06330586542fe6e5f315e574", "31df55acc22797ce06330586542fe6e5f315e574", "31df55acc22797ce06330586542fe6e5f315e574" ]
[ "kats/models/metalearner/metalearner_modelselect.py", "kats/models/linear_model.py", "kats/models/metalearner/metalearner_predictability.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-unsafe\n\n\"\"\"A module for meta-learner model selection.\n\nThis module contains:\n - :class:`MetaLearnModelSelect` for meta-learner models selection, which recommends the forecasting model based on time series or time series features;\n - :class:`RandomDownSampler` for creating balanced dataset via downsampling.\n\"\"\"\n\nimport ast\nimport logging\nfrom collections import Counter, defaultdict\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport joblib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom kats.consts import TimeSeriesData\nfrom kats.tsfeatures.tsfeatures import TsFeatures\nfrom sklearn import metrics\nfrom sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\n\n\nclass MetaLearnModelSelect:\n \"\"\"Meta-learner framework on forecasting model selection.\n This framework uses classification algorithms to recommend suitable forecasting models.\n For training, it uses time series features as inputs and the best forecasting models as labels.\n For prediction, it takes time series or time series features as inputs to predict the most suitable forecasting model.\n The class provides count_category, preprocess, plot_feature_comparison, get_corr_mtx, plot_corr_heatmap, train, pred, pred_by_feature, pred_fuzzy, load_model and save_model.\n\n Attributes:\n metadata: Optional; A list of dictionaries representing the meta-data of time series (e.g., the meta-data generated by GetMetaData object).\n Each dictionary d must contain at least 3 components: 'hpt_res', 'features' and 'best_model'. d['hpt_res'] represents the best hyper-parameters for each candidate model and the corresponding errors;\n d['features'] are time series features, and d['best_model'] is a string representing the best candidate model of the corresponding time series data.\n metadata should not be None unless load_model is True. Default is None.\n load_model: Optional; A boolean to specify whether or not to load a trained model. 
Default is False.\n\n Sample Usage:\n >>> mlms = MetaLearnModelSelect(data)\n >>> mlms.train(n_trees=200, test_size=0.1, eval_method='mean') # Train a meta-learner model selection model.\n >>> mlms.pred(TSdata) # Predict/recommend forecasting model for a new time series data.\n >>> mlms2.pred(TSdata, n_top=3) # Predict/recommend the top 3 most suitable forecasting model.\n >>> mlms.save_model(\"mlms.pkl\") # Save the trained model.\n >>> mlms2 = MetaLearnModelSelect(metadata=None, load_model=True) # Create a new object and then load a pre-trained model.\n >>> mlms2.load_model(\"mlms.pkl\")\n \"\"\"\n\n def __init__(\n self, metadata: Optional[List[Dict[str, Any]]] = None, load_model: bool = False\n ) -> None:\n if not load_model:\n # pyre-fixme[6]: Expected `Sized` for 1st param but got\n # `Optional[List[typing.Any]]`.\n if len(metadata) <= 30:\n msg = \"Dataset is too small to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if metadata is None:\n msg = \"Missing metadata!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"hpt_res\" not in metadata[0]:\n msg = \"Missing best hyper-params, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"features\" not in metadata[0]:\n msg = \"Missing time series features, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"best_model\" not in metadata[0]:\n msg = \"Missing best models, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n self.metadata = metadata\n self._reorganize_data()\n self._validate_data()\n\n self.scale = False\n self.clf = None\n elif load_model:\n pass\n else:\n msg = \"Fail to initiate MetaLearnModelSelect.\"\n raise ValueError(msg)\n\n def _reorganize_data(self) -> None:\n hpt_list = []\n metadataX_list = []\n metadataY_list = []\n for i in range(len(self.metadata)):\n if isinstance(self.metadata[i][\"hpt_res\"], str):\n hpt_list.append(ast.literal_eval(self.metadata[i][\"hpt_res\"]))\n else:\n hpt_list.append(self.metadata[i][\"hpt_res\"])\n\n if isinstance(self.metadata[i][\"features\"], str):\n metadataX_list.append(\n list(ast.literal_eval(self.metadata[i][\"features\"]).values())\n )\n else:\n metadataX_list.append(list(self.metadata[i][\"features\"].values()))\n\n metadataY_list.append(self.metadata[i][\"best_model\"])\n\n self.col_namesX = list(self.metadata[0][\"features\"].keys())\n self.hpt = pd.Series(hpt_list, name=\"hpt\")\n self.metadataX = pd.DataFrame(metadataX_list, columns=self.col_namesX)\n self.metadataX.fillna(0, inplace=True)\n self.metadataY = pd.Series(metadataY_list, name=\"y\")\n self.x_mean = np.average(self.metadataX.values, axis=0)\n self.x_std = np.std(self.metadataX.values, axis=0)\n self.x_std[self.x_std == 0] = 1.0\n\n def _validate_data(self):\n num_class = self.metadataY.nunique()\n if num_class == 1:\n msg = \"Only one class in the label column (best_model), not able to train a classifier!\"\n logging.error(msg)\n raise ValueError(msg)\n\n local_count = list(self.count_category().values())\n if min(local_count) * num_class < 30:\n msg = \"Not recommend to do downsampling! Dataset will be too small after downsampling!\"\n logging.info(msg)\n elif max(local_count) > min(local_count) * 5:\n msg = \"Number of obs in majority class is much greater than in minority class. 
Downsampling is recommended!\"\n logging.info(msg)\n else:\n msg = \"No significant data imbalance problem, no need to do downsampling.\"\n logging.info(msg)\n\n def count_category(self) -> Dict[str, int]:\n \"\"\"Count the number of observations of each candidate model in meta-data.\n\n Returns:\n A dictionary storing the number of observations of each candidate model in meta-data.\n \"\"\"\n\n return Counter(self.metadataY)\n\n def preprocess(self, downsample: bool = True, scale: bool = False) -> None:\n \"\"\"Pre-process meta data before training a classifier.\n\n There are 2 options in this function: 1) whether or not to downsample meta-data to ensure each candidate model has the same number of observations;\n and 2) whether or not to rescale the time series features to zero-mean and unit-variance.\n\n Args:\n downsample: Optional; A boolean to specify whether or not to downsample meta-data to ensure each candidate model has the same number of observations.\n Default is True.\n scale: Optional; A boolean to specify whether or not to rescale the time series features to zero-mean and unit-variance.\n\n Returns:\n None\n \"\"\"\n\n if downsample:\n self.hpt, self.metadataX, self.metadataY = RandomDownSampler(\n self.hpt, self.metadataX, self.metadataY\n ).fit_resample()\n logging.info(\"Successfully applied random downsampling!\")\n self.x_mean = np.average(self.metadataX.values, axis=0)\n self.x_std = np.std(self.metadataX.values, axis=0)\n self.x_std[self.x_std == 0] = 1.0\n\n if scale:\n self.scale = True\n self.metadataX = (self.metadataX - self.x_mean) / self.x_std\n logging.info(\n \"Successfully scaled data by centering to the mean and component-wise scaling to unit variance!\"\n )\n\n def plot_feature_comparison(\n self,\n i: int,\n j: int,\n ax: Optional[plt.Axes] = None,\n figsize: Optional[Tuple[int, int]] = None,\n ) -> plt.Axes:\n \"\"\"Generate the time series features comparison plot.\n\n Args:\n i: A integer representing the index of one feature vector from feature matrix to be compared.\n j: A integer representing the other index of one feature vector from feature matrix to be compared.\n ax: optional axes to use for plotting.\n figsize: optional figure size to create. If None, use (12, 6).\n\n Returns:\n The matplotlib Axes.\n \"\"\"\n\n combined = pd.concat([self.metadataX.iloc[i], self.metadataX.iloc[j]], axis=1)\n combined.columns = [\n f\"{self.metadataY.iloc[i]} model\",\n f\"{self.metadataY.iloc[j]} model\",\n ]\n if figsize is None:\n figsize = (12, 6)\n # pyre-fixme[29]: `CachedAccessor` is not a function.\n return combined.plot(kind=\"bar\", figsize=figsize, ax=ax)\n\n def get_corr_mtx(self) -> pd.DataFrame:\n \"\"\"Calculate correlation matrix of feature matrix.\n\n Returns:\n A pd.DataFrame representing the correlation matrix of time series features.\n \"\"\"\n\n return self.metadataX.corr()\n\n def plot_corr_heatmap(\n self,\n camp: str = \"RdBu_r\",\n ax: Optional[plt.Axes] = None,\n figsize: Optional[Tuple[int, int]] = None,\n ) -> plt.Axes:\n \"\"\"Generate heat-map for correlation matrix of feature matrix.\n\n Args:\n cmap: Optional; A string representing the color bar used to generate heat-map. Default is \"RdBu_r\".\n ax: optional axes to use for plotting.\n figsize: optional figure size to create. 
If None, use (8, 6).\n\n Returns:\n The matplotlib Axes.\n \"\"\"\n if ax is None:\n if figsize is None:\n figsize = (8, 6)\n _, ax = plt.subplots(figsize=figsize)\n\n return sns.heatmap(\n self.get_corr_mtx(),\n cmap=camp,\n yticklabels=self.metadataX.columns,\n xticklabels=self.metadataX.columns,\n ax=ax,\n )\n\n def train(\n self,\n method: str = \"RandomForest\",\n eval_method: str = \"mean\",\n test_size: float = 0.1,\n n_trees: int = 500,\n n_neighbors: int = 5,\n ) -> Dict[str, Any]:\n \"\"\"Train a meta-learner model selection model (i.e., a classifier).\n\n Args:\n method: Optional; A string representing the name of the classification algorithm. Can be 'RandomForest', 'GBDT', 'SVM', 'KNN' or 'NaiveBayes'. Default is 'RandomForest'.\n eval_method: Optional; A string representing the aggregation method used for computing errors. Can be 'mean' or 'median'. Default is 'mean'.\n test_size: Optional; A float representing the proportion of test set, which should be within (0, 1). Default is 0.1.\n n_trees: Optional; An integer representing the number of trees in random forest model. Default is 500.\n n_neighbors: Optional; An integer representing the number of neighbors in KNN model. Default is 5.\n\n Returns:\n A dictionary summarizing the performance of the trained classifier on both training and validation set.\n \"\"\"\n\n if method not in [\"RandomForest\", \"GBDT\", \"SVM\", \"KNN\", \"NaiveBayes\"]:\n msg = \"Only support RandomForest, GBDT, SVM, KNN, and NaiveBayes method.\"\n logging.error(msg)\n raise ValueError(msg)\n\n if eval_method not in [\"mean\", \"median\"]:\n msg = \"Only support mean and median as evaluation method.\"\n logging.error(msg)\n raise ValueError(msg)\n\n if test_size <= 0 or test_size >= 1:\n msg = \"Illegal test set.\"\n logging.error(msg)\n raise ValueError(msg)\n\n x_train, x_test, y_train, y_test, hpt_train, hpt_test = train_test_split(\n self.metadataX, self.metadataY, self.hpt, test_size=test_size\n )\n\n if method == \"RandomForest\":\n clf = RandomForestClassifier(n_estimators=n_trees)\n elif method == \"GBDT\":\n clf = GradientBoostingClassifier()\n elif method == \"SVM\":\n clf = make_pipeline(StandardScaler(), SVC(gamma=\"auto\"))\n elif method == \"KNN\":\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\n else:\n clf = GaussianNB()\n\n clf.fit(x_train, y_train)\n y_fit = clf.predict(x_train)\n y_pred = clf.predict(x_test)\n\n # calculate model errors\n fit_error, pred_error = {}, {}\n\n # evaluate method\n em = np.mean if eval_method == \"mean\" else np.median\n\n # meta learning errors\n fit_error[\"meta-learn\"] = em(\n [hpt_train.iloc[i][c][-1] for i, c in enumerate(y_fit)]\n )\n pred_error[\"meta-learn\"] = em(\n [hpt_test.iloc[i][c][-1] for i, c in enumerate(y_pred)]\n )\n\n # pre-selected model errors, for all candidate models\n for label in self.metadataY.unique():\n fit_error[label] = em(\n [hpt_train.iloc[i][label][-1] for i in range(len(hpt_train))]\n )\n pred_error[label] = em(\n [hpt_test.iloc[i][label][-1] for i in range(len(hpt_test))]\n )\n\n self.clf = clf\n return {\n \"fit_error\": fit_error,\n \"pred_error\": pred_error,\n \"clf_accuracy\": metrics.accuracy_score(y_test, y_pred),\n }\n\n def save_model(self, file_name: str) -> None:\n \"\"\"Save the trained model.\n\n Args:\n file_name: A string representing the path to save the trained model.\n\n Returns:\n None.\n \"\"\"\n\n if self.clf is None:\n msg = \"Haven't trained a model.\"\n logging.error(msg)\n raise ValueError(msg)\n else:\n joblib.dump(self.__dict__, 
file_name)\n logging.info(\"Successfully saved the trained model!\")\n\n def load_model(self, file_name: str) -> None:\n \"\"\"Load a pre-trained model.\n\n Args:\n file_name: A string representing the path to load the pre-trained model.\n\n Returns:\n None.\n \"\"\"\n\n try:\n self.__dict__ = joblib.load(file_name)\n logging.info(\"Successfully loaded a pre-trained model!\")\n except Exception:\n msg = \"No existing pre-trained model. Please change file path or train a model first!\"\n logging.error(msg)\n raise ValueError(msg)\n\n def pred(\n self, source_ts: TimeSeriesData, ts_scale: bool = True, n_top: int = 1\n ) -> Union[str, List[str]]:\n \"\"\"Predict the best forecasting model for a new time series data.\n\n Args:\n source_ts: :class:`kats.consts.TimeSeriesData` object representing the new time series data.\n ts_scale: Optional; A boolean to specify whether or not to rescale time series data (i.e., normalizing it with its maximum vlaue) before calculating features. Default is True.\n n_top: Optional; A integer for the number of top model names to return. Default is 1.\n\n Returns:\n A string or a list of strings of the names of forecasting models.\n \"\"\"\n\n ts = TimeSeriesData(pd.DataFrame(source_ts.to_dataframe().copy()))\n if self.clf is None:\n msg = \"Haven't trained a model. Please train a model or load a model before predicting.\"\n logging.error(msg)\n raise ValueError(msg)\n\n if ts_scale:\n # scale time series to make ts features more stable\n ts.value /= ts.value.max()\n msg = \"Successful scaled! Each value of TS has been divided by the max value of TS.\"\n logging.info(msg)\n\n new_features = TsFeatures().transform(ts)\n # pyre-fixme[16]: `List` has no attribute `values`.\n new_features_vector = np.asarray(list(new_features.values()))\n if np.any(np.isnan(new_features_vector)):\n msg = (\n \"Features of the test time series contains NaN value, consider processing it. Features are: \"\n f\"{new_features}. Fill in NaNs with 0.\"\n )\n logging.warning(msg)\n return self.pred_by_feature([new_features_vector], n_top=n_top)[0]\n\n def pred_by_feature(\n self,\n source_x: Union[np.ndarray, List[np.ndarray], pd.DataFrame],\n n_top: int = 1,\n ) -> np.ndarray:\n \"\"\"Predict the best forecasting models given a list/dataframe of time series features\n Args:\n source_x: the time series features of the time series that one wants to predict, can be a np.ndarray, a list of np.ndarray or a pd.DataFrame.\n n_top: Optional; An integer for the number of top model names to return. Default is 1.\n\n Returns:\n An array of strings representing the forecasing models. If n_top=1, a 1-d np.ndarray will be returned. Otherwise, a 2-d np.ndarray will be returned.\n \"\"\"\n\n if self.clf is None:\n msg = \"Haven't trained a model. 
Please train a model or load a model before predicting.\"\n logging.error(msg)\n raise ValueError(msg)\n if isinstance(source_x, List):\n x = np.row_stack(source_x)\n elif isinstance(source_x, np.ndarray):\n x = source_x.copy()\n else:\n msg = f\"Invalid source_x type: {type(source_x)}.\"\n logging.error(msg)\n raise ValueError(msg)\n if self.scale:\n x = (x - self.x_mean) / self.x_std\n x[np.isnan(x)] = 0.0\n if n_top == 1:\n return self.clf.predict(x)\n prob = self.clf.predict_proba(x)\n order = np.argsort(-prob, axis=1)\n classes = np.array(self.clf.classes_)\n return classes[order][:, :n_top]\n\n def _bootstrap(self, data: np.ndarray, rep: int = 200) -> float:\n \"\"\"Helper function for bootstrap test and returns the pvalue.\"\"\"\n\n diff = data[:, 0] - data[:, 1]\n n = len(diff)\n idx = np.random.choice(np.arange(n), n * rep)\n sample = diff[idx].reshape(-1, n)\n bs = np.average(sample, axis=1)\n pvalue = np.average(bs < 0)\n return pvalue\n\n def pred_fuzzy(\n self, source_ts: TimeSeriesData, ts_scale: bool = True, sig_level: float = 0.2\n ) -> Dict[str, Any]:\n \"\"\"Predict a forecasting model for a new time series data using fuzzy method.\n\n The fuzzy method returns the best candiate model and the second best model will be returned if there is no statistically significant difference between them.\n The statistical test is based on the bootstrapping samples drawn from the fitted random forest model. This function is only available for random forest classifier.\n\n Args:\n source_ts: :class:`kats.consts.TimeSeriesData` object representing the new time series data.\n ts_scale: Optional; A boolean to specify whether or not to rescale time series data (i.e., normalizing it with its maximum vlaue) before calculating features. Default is True.\n sig_level: Optional; A float representing the significance level for bootstrap test. If pvalue>=sig_level, then we deem there is no difference between the best and the second best model.\n Default is 0.2.\n\n Returns:\n A dictionary of prediction results, including forecasting models, their probability of being th best forecasting models and the pvalues of bootstrap tests.\n \"\"\"\n\n ts = TimeSeriesData(pd.DataFrame(source_ts.to_dataframe().copy()))\n if ts_scale:\n # scale time series to make ts features more stable\n ts.value /= ts.value.max()\n # pyre-fixme[16]: `List` has no attribute `values`.\n test = np.asarray(list(TsFeatures().transform(ts).values()))\n test[np.isnan(test)] = 0.0\n if self.scale:\n test = (test - self.x_mean) / self.x_std\n test = test.reshape([1, -1])\n m = len(self.clf.estimators_)\n data = np.array(\n [self.clf.estimators_[i].predict_proba(test)[0] for i in range(m)]\n )\n prob = self.clf.predict_proba(test)[0]\n idx = np.argsort(-prob)[:2]\n pvalue = self._bootstrap(data[:, idx[:2]])\n if pvalue >= sig_level:\n label = self.clf.classes_[idx[:2]]\n prob = prob[idx[:2]]\n else:\n label = self.clf.classes_[idx[:1]]\n prob = prob[idx[:1]]\n ans = {\"label\": label, \"probability\": prob, \"pvalue\": pvalue}\n return ans\n\n def __str__(self):\n return \"MetaLearnModelSelect\"\n\n\nclass RandomDownSampler:\n \"\"\"An assistant class for class MetaLearnModelSelect to do random downsampling.\n\n RandomDownSampler provides methods for creating a balanced dataset via downsampling. 
It contains fit_resample.\n\n Attributes:\n hpt: A `pandas.Series` object storing the best hyper-parameters and the corresponding errors for each model.\n dataX: A `pandas.DataFrame` object representing the time series features matrix.\n dataY: A `pandas.Series` object representing the best models for the corresponding time series.\n \"\"\"\n\n def __init__(self, hpt: pd.Series, dataX: pd.DataFrame, dataY: pd.Series) -> None:\n self.hpt = hpt\n self.dataX = dataX\n self.dataY = dataY\n self.col_namesX = self.dataX.columns\n\n def fit_resample(self) -> Tuple[pd.Series, pd.DataFrame, pd.Series]:\n \"\"\"Create balanced dataset via random downsampling.\n\n Returns:\n A tuple containing the `pandas.Series` object of the best hyper-parameters and the corresponding errors, the `pandas.DataFrame` object of the downsampled time series features,\n and the `pandas.Series` object of the downsampled best models for the corresponding time series.\n \"\"\"\n\n resampled_x, resampled_y, resampled_hpt = [], [], []\n # naive down-sampler technique for data imbalance problem\n min_n = min(Counter(self.dataY).values())\n\n idx_dict = defaultdict(list)\n for i, c in enumerate(self.dataY):\n idx_dict[c].append(i)\n\n for key in idx_dict:\n idx_dict[key] = np.random.choice(idx_dict[key], size=min_n, replace=False)\n resampled_x += self.dataX.iloc[np.asarray(idx_dict[key]), :].values.tolist()\n resampled_y += list(self.dataY.iloc[np.asarray(idx_dict[key])])\n resampled_hpt += list(self.hpt.iloc[np.asarray(idx_dict[key])])\n\n resampled_x = pd.DataFrame(resampled_x)\n resampled_x.columns = self.col_namesX\n\n resampled_y = pd.Series(resampled_y, name=\"y\")\n resampled_hpt = pd.Series(resampled_hpt, name=\"hpt\")\n\n return resampled_hpt, resampled_x, resampled_y\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Forecasting with simple linear regression model\n#\n# In the simplest case, the regression model explores a linear relationship\n# between the forecast variable `y` (observed time series) and a single\n# predictor variable `x` (time).\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom typing import List, Optional, Any, Dict, Union\n\nimport numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nfrom kats.consts import Params, TimeSeriesData\nfrom kats.models.model import Model\nfrom statsmodels.sandbox.regression.predstd import wls_prediction_std\n\n\nclass LinearModelParams(Params):\n \"\"\"Parameter class for Linear model.\n\n This is the parameter class for the linear model.\n\n Attributes:\n alpha: The alpha level for the confidence interval. The default alpha = 0.05 returns a 95% confidence interval\n \"\"\"\n\n def __init__(self, alpha: float = 0.05) -> None:\n super().__init__()\n self.alpha = alpha\n logging.debug(\n \"Initialized LinearModel parameters. 
\" \"alpha:{alpha}\".format(alpha=alpha)\n )\n\n def validate_params(self) -> None:\n \"\"\"Validate Linear Model Parameters\n\n Since the linear model does not require key parameters to be defined this is not required for this class\n \"\"\"\n logging.info(\"Method validate_params() is not implemented.\")\n pass\n\n\nclass LinearModel(Model[LinearModelParams]):\n \"\"\"Model class for Linear Model.\n\n This class provides the fit, predict and plot methods for the Linear Model\n\n Attributes:\n data: :class:`kats.consts.TimeSeriesData`, the input time series data as `TimeSeriesData`\n params: the parameter class defined with `LinearModelParams`\n \"\"\"\n\n def __init__(self, data: TimeSeriesData, params: LinearModelParams) -> None:\n super().__init__(data, params)\n if not isinstance(self.data.value, pd.Series):\n msg = \"Only support univariate time series, but get {type}.\".format(\n type=type(self.data.value)\n )\n logging.error(msg)\n raise ValueError(msg)\n self.model: Optional[sm.OLS] = None\n self.fcst_df = pd.DataFrame(data=None)\n self.freq: Optional[str] = None\n self._X_future: Optional[List[int]] = None\n self.past_length: int = len(data.time)\n self.dates: Optional[pd.DatetimeIndex] = None\n self.y_fcst: Optional[Union[pd.Series, np.ndarray]] = None\n self.sdev: Optional[Union[np.ndarray, float]] = None\n self.y_fcst_lower: Optional[Union[pd.Series, np.ndarray, float]] = None\n self.y_fcst_upper: Optional[Union[pd.Series, np.ndarray, float]] = None\n\n def fit(self) -> None:\n \"\"\"fit Linear Model.\"\"\"\n logging.debug(\n \"Call fit() with parameters: \"\n \"alpha:{alpha}\".format(alpha=self.params.alpha)\n )\n\n # prepare X and y for linear model\n _X = list(range(self.past_length))\n X = sm.add_constant(_X)\n y = self.data.value\n lm = sm.OLS(y, X)\n self.model = lm.fit()\n\n def predict(\n self, steps: int, include_history: bool = False, *args: Any, **kwargs: Any\n ) -> pd.DataFrame:\n \"\"\"predict with fitted linear model.\n\n Args:\n steps: the steps or length of the prediction horizon\n include_history: whether to include the historical data in the prediction\n\n Returns:\n The predicted dataframe with the following columns:\n `time`, `fcst`, `fcst_lower`, and `fcst_upper`\n \"\"\"\n logging.debug(\n \"Call predict() with parameters. 
\"\n \"steps:{steps}, kwargs:{kwargs}\".format(steps=steps, kwargs=kwargs)\n )\n self.freq = kwargs.get(\"freq\", pd.infer_freq(self.data.time))\n self.include_history = include_history\n\n if include_history:\n self._X_future = list(range(0, self.past_length + steps))\n else:\n self._X_future = list(range(self.past_length, self.past_length + steps))\n\n X_fcst = sm.add_constant(self._X_future)\n if self.model is None:\n raise ValueError(\"Call fit() before predict()\")\n else:\n y_fcst = self.model.predict(X_fcst)\n\n self.sdev, self.y_fcst_lower, self.y_fcst_upper = wls_prediction_std(\n self.model, exog=X_fcst, alpha=self.params.alpha\n )\n\n self.y_fcst = pd.Series(y_fcst)\n self.y_fcst_lower = pd.Series(self.y_fcst_lower)\n self.y_fcst_upper = pd.Series(self.y_fcst_upper)\n\n # create future dates\n last_date = self.data.time.max()\n dates = pd.date_range(start=last_date, periods=steps + 1, freq=self.freq)\n self.dates = dates[dates != last_date]\n\n if include_history:\n self.dates = np.concatenate((pd.to_datetime(self.data.time), self.dates))\n\n self.fcst_df = pd.DataFrame(\n {\n \"time\": self.dates,\n \"fcst\": self.y_fcst,\n \"fcst_lower\": self.y_fcst_lower,\n \"fcst_upper\": self.y_fcst_upper,\n }\n )\n logging.debug(\"Return forecast data: {fcst_df}\".format(fcst_df=self.fcst_df))\n return self.fcst_df\n\n def __str__(self) -> str:\n return \"Linear Model\"\n\n @staticmethod\n def get_parameter_search_space() -> List[Dict[str, Any]]:\n \"\"\"get default parameter search space for Linear model.\"\"\"\n return [\n {\n \"name\": \"alpha\",\n \"type\": \"choice\",\n \"value_type\": \"float\",\n \"values\": [0.01, 0.05, 0.1, 0.25],\n \"is_ordered\": True,\n },\n ]\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-unsafe\n\n\"\"\"A module for meta-learner predictability.\n\nThis module contains the class :class:`MetaLearnPredictability` for meta-learner predictability. 
This class predicts whether a time series is predictable or not.\nThe predictability of a time series is determined by whether the forecasting errors of the possible best forecasting model can be less than a user-defined threshold.\n\"\"\"\n\nimport ast\nimport logging\nfrom typing import Dict, List, Optional, Union, Any\n\nimport joblib\nimport numpy as np\nimport pandas as pd\nfrom kats.consts import TimeSeriesData\nfrom kats.tsfeatures.tsfeatures import TsFeatures\nfrom sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier\nfrom sklearn.metrics import precision_recall_curve, precision_recall_fscore_support\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\nclass MetaLearnPredictability:\n \"\"\"Meta-learner framework on predictability.\n This framework uses classification algorithms to predict whether a time series is predictable or not (\n we define the time series with error metrics less than a user defined threshold as predictable).\n For training, it uses time series features as inputs and whether the best forecasting models' errors less than the user-defined threshold as labels.\n For prediction, it takes time series or time series features as inputs to predict whether the corresponding time series is predictable or not.\n This class provides preprocess, pred, pred_by_feature, save_model and load_model.\n\n Attributes:\n metadata: Optional; A list of dictionaries representing the meta-data of time series (e.g., the meta-data generated by GetMetaData object).\n Each dictionary d must contain at least 3 components: 'hpt_res', 'features' and 'best_model'. d['hpt_res'] represents the best hyper-parameters for each candidate model and the corresponding errors;\n d['features'] are time series features, and d['best_model'] is a string representing the best candidate model of the corresponding time series data.\n metadata should not be None unless load_model is True. Default is None\n threshold: Optional; A float representing the threshold for the forecasting error. A time series whose forecasting error of the best forecasting model is higher than the threshold is considered as unpredictable. Default is 0.2.\n load_model: Optional; A boolean to specify whether or not to load a trained model. 
Default is False.\n\n Sample Usage:\n >>> mlp = MetaLearnPredictability(data)\n >>> mlp.train()\n >>> mlp.save_model()\n >>> mlp.pred(TSdata) # Predict whether a time series is predictable.\n >>> mlp2 = MetaLearnPredictability(load_model=True) # Create a new object to load the trained model\n >>> mlp2.load_model()\n \"\"\"\n\n def __init__(\n self,\n metadata: Optional[List[Any]] = None,\n threshold: float = 0.2,\n load_model=False,\n ) -> None:\n if load_model:\n msg = \"Initialize this class without meta data, and a pretrained model should be loaded using .load_model() method.\"\n logging.info(msg)\n else:\n if metadata is None:\n msg = \"Please input meta data to initialize this class.\"\n logging.error(msg)\n raise ValueError(msg)\n if len(metadata) <= 30:\n msg = \"Dataset is too small to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"hpt_res\" not in metadata[0]:\n msg = \"Missing best hyper-params, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"features\" not in metadata[0]:\n msg = \"Missing time series features, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n if \"best_model\" not in metadata[0]:\n msg = \"Missing best models, not able to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n self.metadata = metadata\n self.threshold = threshold\n self._reorganize_data()\n self._validate_data()\n self.rescale = False\n self.clf = None\n self._clf_threshold = None\n\n def _reorganize_data(self) -> None:\n \"\"\"Reorganize raw input data into features and labels.\"\"\"\n\n metadata = self.metadata\n\n self.features = []\n self.labels = []\n\n for i in range(len(metadata)):\n try:\n if isinstance(metadata[i][\"hpt_res\"], str):\n hpt = ast.literal_eval(metadata[i][\"hpt_res\"])\n else:\n hpt = metadata[i][\"hpt_res\"]\n\n if isinstance(metadata[i][\"features\"], str):\n feature = ast.literal_eval(metadata[i][\"features\"])\n else:\n feature = metadata[i][\"features\"]\n\n self.features.append(feature)\n self.labels.append(hpt[metadata[i][\"best_model\"]][1])\n except Exception as e:\n logging.exception(e)\n self.labels = (np.array(self.labels) > self.threshold).astype(int)\n self.features = pd.DataFrame(self.features)\n self.features.fillna(0, inplace=True)\n self.features_mean = np.average(self.features.values, axis=0)\n\n self.features_std = np.std(self.features.values, axis=0)\n\n self.features_std[self.features_std == 0] = 1.0\n\n return\n\n def _validate_data(self) -> None:\n \"\"\"Validate input data.\n\n We check two aspects:\n 1) whether input data contain both positive and negative instances.\n 2) whether training data size is at least 30.\n \"\"\"\n\n if len(np.unique(self.labels)) == 1:\n msg = \"Only one type of time series data and cannot train a classifier!\"\n logging.error(msg)\n raise ValueError(msg)\n if len(self.features) <= 30:\n msg = \"Dataset is too small to train a meta learner!\"\n logging.error(msg)\n raise ValueError(msg)\n\n def preprocess(self) -> None:\n \"\"\"Rescale input time series features to zero-mean and unit-variance.\n\n Returns:\n None.\n \"\"\"\n\n self.rescale = True\n features = (self.features.values - self.features_mean) / self.features_std\n self.features = pd.DataFrame(features, columns=self.features.columns)\n\n def train(\n self,\n method: str = \"RandomForest\",\n valid_size: float = 0.1,\n test_size: float = 0.1,\n recall_threshold: float = 0.7,\n n_estimators: int = 500,\n n_neighbors: int = 5,\n 
**kwargs,\n ) -> Dict[str, float]:\n \"\"\"Train a classifier with time series features to forecast predictability.\n\n Args:\n method: Optional; A string representing the name of the classification algorithm. Can be 'RandomForest', 'GBDT', 'KNN' or 'NaiveBayes'. Default is 'RandomForest'.\n valid_size: Optional; A float representing the size of validation set for parameter tunning, which should be within (0, 1). Default is 0.1.\n test_size: Optional; A float representing the size of test set, which should be within [0., 1-valid_size). Default is 0.1.\n recall_threshold: Optional; A float controlling the recall score of the classifier. The recall of the trained classifier will be larger than recall_threshold. Default is 0.7.\n n_estimators: Optional; An integer representing the number of trees in random forest model. Default is 500.\n n_neighbors: Optional; An integer representing the number of neighbors in KNN model. Default is 5.\n\n Returns:\n A dictionary stores the classifier performance on the test set (if test_size is valid).\n \"\"\"\n\n if method not in [\"RandomForest\", \"GBDT\", \"KNN\", \"NaiveBayes\"]:\n msg = \"Only support RandomForest, GBDT, KNN, and NaiveBayes method.\"\n logging.error(msg)\n raise ValueError(msg)\n\n if valid_size <= 0.0 or valid_size >= 1.0:\n msg = \"valid size should be in (0.0, 1.0)\"\n logging.error(msg)\n raise ValueError(msg)\n\n if test_size <= 0.0 or test_size >= 1.0:\n msg = f\"invalid test_size={test_size} and reset the test_size as 0.\"\n test_size = 0.0\n logging.warning(msg)\n\n n = len(self.features)\n x_train, x_valid, y_train, y_valid = train_test_split(\n self.features, self.labels, test_size=int(n * valid_size)\n )\n\n if test_size > 0 and test_size < (1 - valid_size):\n x_train, x_test, y_train, y_test = train_test_split(\n x_train, y_train, test_size=int(n * test_size)\n )\n elif test_size == 0:\n x_train, y_train = self.features, self.labels\n x_test, y_test = None, None\n else:\n msg = \"Invalid test_size and re-set test_size as 0.\"\n logging.info(msg)\n x_train, y_train = self.features, self.labels\n x_test, y_test = None, None\n if method == \"NaiveBayes\":\n clf = GaussianNB(**kwargs)\n elif method == \"GBDT\":\n clf = GradientBoostingClassifier(**kwargs)\n elif method == \"KNN\":\n kwargs[\"n_neighbors\"] = kwargs.get(\"n_neighbors\", 5)\n clf = KNeighborsClassifier(**kwargs)\n else:\n kwargs[\"n_estimators\"] = n_estimators\n kwargs[\"class_weight\"] = kwargs.get(\"class_weight\", \"balanced_subsample\")\n clf = RandomForestClassifier(**kwargs)\n clf.fit(x_train, y_train)\n pred_valid = clf.predict_proba(x_valid)[:, 1]\n p, r, threshold = precision_recall_curve(y_valid, pred_valid)\n try:\n clf_threshold = threshold[np.where(p == np.max(p[r >= recall_threshold]))][\n -1\n ]\n except Exception as e:\n msg = f\"Fail to get a proper threshold for recall {recall_threshold}, use 0.5 as threshold instead. 
Exception message is: {e}\"\n logging.warning(msg)\n clf_threshold = 0.5\n if x_test is not None:\n pred_test = clf.predict_proba(x_test)[:, 1] > clf_threshold\n precision_test, recall_test, f1_test, _ = precision_recall_fscore_support(\n y_test, pred_test, average=\"binary\"\n )\n accuracy = np.average(pred_test == y_test)\n ans = {\n \"accuracy\": accuracy,\n \"precision\": precision_test,\n \"recall\": recall_test,\n \"f1\": f1_test,\n }\n else:\n ans = {}\n self.clf = clf\n self._clf_threshold = clf_threshold\n return ans\n\n def pred(self, source_ts: TimeSeriesData, ts_rescale: bool = True) -> bool:\n \"\"\"Predict whether a time series is predicable or not.\n\n Args:\n source_ts: :class:`kats.consts.TimeSeriesData` object representing the new time series data.\n ts_scale: Optional; A boolean to specify whether or not to rescale time series data (i.e., normalizing it with its maximum vlaue) before calculating features. Default is True.\n\n Returns:\n A boolean representing whether the time series is predictable or not.\n \"\"\"\n\n ts = TimeSeriesData(pd.DataFrame(source_ts.to_dataframe().copy()))\n if self.clf is None:\n msg = \"No model trained yet, please train the model first.\"\n logging.error(msg)\n raise ValueError(msg)\n if ts_rescale:\n ts.value /= ts.value.max()\n msg = \"Successful scaled! Each value of TS has been divided by the max value of TS.\"\n logging.info(msg)\n features = TsFeatures().transform(ts)\n # pyre-fixme[16]: `List` has no attribute `values`.\n x = np.array(list(features.values()))\n if np.sum(np.isnan(x)) > 0:\n msg = (\n \"Features of ts contain NaN, please consider preprocessing ts. Features are: \"\n f\"{features}. Fill in NaNs with 0.\"\n )\n logging.warning(msg)\n ans = True if self.pred_by_feature([x])[0] == 1 else False\n return ans\n\n def pred_by_feature(\n self, source_x: Union[np.ndarray, List[np.ndarray], pd.DataFrame]\n ) -> np.ndarray:\n \"\"\"Predict whether a list of time series are predicable or not given their time series features.\n Args:\n source_x: the time series features of the time series that one wants to predict, can be a np.ndarray, a list of np.ndarray or a pd.DataFrame.\n\n Returns:\n A np.array storing whether the corresponding time series are predictable or not.\n \"\"\"\n\n if self.clf is None:\n msg = \"No model trained yet, please train the model first.\"\n logging.error(msg)\n raise ValueError(msg)\n if isinstance(source_x, List):\n x = np.row_stack(source_x)\n elif isinstance(source_x, np.ndarray):\n x = source_x.copy()\n else:\n msg = f\"In valid source_x type: {type(source_x)}.\"\n logging.error(msg)\n raise ValueError(msg)\n x[np.isnan(x)] = 0.0\n if self.rescale:\n x = (x - self.features_mean) / self.features_std\n pred = (self.clf.predict_proba(x)[:, 1] < self._clf_threshold).astype(int)\n return pred\n\n def save_model(self, file_path: str) -> None:\n \"\"\"Save the trained model.\n\n Args:\n file_name: A string representing the path to save the trained model.\n\n Returns:\n None.\n \"\"\"\n if self.clf is None:\n msg = \"Please train the model first!\"\n logging.error(msg)\n raise ValueError(msg)\n joblib.dump(self.__dict__, file_path)\n logging.info(f\"Successfully save the model: {file_path}.\")\n\n def load_model(self, file_path) -> None:\n \"\"\"Load a pre-trained model.\n\n Args:\n file_name: A string representing the path to load the pre-trained model.\n\n Returns:\n None.\n \"\"\"\n try:\n self.__dict__ = joblib.load(file_path)\n logging.info(f\"Successfully load the model: {file_path}.\")\n except 
Exception as e:\n msg = f\"Fail to load model with Exception msg: {e}\"\n logging.exception(msg)\n raise ValueError(msg)\n" ]
[ [ "pandas.Series", "numpy.asarray", "pandas.DataFrame", "sklearn.ensemble.RandomForestClassifier", "numpy.arange", "sklearn.neighbors.KNeighborsClassifier", "numpy.std", "pandas.concat", "sklearn.naive_bayes.GaussianNB", "numpy.random.choice", "numpy.isnan", "sklearn.model_selection.train_test_split", "sklearn.svm.SVC", "sklearn.ensemble.GradientBoostingClassifier", "numpy.argsort", "numpy.array", "matplotlib.pyplot.subplots", "numpy.row_stack", "sklearn.preprocessing.StandardScaler", "numpy.average", "sklearn.metrics.accuracy_score" ], [ "pandas.to_datetime", "pandas.Series", "pandas.DataFrame", "pandas.date_range", "pandas.infer_freq" ], [ "sklearn.naive_bayes.GaussianNB", "sklearn.ensemble.RandomForestClassifier", "numpy.unique", "numpy.isnan", "sklearn.metrics.precision_recall_curve", "pandas.DataFrame", "sklearn.neighbors.KNeighborsClassifier", "sklearn.metrics.precision_recall_fscore_support", "numpy.std", "numpy.max", "numpy.row_stack", "sklearn.ensemble.GradientBoostingClassifier", "numpy.array", "numpy.average" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
amsword/maskrcnn-benchmark
[ "660457d5f28c5d7d7887829486a20c60976b1dd8" ]
[ "maskrcnn_benchmark/modeling/backbone/resnet_fast.py" ]
[ "# This file cames from Xiyang Dai\n\nimport torch.nn as nn\nfrom torch.nn import BatchNorm2d\nfrom maskrcnn_benchmark.modeling.make_layers import frozen_batch_norm\nfrom maskrcnn_benchmark.layers.batch_norm import FrozenBatchNorm2d\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\ndef conv1x1(in_planes, out_planes, stride=1):\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, normf=frozen_batch_norm):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = normf(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = normf(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, cfg, block=BasicBlock):\n\n super(ResNet, self).__init__()\n layers = [3, 4, 6, 3]\n if cfg.MODEL.BACKBONE.USE_BN:\n normf = BatchNorm2d\n else:\n normf = frozen_batch_norm\n\n self.inplanes = 16\n self.conv1 = nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = normf(16)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = self._make_layer(block, 16, 1)\n if True:\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n else:\n self.maxpool = nn.Sequential(nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1, bias=False),\n nn.ReLU(inplace=True))\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n frozen_batch_norm(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _freeze_backbone(self, freeze_at):\n if freeze_at < 0:\n return\n for stage_index in range(freeze_at):\n if stage_index == 0:\n m = self.conv1 # stage 0 is the stem\n else:\n m = getattr(self, \"layer\" + str(stage_index))\n for p in m.parameters():\n p.requires_grad = False\n\n def forward(self, x):\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.maxpool(x)\n res = []\n x = self.layer1(x)\n res.append(x)\n x = self.layer2(x)\n res.append(x)\n x = self.layer3(x)\n res.append(x)\n x = self.layer4(x)\n res.append(x)\n\n return res\n\nclass ResNet_XX(nn.Module):\n def __init__(self, cfg, block=BasicBlock):\n super().__init__()\n layers = cfg.MODEL.RESNETS.LAYERS\n in_channels = cfg.MODEL.RESNETS.IN_CHANNELS\n if 
cfg.MODEL.BACKBONE.USE_BN:\n self.normf = BatchNorm2d\n else:\n self.normf = FrozenBatchNorm2d\n\n self.inplanes = 16\n self.conv1 = nn.Conv2d(3, 16, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = self.normf(16)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = self._make_layer(block, 16, 1)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, in_channels[0], layers[0])\n self.layer2 = self._make_layer(block, in_channels[1], layers[1], stride=2)\n self.layer3 = self._make_layer(block, in_channels[2], layers[2], stride=2)\n self.layer4 = self._make_layer(block, in_channels[3], layers[3], stride=2)\n\n self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n self.normf(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, normf=self.normf))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, normf=self.normf))\n\n return nn.Sequential(*layers)\n\n def _freeze_backbone(self, freeze_at):\n if freeze_at < 0:\n return\n for stage_index in range(freeze_at):\n if stage_index == 0:\n self._no_grad(self.conv1)\n self._no_grad(self.conv2)\n else:\n m = getattr(self, \"layer\" + str(stage_index))\n self._no_grad(m)\n\n def _no_grad(self, m):\n for p in m.parameters():\n p.requires_grad = False\n\n def forward(self, x):\n\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.conv2(x)\n x = self.maxpool(x)\n res = []\n x = self.layer1(x)\n res.append(x)\n x = self.layer2(x)\n res.append(x)\n x = self.layer3(x)\n res.append(x)\n x = self.layer4(x)\n res.append(x)\n\n return res\n" ]
[ [ "torch.nn.Sequential", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.nn.MaxPool2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FloCF/pytorch
[ "383a33a0eb28ae454c0c8965650aea8ce1608943", "383a33a0eb28ae454c0c8965650aea8ce1608943" ]
[ "test/distributed/test_c10d_nccl.py", "torch/fx/experimental/fx2trt/example/fx2trt_example.py" ]
[ "import copy\nimport math\nimport os\nimport random\nimport signal\nimport sys\nimport tempfile\nimport threading\nimport time\nfrom contextlib import contextmanager\nfrom datetime import timedelta\nfrom itertools import product\nfrom unittest import mock\n\nimport torch\nimport torch.distributed as c10d\n\nif not c10d.is_available():\n print(\"c10d not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\nimport test_c10d_common\nimport torch.distributed as dist\nimport torch.distributed.algorithms.ddp_comm_hooks.default_hooks as default\nimport torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD\nimport torch.nn.functional as F\nimport torch.testing._internal.common_utils as common\nfrom test_c10d_common import gpus_for_rank, DoubleGpuNet, ConvNet, ModuleForDdpCommHook\nfrom torch import nn\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch.testing._internal.common_distributed import (\n MultiProcessTestCase,\n requires_nccl,\n requires_nccl_version,\n skip_if_lt_x_gpu,\n get_timeout,\n skip_if_rocm,\n with_dist_debug_levels,\n with_nccl_blocking_wait,\n)\nfrom torch.testing._internal.common_utils import (\n IS_WINDOWS,\n TestCase,\n run_tests,\n retry_on_connect_failures,\n TEST_WITH_DEV_DBG_ASAN,\n TEST_WITH_TSAN,\n sandcastle_skip,\n sandcastle_skip_if,\n)\nfrom torch.utils.checkpoint import checkpoint\nfrom torch.distributed.optim import functional_optim_map\n\nif not IS_WINDOWS:\n from torch.distributed.optim.functional_sgd import _FunctionalSGD\n from torch.distributed.optim.functional_adam import _FunctionalAdam\n from torch.distributed.optim.functional_adamw import _FunctionalAdamW\n\nif TEST_WITH_TSAN:\n print(\n \"Skip as TSAN is not fork-safe since we're forking in a multi-threaded environment\",\n file=sys.stderr,\n )\n sys.exit(0)\n\nif TEST_WITH_DEV_DBG_ASAN:\n print(\n \"Skip ASAN as torch + multiprocessing spawn have known issues\", file=sys.stderr\n )\n sys.exit(0)\n\n\nclass RendezvousEnvTest(TestCase):\n @retry_on_connect_failures\n @requires_nccl()\n @sandcastle_skip_if(\n torch.cuda.device_count() == 0, \"No GPUs available, skipping test\"\n )\n def test_common_errors(self):\n vars = {\n \"WORLD_SIZE\": \"1\",\n \"RANK\": \"0\",\n \"MASTER_ADDR\": \"127.0.0.1\",\n \"MASTER_PORT\": str(common.find_free_port()),\n }\n\n class Env(object):\n def __init__(self, vars):\n self.env_patcher = mock.patch.dict(os.environ, vars, clear=True)\n\n def __enter__(self):\n self.env_patcher.start()\n\n def __exit__(self, type, value, traceback):\n self.env_patcher.stop()\n\n def without(d, key):\n d = d.copy()\n d.pop(key)\n return d\n\n def withouts(d, keys):\n d = d.copy()\n for key in keys:\n d.pop(key)\n return d\n\n with Env(without(vars, \"WORLD_SIZE\")):\n self.assertEqual(None, os.environ.get(\"WORLD_SIZE\"))\n with self.assertRaisesRegex(ValueError, \"WORLD_SIZE expected\"):\n gen = c10d.rendezvous(\"env://\")\n next(gen)\n c10d.init_process_group(backend=\"nccl\", world_size=1)\n self.assertEqual(c10d.get_rank(), 0)\n self.assertEqual(c10d.get_world_size(), 1)\n c10d.destroy_process_group()\n\n with Env(without(vars, \"RANK\")):\n self.assertEqual(None, os.environ.get(\"RANK\"))\n with self.assertRaisesRegex(ValueError, \"RANK expected\"):\n gen = c10d.rendezvous(\"env://\")\n next(gen)\n c10d.init_process_group(backend=\"nccl\", rank=0)\n self.assertEqual(c10d.get_rank(), 0)\n self.assertEqual(c10d.get_world_size(), 1)\n c10d.destroy_process_group()\n\n with Env(withouts(vars, [\"RANK\", \"WORLD_SIZE\"])):\n 
self.assertEqual(None, os.environ.get(\"RANK\"))\n self.assertEqual(None, os.environ.get(\"WORLD_SIZE\"))\n c10d.init_process_group(backend=\"nccl\", rank=0, world_size=1)\n self.assertEqual(c10d.get_rank(), 0)\n self.assertEqual(c10d.get_world_size(), 1)\n c10d.destroy_process_group()\n\n with Env(vars):\n c10d.init_process_group(backend=\"nccl\")\n self.assertEqual(c10d.get_rank(), 0)\n self.assertEqual(c10d.get_world_size(), 1)\n c10d.destroy_process_group()\n\n with Env(without(vars, \"MASTER_ADDR\")):\n self.assertEqual(None, os.environ.get(\"MASTER_ADDR\"))\n with self.assertRaisesRegex(ValueError, \"MASTER_ADDR expected\"):\n gen = c10d.rendezvous(\"env://\")\n next(gen)\n\n with Env(without(vars, \"MASTER_PORT\")):\n self.assertEqual(None, os.environ.get(\"MASTER_PORT\"))\n with self.assertRaisesRegex(ValueError, \"MASTER_PORT expected\"):\n gen = c10d.rendezvous(\"env://\")\n next(gen)\n\n with Env(without(vars, \"WORLD_SIZE\")):\n self.assertEqual(None, os.environ.get(\"WORLD_SIZE\"))\n gen = c10d.rendezvous(\"env://?world_size={}\".format(1))\n _, _, size = next(gen)\n self.assertEqual(size, 1)\n\n with Env(without(vars, \"RANK\")):\n self.assertEqual(None, os.environ.get(\"RANK\"))\n gen = c10d.rendezvous(\"env://?rank={}\".format(0))\n _, rank, _ = next(gen)\n self.assertEqual(rank, 0)\n\n with Env(withouts(vars, [\"RANK\", \"WORLD_SIZE\"])):\n self.assertEqual(None, os.environ.get(\"RANK\"))\n self.assertEqual(None, os.environ.get(\"WORLD_SIZE\"))\n gen = c10d.rendezvous(\"env://?rank={}&world_size={}\".format(0, 1))\n _, rank, size = next(gen)\n self.assertEqual(rank, 0)\n self.assertEqual(size, 1)\n\n\nclass TimeoutTest(test_c10d_common.AbstractTimeoutTest, TestCase):\n @requires_nccl()\n @retry_on_connect_failures\n @sandcastle_skip_if(\n torch.cuda.device_count() == 0, \"No GPUs available, skipping test\"\n )\n def test_default_store_timeout_nccl(self):\n self._test_default_store_timeout(\"nccl\")\n\n\nclass ProcessGroupNCCLNoGPUTest(TestCase):\n MAIN_PROCESS_RANK = 0\n\n def setUp(self):\n self.rank = self.MAIN_PROCESS_RANK\n self.world_size = 1\n self.file = tempfile.NamedTemporaryFile(delete=False)\n\n def tearDown(self):\n pass\n\n @requires_nccl()\n @sandcastle_skip_if(\n torch.cuda.device_count() > 0, \"GPUs are available, skipping test\"\n )\n def test_init_no_gpus(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n with self.assertRaisesRegex(\n RuntimeError, \"ProcessGroupNCCL is only supported with GPUs, no GPUs found!\"\n ):\n c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n\nclass ProcessGroupNCCLTest(TestCase):\n MAIN_PROCESS_RANK = 0\n\n def setUp(self):\n self.rank = self.MAIN_PROCESS_RANK\n self.world_size = 1\n self.file = tempfile.NamedTemporaryFile(delete=False)\n\n # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests\n # that use NCCL_BLOCKING_WAIT will test it as expected.\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = \"1\"\n self.num_gpus = torch.cuda.device_count()\n\n def tearDown(self):\n pass\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_empty_tensors(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n xs = [torch.cuda.FloatTensor([])]\n pg.broadcast(xs).wait()\n self.assertEqual(0, xs[0].numel())\n\n pg.allreduce(xs).wait()\n self.assertEqual(0, xs[0].numel())\n\n pg.reduce(xs).wait()\n self.assertEqual(0, xs[0].numel())\n\n ys = 
[[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]\n pg.allgather(ys, xs).wait()\n for y in ys[0]:\n self.assertEqual(0, y.numel())\n\n ys = [torch.cuda.FloatTensor([])]\n xs = [[torch.cuda.FloatTensor([]) for _ in range(self.world_size)]]\n pg.reduce_scatter(ys, xs).wait()\n self.assertEqual(0, ys[0].numel())\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_broadcast_ops(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def broadcast(xs, rootRank, rootTensor):\n opts = c10d.BroadcastOptions()\n opts.rootRank = rootRank\n opts.rootTensor = rootTensor\n work = pg.broadcast(xs, opts)\n work.wait()\n\n # for every root tensor\n for rt in range(self.num_gpus):\n tensors = []\n for i in range(self.num_gpus):\n tensors.append(torch.tensor([i]).cuda(i))\n\n broadcast(tensors, self.rank, rt)\n\n for i in range(self.num_gpus):\n self.assertEqual(tensors[i], tensors[rt])\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_allreduce_ops(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def allreduce(tensors, op):\n opts = c10d.AllreduceOptions()\n opts.reduceOp = op\n work = pg.allreduce(tensors, opts)\n work.wait()\n\n # Sum\n tensors = []\n for i in range(self.num_gpus):\n tensors.append(torch.tensor([i + 1]).cuda(i))\n\n allreduce(tensors, c10d.ReduceOp.SUM)\n\n for i in range(self.num_gpus):\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),\n tensors[i],\n )\n\n # Product\n tensors = []\n for i in range(self.num_gpus):\n tensors.append(torch.tensor([i + 1]).cuda(i))\n\n allreduce(tensors, c10d.ReduceOp.PRODUCT)\n\n for i in range(self.num_gpus):\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.tensor([float(math.factorial(self.num_gpus))]), tensors[i]\n )\n\n # Min\n tensors = []\n for i in range(self.num_gpus):\n tensors.append(torch.tensor([i + 1]).cuda(i))\n\n allreduce(tensors, c10d.ReduceOp.MIN)\n\n for i in range(self.num_gpus):\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(torch.tensor([1.0]), tensors[i])\n\n # Max\n tensors = []\n for i in range(self.num_gpus):\n tensors.append(torch.tensor([i + 1]).cuda(i))\n\n allreduce(tensors, c10d.ReduceOp.MAX)\n\n for i in range(self.num_gpus):\n self.assertEqual(torch.tensor([self.num_gpus]), tensors[i])\n\n for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):\n with self.assertRaisesRegex(\n RuntimeError, \"Cannot use \" + str(op) + \" with NCCL\"\n ):\n allreduce(tensors, op)\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_reduce_ops(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def reduce(xs, rootRank, rootTensor, op=None):\n opts = c10d.ReduceOptions()\n opts.rootRank = rootRank\n opts.rootTensor = rootTensor\n if op:\n opts.reduceOp = op\n work = pg.reduce(xs, opts)\n work.wait()\n\n # for every root tensor\n for rt in range(self.num_gpus):\n tensors = []\n for i in range(self.num_gpus):\n tensors.append(torch.tensor([i + 1]).cuda(i))\n\n reduce(tensors, self.rank, rt)\n\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(\n torch.tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),\n tensors[rt],\n )\n\n for op in (c10d.ReduceOp.BAND, c10d.ReduceOp.BOR, c10d.ReduceOp.BXOR):\n with self.assertRaisesRegex(\n RuntimeError, \"Cannot use \" + str(op) + \" with NCCL\"\n ):\n reduce(tensors, self.rank, rt, op)\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_allgather_ops(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def allgather(output_ts, input_ts):\n work = pg.allgather(output_ts, input_ts)\n work.wait()\n\n tensors = []\n output_ts = [[] for _ in range(self.num_gpus)]\n\n for idx, ls in enumerate(output_ts):\n for _ in range(self.world_size * self.num_gpus):\n ls.append(torch.tensor([0]).cuda(idx))\n\n for i in range(self.num_gpus):\n tensors.append(torch.tensor([i]).cuda(i))\n\n allgather(output_ts, tensors)\n\n # Verification\n for device_ts in output_ts:\n for s_idx, t in enumerate(device_ts):\n self.assertEqual(torch.tensor([s_idx]), t)\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_allgather_base_ops(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def allgather_base(output_t, input_t):\n work = pg._allgather_base(output_t, input_t)\n work.wait()\n\n device_id = self.rank % self.num_gpus\n # allgather_base is GPU number agnostic.\n # Each rank contribute one tensor regardless of GPU counts\n tensor = torch.tensor([self.rank]).cuda(device_id)\n output_t = torch.empty((self.world_size), dtype=tensor.dtype).cuda(device_id)\n\n allgather_base(output_t, tensor)\n\n # Verification\n self.assertEqual(torch.arange(self.world_size), output_t)\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_allgather_base_basics(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def allgather_base(output_t, input_t):\n work = pg._allgather_base(output_t, input_t)\n work.wait()\n\n device_id = self.rank % self.num_gpus\n 
# anticpate an error\n with self.assertRaisesRegex(\n RuntimeError,\n \"output tensor size must be equal to world_size times input tensor size\",\n ):\n tensor = torch.tensor([self.rank]).cuda(device_id)\n output_t = torch.empty((self.world_size + 1), dtype=tensor.dtype).cuda(\n device_id\n )\n # fails the check because output_t is not correctly sized\n allgather_base(output_t, tensor)\n\n # anticpate an error\n with self.assertRaisesRegex(\n RuntimeError, \"output tensor must have the same type as input tensor\"\n ):\n tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)\n output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(\n device_id\n )\n # fails the check because the dtype is different\n allgather_base(output_t, tensor)\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_reduce_scatter_base_basics(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def reduce_scatter_base(output_t, input_t):\n work = pg._reduce_scatter_base(output_t, input_t)\n work.wait()\n\n device_id = self.rank % self.num_gpus\n # anticpate an error\n with self.assertRaisesRegex(\n RuntimeError,\n \"input tensor must be the same size as output size times world size\",\n ):\n input_t = torch.tensor([self.rank]).cuda(device_id)\n output_t = torch.empty((self.world_size + 1), dtype=input_t.dtype).cuda(\n device_id\n )\n # fails the check because output_t is not correctly sized\n reduce_scatter_base(output_t, input_t)\n\n # anticpate an error\n with self.assertRaisesRegex(\n RuntimeError, \"input tensor must be the same type as the outut tensor.\"\n ):\n tensor = torch.tensor([self.rank], dtype=torch.float).cuda(device_id)\n output_t = torch.empty((self.world_size + 1), dtype=torch.long).cuda(\n device_id\n )\n # fails the check because the dtype is different\n reduce_scatter_base(output_t, tensor)\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_reduce_scatter_ops(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def reduce_scatter(outputs, input_lists, op):\n opts = c10d.ReduceScatterOptions()\n opts.reduceOp = op\n work = pg.reduce_scatter(outputs, input_lists, opts)\n work.wait()\n\n virtual_rank = self.rank * self.world_size\n virtual_world_size = self.num_gpus * self.world_size\n\n output = [torch.tensor([0]).cuda(i) for i in range(self.num_gpus)]\n\n # 0 1 2\n # 0 [0..11] [1..12]\n # 1 [3..14]\n # 2\n # 3\n\n # Sum\n tensor_lists = [\n [\n torch.tensor([self.rank * self.num_gpus + i + j]).cuda(i)\n for j in range(virtual_world_size)\n ]\n for i in range(self.num_gpus)\n ]\n\n reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)\n\n for i in range(self.num_gpus):\n expected = torch.tensor(\n [\n float(self.num_gpus * (self.num_gpus - 1) / 2)\n + (virtual_rank + i) * virtual_world_size\n ]\n )\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(expected, output[i])\n\n # Min\n reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)\n\n for i in range(self.num_gpus):\n expected = torch.tensor([self.rank * self.world_size + i])\n self.assertEqual(expected, output[i])\n\n # Max\n reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)\n\n for i in range(self.num_gpus):\n expected = torch.tensor(\n [self.rank * self.world_size + i + virtual_world_size - 1]\n )\n self.assertEqual(expected, output[i])\n\n # Product\n tensor_lists = [\n [\n torch.tensor(\n [(self.rank * self.num_gpus + i + j) % virtual_world_size + 1]\n ).cuda(i)\n for j in range(virtual_world_size)\n ]\n for i in range(self.num_gpus)\n ]\n\n reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)\n\n for i in range(self.num_gpus):\n expected = torch.tensor([float(math.factorial(virtual_world_size))])\n # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095\n self.assertEqualIgnoreType(expected, output[i])\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_reduce_scatter_base_ops(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def reduce_scatter_base(output_t, input_t):\n work = pg._reduce_scatter_base(output_t, input_t)\n work.wait()\n\n device_id = self.rank % self.num_gpus\n # reduce_scatter_base is GPU number agnostic.\n # Each rank contribute one tensor regardless of GPU counts\n output_t = torch.empty([1]).cuda(device_id)\n tensor = torch.arange(self.world_size, dtype=output_t.dtype).cuda(device_id)\n\n reduce_scatter_base(output_t, tensor)\n\n # Verification\n self.assertEqual(output_t[0], self.rank * self.world_size)\n\n @requires_nccl()\n @sandcastle_skip_if(torch.cuda.device_count() < 2, \"NCCL test requires 2+ GPUs\")\n def test_barrier(self):\n store = c10d.FileStore(self.file.name, self.world_size)\n pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def allreduce(tensors):\n opts = c10d.AllreduceOptions()\n work = pg.allreduce(tensors, opts)\n return work\n\n # Making the collective to operate on\n # 1, 2, 3, 4, .... self.num_gpus GPUs\n tensors_list = [[] for _ in range(2, self.num_gpus + 1)]\n for i in range(2, self.num_gpus + 1):\n for j in range(i):\n tensors_list[i - 2].append(torch.tensor([j + 1]).cuda(j))\n\n works = []\n for tensors in tensors_list:\n work = allreduce(tensors)\n works.append(work)\n\n # Barrier will ensure that all previous work is completed\n pg.barrier().wait()\n\n for i in range(2, self.num_gpus + 1):\n for j in range(i):\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(\n torch.tensor([float(i * (i + 1) / 2)]), tensors_list[i - 2][j]\n )\n\n\nclass DistributedDataParallelTest(\n test_c10d_common.AbstractDistributedDataParallelTest, MultiProcessTestCase\n):\n def setUp(self):\n super(DistributedDataParallelTest, self).setUp()\n # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests\n # that use NCCL_BLOCKING_WAIT will test it as expected.\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = \"1\"\n self._spawn_processes()\n\n def _test_nccl_backend(\n self, devices, device_ids, multi_device=False, gradient_as_bucket_view=False\n ):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n self._test_ddp_with_process_group(\n process_group, devices, device_ids, multi_device, gradient_as_bucket_view\n )\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_multi_device_ids_not_allowed(self):\n int_devices = list(range(torch.cuda.device_count()))\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n with self.assertRaisesRegex(\n ValueError, \"device_ids can only be None or contain a single element.\"\n ):\n self._test_nccl_backend(devices, int_devices)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_single_device_module_device_ids_None(self):\n self._test_nccl_backend(None, None)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_single_device_module_empty_device_ids(self):\n # This tests the backward compatibility of accepting an empty list as `device_ids`,\n # although we no longer document this in favor of the default value of `None`,\n # which is consistent with multi-device modules and CPU modules.\n self._test_nccl_backend(None, [])\n\n @requires_nccl()\n @skip_if_lt_x_gpu(4)\n def test_nccl_backend_multi_device_module_device_ids_None(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:2]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_nccl_backend(devices, None, multi_device=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_1gpu_module_device_ids_integer_list(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:1]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_nccl_backend(devices, int_devices)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_nccl_backend_1gpu_module_device_ids_torch_device_list(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:1]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_nccl_backend(devices, devices)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(4)\n def test_nccl_backend_2gpu_module(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:2]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_nccl_backend(devices, None, multi_device=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(8)\n def test_nccl_backend_4gpu_module(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:4]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n self._test_nccl_backend(devices, None, multi_device=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(4)\n def test_ddp_multi_device_module_config(self):\n gpus = gpus_for_rank(self.world_size)[self.rank]\n\n self.assertTrue(len(gpus) >= 2, \"expecting at least 2 gpus per process\")\n\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = 
c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n gpus = gpus[:2]\n model = DoubleGpuNet(gpus)\n\n with self.assertRaisesRegex(\n ValueError,\n \"DistributedDataParallel device_ids and output_device arguments only work with \"\n \"single-device/multiple-device GPU modules or CPU modules\",\n ):\n ddp_model = DistributedDataParallel(\n model, output_device=gpus[1], process_group=process_group\n )\n\n with self.assertRaisesRegex(\n ValueError, \"device_ids can only be None or contain a single element.\"\n ):\n ddp_model = DistributedDataParallel(\n model, device_ids=gpus, process_group=process_group\n )\n\n with self.assertRaisesRegex(\n ValueError, \"input module must be on the same type of devices\"\n ):\n model.fc1 = model.fc1.cpu()\n ddp_model = DistributedDataParallel(model, process_group=process_group)\n\n model = model.cpu()\n with self.assertRaisesRegex(\n ValueError, \"device_ids can only be None or contain a single element.\"\n ):\n ddp_model = DistributedDataParallel(\n model, device_ids=gpus, process_group=process_group\n )\n\n def _test_fp16(self, gradient_as_bucket_view=False):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n gpus = gpus_for_rank(self.world_size)[self.rank]\n model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()\n nn.init.constant_(model.weight, 1)\n ddp_model = DistributedDataParallel(\n model,\n device_ids=[gpus[0]],\n process_group=process_group,\n bucket_cap_mb=0.001,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n # Input 2**15, so that the gradients will overflow with a\n # world_size of 2, unless we normalize the gradient by the\n # world_size before the reduction\n input = torch.tensor([[2 ** 15]]).cuda(gpus[0]).half()\n\n # Step model\n ddp_model.train()\n output = ddp_model(input)\n loss = output.sum()\n loss.backward()\n\n self.assertFalse(any(torch.isinf(p.grad).any() for p in ddp_model.parameters()))\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_fp16(self):\n self._test_fp16()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_fp16_grad_is_view(self):\n self._test_fp16(gradient_as_bucket_view=True)\n\n def _test_arbitrary_forward_return_value(self, gradient_as_bucket_view=False):\n \"\"\"\n Note: this test can be sped up by only running it on a CPU module\n once DistributedDataParallel supports them.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n class ForwardReturnValueModule(nn.Module):\n def __init__(self):\n super(ForwardReturnValueModule, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.fc3 = nn.Linear(4, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x, fn):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n # The first softmax does NOT include fc3 in its autograd graph\n # whereas the second softmax DOES. If we pass only the first\n # tensor we see in the output to the reducer, it marks the\n # gradient for fc3 as ready (because it doesn't show up). 
If\n # downstream uses of this return value choose to differentiate\n # against the second output tensor, it would still receive a\n # gradient and a callback for this tensor, resulting in a crash.\n return fn(\n F.softmax(x, dim=1),\n F.softmax(self.fc3(x), dim=1),\n )\n\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n model = DistributedDataParallel(\n ForwardReturnValueModule().float().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(\n device_id\n )\n\n # Always run \"backward\" to ensure the reducer is called by autograd.\n # If we don't correctly capture the output tensors from the return value,\n # the reducer won't see a hook for the unused parameter, and throw an error.\n # The correct capture is what we're testing in this function.\n def test(box, unbox):\n output = model(input, fn=box)\n loss = criterion(unbox(output), target)\n loss.backward()\n\n # Test with identity return value\n test(\n box=lambda x, y: (x, y),\n unbox=lambda obj: obj[1],\n )\n\n # Test with list return value\n test(\n box=lambda x, y: [\"foo\", x, \"bar\", y],\n unbox=lambda obj: obj[3],\n )\n\n # Test with tuple return value\n test(\n box=lambda x, y: (\"foo\", x, \"bar\", y),\n unbox=lambda obj: obj[3],\n )\n\n # Test with dict return value\n test(\n box=lambda x, y: {\"foo\": \"bar\", \"a\": x, \"b\": y},\n unbox=lambda obj: obj[\"b\"],\n )\n\n # Test with list with dict return value\n test(\n box=lambda x, y: [\"foo\", \"bar\", {\"a\": x, \"b\": y}],\n unbox=lambda obj: obj[2][\"b\"],\n )\n\n # Test with dict with list return value\n test(\n box=lambda x, y: {\"foo\": \"bar\", \"list\": [0, x, 1, y]},\n unbox=lambda obj: obj[\"list\"][3],\n )\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_arbitrary_forward_return_value(self):\n self._test_arbitrary_forward_return_value()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_arbitrary_forward_return_value_grad_is_view(self):\n self._test_arbitrary_forward_return_value(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_with_lazy_parameters(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n with self.assertRaisesRegex(\n RuntimeError, \"Modules with uninitialized parameters\"\n ):\n DistributedDataParallel(\n torch.nn.LazyLinear(10), process_group=process_group\n )\n\n def _test_find_unused_parameters_kwarg(self, gradient_as_bucket_view=False):\n \"\"\"\n Note: this test can be sped up by only running it on a CPU module\n once DistributedDataParallel supports them.\n \"\"\"\n torch.cuda.set_device(self.rank)\n dist.init_process_group(\n backend=\"nccl\",\n world_size=self.world_size,\n rank=self.rank,\n init_method=f\"file://{self.file_name}\",\n )\n process_group = c10d.distributed_c10d._get_default_group()\n\n class FindUnusedParametersModule(nn.Module):\n def __init__(self):\n super(FindUnusedParametersModule, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.fc3 = nn.Linear(4, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n # Return the fc3 module so that the caller can invoke it\n # outside of the forward 
function. While this is bad practice,\n # we can use it to trigger a reducer error.\n return (F.softmax(x, dim=1), self.fc3)\n\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(\n device_id\n )\n\n ddp_model = None\n\n def test_find_unused_parameters(\n find_unused_parameters, test_default=False, gradient_as_bucket_view=False\n ):\n if test_default:\n model = DistributedDataParallel(\n FindUnusedParametersModule().float().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n else:\n model = DistributedDataParallel(\n FindUnusedParametersModule().float().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n find_unused_parameters=find_unused_parameters,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n nonlocal ddp_model\n ddp_model = model\n\n output, fc3 = model(input)\n output = fc3(output)\n loss = criterion(output, target)\n loss.backward()\n\n # First test that finding unused params under these conditions is to\n # trigger an error when `backward` is called (because fc3 is an unused\n # parameter and will therefore be marked ready twice).\n try:\n test_find_unused_parameters(\n True, gradient_as_bucket_view=gradient_as_bucket_view\n )\n except Exception as ex:\n self.assertTrue(\n str(ex).startswith(\n \"Expected to mark a variable ready only once.\",\n )\n )\n unused_index = 2\n unused_index_str = f\"Parameter at index {unused_index}\"\n model = ddp_model.module\n for module_name, module in model.named_modules():\n if module == model.fc3:\n for parameter_name, _ in module.named_parameters(recurse=False):\n unused_fqn = f\"{module_name}.{parameter_name}\"\n # Only one such parameter in model.fc3, since bias=False\n break\n\n if dist._get_debug_mode() != dist._DistributedDebugLevel.OFF:\n unused_index_str += f\" with name {unused_fqn}\"\n\n self.assertTrue(unused_index_str in str(ex))\n else:\n self.fail(\"Expected exception\")\n\n dist.barrier(process_group)\n\n # Then test that the default behavior can be overridden by setting\n # `find_unused_parameters=False`.\n try:\n test_find_unused_parameters(\n False, gradient_as_bucket_view=gradient_as_bucket_view\n )\n except Exception as ex:\n self.fail(\"Unexpected exception: %s\" % ex)\n\n # Test find_unused_parameters defaults to False\n try:\n test_find_unused_parameters(\n True, test_default=True, gradient_as_bucket_view=gradient_as_bucket_view\n )\n except Exception as ex:\n self.fail(\"Unexpected exception: %s\" % ex)\n\n # TODO: Combine the following tests once https://github.com/pytorch/pytorch/issues/55967\n # is resolved.\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n @with_dist_debug_levels(levels=[\"DETAIL\"])\n def test_find_unused_parameters_kwarg_debug_detail(self):\n self._test_find_unused_parameters_kwarg()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n @with_dist_debug_levels(levels=[\"INFO\"])\n def test_find_unused_parameters_kwarg_debug_info(self):\n self._test_find_unused_parameters_kwarg()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n @with_dist_debug_levels(levels=[\"OFF\"])\n def test_find_unused_parameters_kwarg_debug_off(self):\n self._test_find_unused_parameters_kwarg()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n @with_dist_debug_levels(levels=[\"DETAIL\"])\n def 
test_find_unused_parameters_kwarg_grad_is_view_debug_detail(self):\n self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n @with_dist_debug_levels(levels=[\"INFO\"])\n def test_find_unused_parameters_kwarg_grad_is_view_debug_info(self):\n self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n @with_dist_debug_levels(levels=[\"OFF\"])\n def test_find_unused_parameters_kwarg_grad_is_view_debug_off(self):\n self._test_find_unused_parameters_kwarg(gradient_as_bucket_view=True)\n\n def _test_multiple_outputs_multiple_backward(self, gradient_as_bucket_view=False):\n \"\"\"\n Note: this test can be sped up by only running it on a CPU module\n once DistributedDataParallel supports them.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n class MultipleOutputModule(nn.Module):\n def __init__(self):\n super(MultipleOutputModule, self).__init__()\n\n def define_module():\n return nn.Sequential(\n nn.Linear(2, 10, bias=False),\n nn.ReLU(),\n nn.Linear(10, 4, bias=False),\n nn.ReLU(),\n )\n\n self.module0 = define_module()\n self.module1 = define_module()\n\n def forward(self, x):\n return (\n F.softmax(self.module0(x), dim=1),\n F.softmax(self.module1(x), dim=1),\n )\n\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n model = DistributedDataParallel(\n MultipleOutputModule().float().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(\n device_id\n )\n\n # Compute loss and gradients for both outputs\n output1, output2 = model(input)\n loss1 = criterion(output1, target)\n loss1.backward()\n loss2 = criterion(output2, target)\n loss2.backward()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_multiple_outputs_multiple_backward(self):\n self._test_multiple_outputs_multiple_backward()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_multiple_outputs_multiple_backward_grad_is_view(self):\n self._test_multiple_outputs_multiple_backward(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_no_grad(self):\n \"\"\"\n Note: this test can be sped up by only running it on a CPU module\n once DistributedDataParallel supports them.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n class NoGradModule(nn.Module):\n def __init__(self):\n super(NoGradModule, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n return F.softmax(x, dim=1)\n\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n model = DistributedDataParallel(\n NoGradModule().float().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n )\n\n batch_size = 4\n input = torch.rand([batch_size, 2], dtype=torch.float)\n\n def check_no_grads():\n for p in model.parameters():\n self.assertTrue(p.requires_grad)\n self.assertIsNone(p.grad)\n\n # After initialization, no parameter has their gradient set.\n check_no_grads()\n\n # Run `forward` function with 
torch.no_grad()\n with torch.no_grad():\n output = model(input)\n self.assertTrue(isinstance(output, torch.Tensor))\n\n # No parameter should have their gradient set.\n check_no_grads()\n\n def _test_accumulate_gradients_module(self, gradient_as_bucket_view=False):\n # This is NOT the recommended way to implement accumulating grads, but\n # we would like to make sure DDP does not mess up with the underlying\n # module.\n int_devices = gpus_for_rank(self.world_size)[self.rank][:1]\n devices = [torch.device(\"cuda:\" + str(i)) for i in int_devices]\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n global_batch_size = self.world_size\n\n model, ddp_model, input, target = self._prepare_single_device_module(\n process_group, devices, devices, global_batch_size, gradient_as_bucket_view\n )\n\n def step_model(model, input, target):\n model.train()\n output = model(input)\n loss = F.mse_loss(output, target.to(output.device))\n loss.backward()\n\n # ensure accumulate grads works with no_grad\n with torch.no_grad():\n ddp_model.train()\n ddp_model.module(input)\n\n # Check two model parameters over 4 iterations.\n # Use 4 iterations because we alternate between reducing and\n # not reducing and want to make sure we switch both ways.\n for iteration in range(4):\n step_model(model, input, target)\n\n if iteration % 2 == 0:\n # Skip gradients sync without calling prepare_for_backward\n step_model(\n ddp_model.module,\n input[self.rank : (self.rank + 1)],\n target[self.rank : (self.rank + 1)],\n )\n for i, j in zip(model.parameters(), ddp_model.parameters()):\n self.assertNotEqual(i.grad, j.grad)\n else:\n step_model(\n ddp_model,\n input[self.rank : (self.rank + 1)],\n target[self.rank : (self.rank + 1)],\n )\n for i, j in zip(model.parameters(), ddp_model.parameters()):\n # TODO(#38095): Replace assertEqualIgnoreType. 
See issue #38095\n self.assertEqualIgnoreType(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)\n\n # Shuffle the input so that DDP input is different\n torch.manual_seed(1337 + iteration)\n input = input[torch.randperm(global_batch_size)]\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_accumulate_gradients_module(self):\n self._test_accumulate_gradients_module()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_accumulate_gradients_module_with_grad_is_view(self):\n self._test_accumulate_gradients_module(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_failure_recovery(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n # need to create a separate file for the recovered FileStore, because\n # the original one will be deleted when destructing the first FileStore.\n recovery_filename = self.file_name + \"_recovery\"\n\n if self.rank == 0:\n # the file will be deleted by the recovered FileStore\n open(recovery_filename, \"w\").close()\n\n # not necessary to run barrier here, as DDP will synchronize\n\n class TestModel(nn.Module):\n def __init__(self):\n super(TestModel, self).__init__()\n self.fc1 = nn.Linear(2, 10, bias=False)\n self.fc2 = nn.Linear(10, 4, bias=False)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n return F.softmax(x, dim=1)\n\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n model = TestModel().float().to(device_id)\n ddp = DistributedDataParallel(\n model,\n device_ids=[device_id],\n process_group=process_group,\n )\n\n batch_size = 4\n criterion = nn.CrossEntropyLoss()\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(\n device_id\n )\n\n for _ in range(6):\n output = ddp(input)\n loss = criterion(output, target)\n loss.backward()\n\n del ddp\n del process_group\n del store # this will delete self.file_name\n\n store = c10d.FileStore(recovery_filename, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n ddp = DistributedDataParallel(\n model,\n device_ids=[device_id],\n process_group=process_group,\n )\n\n input = torch.rand([batch_size, 2], dtype=torch.float)\n target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(\n device_id\n )\n for _ in range(6):\n output = ddp(input)\n loss = criterion(output, target)\n loss.backward()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_pass_default_pg(self):\n dist.init_process_group(\n \"nccl\",\n init_method=f\"file://{self.file_name}\",\n world_size=self.world_size,\n rank=self.rank,\n )\n\n default_pg = c10d.distributed_c10d._get_default_group()\n dist.destroy_process_group(default_pg)\n self.assertFalse(dist.is_initialized())\n\n def _test_grad_layout(self, replica_devices, layer_devs, local_batch_size):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n global_batch_size = local_batch_size * self.world_size\n\n # Carry out some trials with small buckets and some with big buckets.\n bucketsizes = (0.000001, 25)\n # Tuples of lists. 
Each list describes per-layer characteristics for one trial.\n layer_formats = (\n [torch.contiguous_format] * 4,\n [torch.channels_last] * 2 + [torch.contiguous_format] * 2,\n [torch.channels_last] * 4,\n )\n layer_dtypes = (\n [torch.float] * 4,\n [torch.float] * 2 + [torch.half] * 2,\n [torch.half] * 4,\n )\n\n input_dev = layer_devs[0] if isinstance(layer_devs, list) else layer_devs\n target_dev = layer_devs[-1] if isinstance(layer_devs, list) else layer_devs\n input = torch.randn(\n (global_batch_size, 8, 8, 8), device=input_dev, dtype=torch.float\n )\n target = torch.randn(\n (global_batch_size, 8, 4, 4), device=target_dev, dtype=torch.float\n )\n local_batch_start = self.rank * local_batch_size\n local_batch_end = (self.rank + 1) * local_batch_size\n\n # Reducer.cpp sneakily creates one \"initial bucket\" that ignores the \"bucket_cap_mb\"\n # argument. The following makes sure the initial bucket also complies.\n @contextmanager\n def first_bucket_size(ddp_bucket_mb):\n old_DEFAULT_FIRST_BUCKET_BYTES = dist._DEFAULT_FIRST_BUCKET_BYTES\n dist._DEFAULT_FIRST_BUCKET_BYTES = int(ddp_bucket_mb * 1.0e6)\n try:\n yield\n finally:\n dist._DEFAULT_FIRST_BUCKET_BYTES = old_DEFAULT_FIRST_BUCKET_BYTES\n\n with torch.backends.cudnn.flags(\n enabled=True, deterministic=True, benchmark=False\n ):\n for formats, dtypes, bucketsize in product(\n layer_formats, layer_dtypes, bucketsizes\n ):\n with first_bucket_size(bucketsize):\n model_msg = (\n \"rank = {} formats = {} dtypes = {} bucketsize = {} \".format(\n self.rank, formats, dtypes, bucketsize\n )\n )\n try:\n m = ConvNet(layer_devs, formats, dtypes)\n m_ddp = DistributedDataParallel(\n copy.deepcopy(m),\n device_ids=replica_devices,\n process_group=process_group,\n bucket_cap_mb=bucketsize,\n )\n opt = torch.optim.SGD(m.parameters(), lr=0.1)\n opt_ddp = torch.optim.SGD(m_ddp.parameters(), lr=0.1)\n has_half = any(p.dtype is torch.half for p in m.parameters())\n tol = 1.0e-3 if has_half else 1.0e-5\n except BaseException:\n # Prints case-specific debugging info to narrow down failing case.\n print(\n \"Caught exception during model creation for \" + model_msg,\n flush=True,\n )\n raise\n # 3 iters: First iter creates grads, second iter retests after rebucketing,\n # third iter tries zeroed grads.\n for it in range(3):\n iter_msg = \"iter = {} \".format(it) + model_msg\n named_msg = iter_msg\n try:\n F.mse_loss(m(input).float(), target).backward()\n F.mse_loss(\n m_ddp(input[local_batch_start:local_batch_end]).float(),\n target[local_batch_start:local_batch_end],\n ).backward()\n for i, ((layer_name, m_child), m_ddp_child) in enumerate(\n zip(m.named_children(), m_ddp.module.children())\n ):\n named_msg = layer_name + \".weight\" + \" \" + iter_msg\n self.assertTrue(\n m_child.weight.grad.is_contiguous(\n memory_format=formats[i]\n ),\n named_msg,\n )\n self.assertTrue(\n m_ddp_child.weight.grad.is_contiguous(\n memory_format=formats[i]\n ),\n named_msg,\n )\n for j, ((param_name, p), p_ddp) in enumerate(\n zip(\n m_child.named_parameters(),\n m_ddp_child.parameters(),\n )\n ):\n named_msg = (\n layer_name + \".\" + param_name + \" \" + iter_msg\n )\n self.assertEqual(\n p.grad, p_ddp.grad, rtol=tol, atol=tol\n )\n opt.step()\n opt_ddp.step()\n if it == 0:\n for p, p_ddp in zip(m.parameters(), m_ddp.parameters()):\n p.grad = None\n p_ddp.grad = None\n else:\n m.zero_grad()\n m_ddp.zero_grad()\n except BaseException:\n # Makes sure we still get info if an error occurred somewhere other than the asserts.\n print(\n \"Caught exception during 
iterations at \" + named_msg,\n flush=True,\n )\n raise\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n @skip_if_rocm\n def test_grad_layout_1devicemodule_1replicaperprocess(self):\n dev0 = torch.device(\"cuda:\" + str(gpus_for_rank(self.world_size)[self.rank][0]))\n # Tells DDP to use just one device.\n replica_devices = [dev0]\n # Tells _test_grad_layout to construct ConvNet with all layers on this process's first assigned device.\n layer_devs = dev0\n local_batch_size = 8\n self._test_grad_layout(replica_devices, layer_devs, local_batch_size)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(4)\n @skip_if_rocm\n def test_grad_layout_2devicemodule(self):\n int_devices = gpus_for_rank(self.world_size)[self.rank][:2]\n dev0 = torch.device(\"cuda:\" + str(int_devices[0]))\n dev1 = torch.device(\"cuda:\" + str(int_devices[1]))\n # DDP's default behavior for a multi-device module is \"don't replicate.\"\n replica_devices = None\n # Tells _test_grad_layout to constructs this process's ConvNet on 2 devices, with 2 layers on each device.\n layer_devs = [dev0] * 2 + [dev1] * 2\n local_batch_size = 8\n self._test_grad_layout(replica_devices, layer_devs, local_batch_size)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_param_layout_mismatch_error(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n dev0 = torch.device(\"cuda:\" + str(gpus_for_rank(self.world_size)[self.rank][0]))\n layer_devs = dev0\n layer_formats = (\n [torch.contiguous_format] * 4\n if self.rank == 0\n else [torch.channels_last] * 4\n )\n layer_dtypes = [torch.float] * 4\n\n m = ConvNet(layer_devs, layer_formats, layer_dtypes)\n if self.rank == 0:\n m_ddp = DistributedDataParallel(\n m, device_ids=[dev0], process_group=process_group\n )\n else:\n with self.assertRaisesRegex(\n RuntimeError,\n \".* appears not to match strides of the same param in process 0\",\n ):\n m_ddp = DistributedDataParallel(\n m, device_ids=[dev0], process_group=process_group\n )\n\n def _gpu_model_with_ddp_comm_hook(\n self,\n process_group,\n hook=None,\n gradient_as_bucket_view=False,\n state=None,\n static_graph=False,\n ):\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n gpu_model = DistributedDataParallel(\n ModuleForDdpCommHook().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n gradient_as_bucket_view=gradient_as_bucket_view,\n )\n\n if static_graph:\n gpu_model._set_static_graph()\n\n # Register a DDP communication hook if any.\n if hook is not None:\n gpu_model.register_comm_hook(state, hook)\n\n return gpu_model\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_comm_hook_future_passing_gpu_nccl(self):\n \"\"\"\n This unit test verifies whether the Future object is passed properly using nccl backend.\n The hook callback function creates a Future object and sets a value to it.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n # Get GPU model with simple_hook registered.\n gpu_model = self._gpu_model_with_ddp_comm_hook(process_group, self._simple_hook)\n\n # check whether the grads are equal to what simple_hook's then callback returns.\n # without the comm_hook, result would be 0.25 * torch.ones(2, 2).\n self._run_and_verify_hook(gpu_model, 8, 2 * torch.ones(2, 2))\n\n def _test_ddp_comm_hook_allreduce_hook_nccl(\n self, gradient_as_bucket_view=False, static_graph=False\n ):\n \"\"\"\n This unit test verifies whether a DDP 
communication hook that just calls\n allreduce gives the same result with the case of no hook registered.\n Without the then callback, the future_value in reducer is no longer\n a PyObject, and this unit test verifies future_value is properly checked.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def allreduce_hook(\n state: object, bucket: dist.GradBucket\n ) -> torch.futures.Future[torch.Tensor]:\n tensors = [bucket.buffer() / self.world_size]\n return (\n process_group.allreduce(tensors)\n .get_future()\n .then(lambda fut: fut.value()[0])\n )\n\n # Get GPU model with allreduce_hook registered.\n gpu_model = self._gpu_model_with_ddp_comm_hook(\n process_group, allreduce_hook, gradient_as_bucket_view, static_graph\n )\n\n # check whether the grads are equal to what DDP without hook would return.\n self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))\n\n def _test_default_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):\n \"\"\"\n This unit test verifies whether default Python DDP communication hooks ALLREDUCE and FP16_COMPRESS\n can give the same result with the case of no hook registered.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n # For these default DDP comm hooks, the only state is process group.\n state = process_group\n for hook in [default.allreduce_hook, default.fp16_compress_hook]:\n # Get GPU model with the hook registered.\n # The first arg 'process_group' is used for initializing the test environment,\n # so it cannot be replaced by 'state', although they have the same value.\n gpu_model = self._gpu_model_with_ddp_comm_hook(\n process_group, hook, gradient_as_bucket_view, state\n )\n\n # check whether the grads are equal to what DDP without hook would return.\n self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))\n\n def _test_fp16_compress_wrapper(self, gradient_as_bucket_view=False):\n \"\"\"\n This unit test verifies whether wrapping the ALLREDUCE and POWER_SGD hooks with\n the FP16_WRAPPER can give the same result as when there is no hook registered.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n powerSGD_state = powerSGD.PowerSGDState(process_group=process_group)\n\n hook_args = [\n (powerSGD.powerSGD_hook, powerSGD_state),\n (default.allreduce_hook, process_group),\n ]\n\n for hook, state in hook_args:\n gpu_model = self._gpu_model_with_ddp_comm_hook(\n process_group,\n default.fp16_compress_wrapper(hook),\n gradient_as_bucket_view,\n state,\n )\n\n # check whether the grads are equal to what DDP without hook would return.\n self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))\n\n def _test_hook_then_optimizer(\n self,\n functional_optim_cls,\n *functional_optim_args,\n gradient_as_bucket_view=False,\n **functional_optim_kwargs\n ):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n hook, hook_state = default.allreduce_hook, process_group\n opt_hook_state = default._OptimizerHookState(\n functional_optim_cls,\n *functional_optim_args,\n **functional_optim_kwargs,\n )\n gpu_model = self._gpu_model_with_ddp_comm_hook(\n process_group,\n default._hook_then_optimizer(hook, opt_hook_state),\n gradient_as_bucket_view,\n hook_state,\n )\n prev_params = 
copy.deepcopy(list(gpu_model.parameters()))\n # Run model with optimizer as part of hook\n for _ in range(8):\n gpu_model.zero_grad()\n self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))\n new_params = list(gpu_model.parameters())\n # Run plain model with allreduce hook and separate optimizer step.\n # Verify gradients are the same.\n gpu_model_allreduce = self._gpu_model_with_ddp_comm_hook(\n process_group, default.allreduce_hook, gradient_as_bucket_view, hook_state\n )\n mapping = {v: k for k, v in functional_optim_map.items()}\n sgd = mapping.get(functional_optim_cls)(\n gpu_model_allreduce.parameters(),\n *functional_optim_args,\n **functional_optim_kwargs,\n )\n for _ in range(8):\n gpu_model_allreduce.zero_grad()\n self._run_and_verify_hook(gpu_model_allreduce, 8, 0.25 * torch.ones(2, 2))\n sgd.step()\n post_opt_params = list(gpu_model_allreduce.parameters())\n for opt_as_hook_param, post_opt_param in zip(new_params, post_opt_params):\n self.assertEqual(opt_as_hook_param, post_opt_param)\n\n def _test_powerSGD_ddp_comm_hook_nccl(self, gradient_as_bucket_view=False):\n \"\"\"\n This unit test verifies whether Python DDP communication hook POWER_SGD\n can give the same result with the case of no hook registered.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n # Get GPU model with the hook registered.\n # Test the hook with different algorithmic configs.\n for use_error_feedback, warm_start in product([True, False], [True, False]):\n state = powerSGD.PowerSGDState(\n process_group=process_group,\n matrix_approximation_rank=1,\n use_error_feedback=use_error_feedback,\n warm_start=warm_start,\n )\n for hook in [powerSGD.powerSGD_hook, powerSGD.batched_powerSGD_hook]:\n gpu_model = self._gpu_model_with_ddp_comm_hook(\n process_group, hook, gradient_as_bucket_view, state\n )\n\n # check whether the grads are equal to what DDP without hook would return.\n self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))\n\n def _test_builtin_ddp_comm_hooks_nccl(self, gradient_as_bucket_view=False):\n \"\"\"\n This unit test verifies whether built-in C++ DDP communication hooks ALLREDUCE and FP16_COMPRESS\n can give the same result with the case of no hook registered.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n for comm_hook_type in [\n dist.BuiltinCommHookType.ALLREDUCE,\n dist.BuiltinCommHookType.FP16_COMPRESS,\n ]:\n # Get GPU model with the built-in communication hook.\n gpu_model = self._gpu_model_with_builtin_ddp_comm_hook(\n process_group, comm_hook_type, gradient_as_bucket_view\n )\n\n # check whether the grads are equal to what DDP without hook would return.\n self._run_and_verify_hook(gpu_model, 8, 0.25 * torch.ones(2, 2))\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_comm_hook_allreduce_hook_nccl(self):\n self._test_ddp_comm_hook_allreduce_hook_nccl()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_default_ddp_comm_hooks_nccl(self):\n self._test_default_ddp_comm_hooks_nccl()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_fp16_compress_wrapper_nccl(self):\n self._test_fp16_compress_wrapper()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_hook_then_sgd_nccl(self):\n sgd_lr = 1e-2\n sgd_momentum = 0.9\n sgd_weight_decay = 0.01\n self._test_hook_then_optimizer(\n _FunctionalSGD,\n sgd_lr,\n momentum=sgd_momentum,\n 
weight_decay=sgd_weight_decay,\n )\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_hook_then_sgd_nccl_grad_as_bucket_view(self):\n sgd_lr = 1e-2\n sgd_momentum = 0.9\n sgd_weight_decay = 0.01\n self._test_hook_then_optimizer(\n _FunctionalSGD,\n sgd_lr,\n momentum=sgd_momentum,\n weight_decay=sgd_weight_decay,\n gradient_as_bucket_view=True\n )\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_hook_then_adamw_nccl(self):\n adamw_lr = 1e-2\n adamw_betas = (0.9, 0.99)\n adamw_eps = 1e-6\n self._test_hook_then_optimizer(\n _FunctionalAdamW,\n adamw_lr,\n betas=adamw_betas,\n eps=adamw_eps,\n gradient_as_bucket_view=True\n )\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_hook_then_adam_nccl(self):\n adam_lr = 1e-2\n adam_betas = (0.9, 0.99)\n adam_eps = 1e-6\n self._test_hook_then_optimizer(\n _FunctionalAdam,\n adam_lr,\n betas=adam_betas,\n eps=adam_eps,\n gradient_as_bucket_view=True\n )\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_hook_then_adam_nccl_grad_as_bucket_view(self):\n adam_lr = 1e-2\n adam_betas = (0.9, 0.99)\n adam_eps = 1e-6\n self._test_hook_then_optimizer(\n _FunctionalAdam,\n adam_lr,\n betas=adam_betas,\n eps=adam_eps,\n gradient_as_bucket_view=True\n )\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_builtin_ddp_comm_hooks_nccl(self):\n self._test_builtin_ddp_comm_hooks_nccl()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_powerSGD_ddp_comm_hook_nccl(self):\n self._test_powerSGD_ddp_comm_hook_nccl()\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_comm_hook_allreduce_hook_nccl_grad_is_view(self):\n self._test_ddp_comm_hook_allreduce_hook_nccl(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_comm_hook_allreduce_hook_nccl_static_graph(self):\n self._test_ddp_comm_hook_allreduce_hook_nccl(static_graph=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_default_ddp_comm_hooks_nccl_is_view(self):\n self._test_default_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_fp16_compress_wrapper_is_view(self):\n self._test_fp16_compress_wrapper(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_builtin_ddp_comm_hooks_nccl_grad_is_view(self):\n self._test_builtin_ddp_comm_hooks_nccl(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_powerSGD_ddp_comm_hook_nccl_grad_is_view(self):\n self._test_powerSGD_ddp_comm_hook_nccl(gradient_as_bucket_view=True)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_comm_hook_allreduce_with_then_hook_nccl(self):\n \"\"\"\n This unit test verifies whether a DDP communication hook that calls allreduce and then\n multiplies the result by ten and divides by two gives the expected result.\n \"\"\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n def allreduce_with_then_hook(\n state: object, bucket: dist.GradBucket\n ) -> torch.futures.Future[torch.Tensor]:\n tensors = [bucket.buffer() / self.world_size]\n fut = process_group.allreduce(tensors).get_future()\n\n def mult(fut):\n # Multiply the result by 10.\n return 10 * fut.value()[0]\n\n def div(fut):\n # Divide the result by 2.\n return 0.5 * fut.value()\n\n return fut.then(mult).then(div)\n\n # Get GPU model with allreduce_with_then_hook registered.\n gpu_model = self._gpu_model_with_ddp_comm_hook(\n process_group, allreduce_with_then_hook\n )\n\n # check whether the grads are equal to 
what allreduce returns multuplied by 5.\n # without the comm_hook, result would be still 0.25 * torch.ones(2, 2).\n self._run_and_verify_hook(gpu_model, 8, 1.25 * torch.ones(2, 2))\n\n class AcceptsParam(torch.nn.Module):\n def __init__(self, p, factor):\n super().__init__()\n self.a = p\n self.f = factor\n\n def forward(self, input):\n return input + self.a * self.f\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_weight_sharing(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n size = 2048 * 2048\n dev = self.rank\n world = self.world_size\n\n p = torch.nn.Parameter(torch.randn(size, requires_grad=True))\n\n for try_set_to_none, use_bucket_view in product((False, True), (False, True)):\n m = torch.nn.Sequential(\n self.AcceptsParam(p, dev + 1), self.AcceptsParam(p, dev + 1)\n ).cuda(dev)\n\n m = torch.nn.parallel.DistributedDataParallel(\n m,\n bucket_cap_mb=1,\n gradient_as_bucket_view=use_bucket_view,\n device_ids=[dev],\n process_group=process_group,\n )\n\n for i in range(3):\n m.zero_grad(set_to_none=try_set_to_none)\n m(1).sum().backward()\n\n # Each param value is multiplied by \"rank + 1\" twice in forward, so the grad\n # values produced by a particular rank should be 2. * (rank + 1).\n # Summing these over ranks and dividing by world size gives the expected result:\n analytic = torch.full_like(\n p, 2.0 * (world * (world + 1.0) / 2.0) / world, device=dev\n )\n for name, p in m.named_parameters():\n self.assertEqual(\n p.grad,\n analytic,\n \"mismatch at \"\n + name\n + \".grad for \"\n + \"set_to_none = {}, use_bucket_view = {}\".format(\n try_set_to_none, use_bucket_view\n ),\n )\n\n # A list of tests for ddp with activation checkpointing\n # when gradient_as_bucket_view=True, False.\n # Most of the tests are referred to\n # https://github.com/facebookresearch/fairscale/blob/master/tests/nn/pipe/test_checkpoint_ddp.py\n class CheckpointOnceModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.l1 = nn.Linear(20, 20)\n self.l2 = nn.Linear(20, 20)\n\n def forward(self, inp):\n x = self.l1(inp)\n x = checkpoint(self.l2, x)\n return x\n\n class CheckpointTwiceModule(CheckpointOnceModule):\n def __init__(self):\n super().__init__()\n\n def forward(self, inp):\n x = self.l1(inp)\n x = checkpoint(self.l2, x)\n x = checkpoint(self.l2, x)\n return x\n\n def _prepare_dummy_data(self):\n ddp_bs = 16\n bs = ddp_bs * self.world_size\n input = torch.rand((bs, 20), device=\"cuda\", requires_grad=True)\n target = torch.randn((bs, 20), device=\"cuda\")\n offset = self.rank * ddp_bs\n ddp_input = input[offset : offset + ddp_bs]\n ddp_target = target[offset : offset + ddp_bs]\n return input, ddp_input, target, ddp_target\n\n def _train_model(self, model, input_var, target, loss, run_checkpoint=False):\n model.train()\n if run_checkpoint:\n output = checkpoint(model, input_var)\n else:\n output = model(input_var)\n l = loss(output, target)\n l.backward()\n\n def _test_ddp_checkpointing(\n self,\n input_model,\n process_group,\n use_bucket_view,\n find_unused_parameters=False,\n static_graph=False,\n run_checkpoint=False,\n ):\n # to reprodce the same training results\n torch.cuda.set_device(self.rank)\n torch.manual_seed(31415)\n model = copy.deepcopy(input_model).cuda()\n ddp_model = copy.deepcopy(input_model).cuda()\n ddp_model = nn.parallel.DistributedDataParallel(\n ddp_model,\n bucket_cap_mb=1,\n gradient_as_bucket_view=use_bucket_view,\n device_ids=[self.rank],\n 
process_group=process_group,\n find_unused_parameters=find_unused_parameters,\n )\n if static_graph:\n ddp_model._set_static_graph()\n self.assertEqual(\n ddp_model._get_ddp_logging_data().get(\"static_graph\", 0), static_graph\n )\n input, ddp_input, target, ddp_target = self._prepare_dummy_data()\n loss = nn.MSELoss()\n for i in range(5):\n model.zero_grad(set_to_none=False)\n ddp_model.zero_grad(set_to_none=False)\n self._train_model(model, input, target, loss, run_checkpoint=run_checkpoint)\n self._train_model(\n ddp_model, ddp_input, ddp_target, loss, run_checkpoint=run_checkpoint\n )\n for i, j in zip(model.parameters(), ddp_model.parameters()):\n self.assertTrue(i.grad is not None)\n self.assertTrue(j.grad is not None)\n self.assertEqual(i.grad, j.grad, rtol=1.3e-06, atol=5e-5)\n\n # DDP works as expect when layer is checkpointed only once\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_checkpointing_once(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n for use_bucket_view, static_graph in product((False, True), (False, True)):\n self._test_ddp_checkpointing(\n self.CheckpointOnceModule(),\n process_group=process_group,\n use_bucket_view=use_bucket_view,\n static_graph=static_graph,\n )\n\n # DDP will fail when there are unused_parameters in the model\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_checkpointing_unused_params(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n for use_bucket_view in (True, False):\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected to mark a variable ready only once.\",\n ):\n model = self._test_ddp_checkpointing(\n self.CheckpointOnceModule(),\n process_group=process_group,\n use_bucket_view=use_bucket_view,\n find_unused_parameters=True,\n static_graph=False,\n )\n # test passes when static_graph is true\n model = self._test_ddp_checkpointing(\n self.CheckpointOnceModule(),\n process_group=process_group,\n use_bucket_view=use_bucket_view,\n find_unused_parameters=True,\n static_graph=True,\n )\n\n # DDP will fail when the same layer is checkponted twice\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_checkpointing_twice(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n for use_bucket_view in (True, False):\n with self.assertRaisesRegex(\n RuntimeError,\n \"Expected to mark a variable ready only once.\",\n ):\n model = self._test_ddp_checkpointing(\n self.CheckpointTwiceModule(),\n process_group=process_group,\n use_bucket_view=use_bucket_view,\n static_graph=False,\n )\n model = self._test_ddp_checkpointing(\n self.CheckpointTwiceModule(),\n process_group=process_group,\n use_bucket_view=use_bucket_view,\n static_graph=True,\n )\n\n # DDP works as expected if there is weight sharing among layers\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_ddp_checkpointing_weight_sharing(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n torch.cuda.set_device(self.rank)\n for use_bucket_view, static_graph in product((False, True), (False, True)):\n torch.manual_seed(31415)\n l1 = nn.Linear(20, 20)\n l2 = nn.Linear(20, 20)\n l1.weight = l2.weight\n model = nn.Sequential(l1, l2)\n self._test_ddp_checkpointing(\n model,\n process_group=process_group,\n 
use_bucket_view=use_bucket_view,\n static_graph=static_graph,\n run_checkpoint=True,\n )\n\n\nclass NcclErrorHandlingTest(MultiProcessTestCase):\n def setUp(self):\n super(NcclErrorHandlingTest, self).setUp()\n # Need to skip return code checking for these tests since the child\n # processes don't exit cleanly.\n self.skip_return_code_checks = [\n self.test_nccl_errors_blocking_abort.__wrapped__,\n self.test_nccl_errors_blocking_sigkill.__wrapped__,\n self.test_nccl_errors_blocking_sigterm.__wrapped__,\n self.test_nccl_errors_blocking_nonzero_exit.__wrapped__,\n ]\n # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests\n # that use NCCL_BLOCKING_WAIT will test it as expected.\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = \"1\"\n self._spawn_processes()\n\n def tearDown(self):\n super(NcclErrorHandlingTest, self).tearDown()\n try:\n os.remove(self.file_name)\n except OSError:\n pass\n\n @property\n def op_timeout_sec(self):\n return 1\n\n @property\n def world_size(self):\n return 3\n\n @property\n def blocking_wait_error_msg(self):\n return \"Caught collective operation timeout\"\n\n def _run_all_reduce(self, pg):\n pg.allreduce(torch.rand(10).cuda(self.rank))\n\n @requires_nccl()\n @requires_nccl_version((2, 4, 0), \"Need NCCL 2.4+ for error checking\")\n @skip_if_lt_x_gpu(3)\n @skip_if_rocm\n def test_nccl_errors_nonblocking(self):\n # Note: we unset and restore NCCL_ASYNC_ERROR_HANDLING for this test\n # since test_c10d_common runs with async error handling by default, but this\n # tests behavior when it is not enabled.\n prev_nccl_async_error_handling = os.environ.get(\n \"NCCL_ASYNC_ERROR_HANDLING\", None\n )\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = \"0\"\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n process_group.allreduce(torch.rand(10).cuda(self.rank))\n if self.rank == 0:\n # This allreduce does not block Python thread as allreduce enqueues\n # the cuda operation, and then wait only blocks the current cuda\n # stream.\n work = process_group.allreduce(torch.rand(10).cuda(self.rank))\n work.wait()\n\n # Now the work scheduled next should hang forever since the previous\n # allreduce will never complete.\n t = threading.Thread(target=self._run_all_reduce, args=(process_group,))\n t.daemon = True\n t.start()\n t.join(int(get_timeout(self.id()) / 5))\n self.assertTrue(t.is_alive())\n\n if prev_nccl_async_error_handling is not None:\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = prev_nccl_async_error_handling\n\n def _test_nccl_errors_blocking(self, func):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(\n store,\n self.rank,\n self.world_size,\n timeout=timedelta(seconds=self.op_timeout_sec),\n )\n process_group.allreduce(torch.rand(10).cuda(self.rank))\n if self.rank == 0:\n work = process_group.allreduce(torch.rand(10).cuda(self.rank))\n with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):\n # Operation would time out in blocking mode.\n work.wait()\n # Run some GPU operations to make sure cuda has not gotten stuck.\n # It was observed cuda could get stuck if NCCL communicators were\n # not properly aborted before throwing RuntimeError.\n a = torch.rand(10).cuda(self.rank)\n elif self.rank == 1:\n # Clean up structures (ex: files for FileStore before going down)\n del process_group\n func()\n else:\n # Wait for timeout\n time.sleep(2 * self.op_timeout_sec)\n\n # Now verify communicators on this rank have been 
aborted by the watchdog thread.\n self._wait_for_comm_abort(process_group)\n\n @with_nccl_blocking_wait\n @requires_nccl()\n @requires_nccl_version((2, 4, 0), \"Need NCCL 2.4+ for error checking\")\n @skip_if_lt_x_gpu(3)\n @skip_if_rocm\n def test_nccl_errors_blocking_clean_exit(self):\n self._test_nccl_errors_blocking(lambda: sys.exit(0))\n\n @with_nccl_blocking_wait\n @requires_nccl()\n @requires_nccl_version((2, 4, 0), \"Need NCCL 2.4+ for error checking\")\n @skip_if_lt_x_gpu(3)\n @skip_if_rocm\n def test_nccl_errors_blocking_nonzero_exit(self):\n self._test_nccl_errors_blocking(lambda: sys.exit(1))\n\n @with_nccl_blocking_wait\n @requires_nccl()\n @requires_nccl_version((2, 4, 0), \"Need NCCL 2.4+ for error checking\")\n @skip_if_lt_x_gpu(3)\n @skip_if_rocm\n @sandcastle_skip(\n \"Frequently times out see https://github.com/pytorch/pytorch/issues/58920\"\n )\n def test_nccl_errors_blocking_abort(self):\n self._test_nccl_errors_blocking(lambda: os.abort())\n\n @with_nccl_blocking_wait\n @requires_nccl()\n @requires_nccl_version((2, 4, 0), \"Need NCCL 2.4+ for error checking\")\n @skip_if_lt_x_gpu(3)\n @skip_if_rocm\n def test_nccl_errors_blocking_sigkill(self):\n self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGKILL))\n\n @with_nccl_blocking_wait\n @requires_nccl()\n @requires_nccl_version((2, 4, 0), \"Need NCCL 2.4+ for error checking\")\n @skip_if_lt_x_gpu(3)\n @skip_if_rocm\n def test_nccl_errors_blocking_sigterm(self):\n self._test_nccl_errors_blocking(lambda: os.kill(os.getpid(), signal.SIGTERM))\n\n @with_nccl_blocking_wait\n @requires_nccl()\n @requires_nccl_version((2, 4, 0), \"Need NCCL 2.4+ for error checking\")\n @skip_if_lt_x_gpu(3)\n def test_nccl_blocking_wait_with_barrier(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(\n store,\n self.rank,\n self.world_size,\n timeout=timedelta(seconds=self.op_timeout_sec),\n )\n process_group.barrier().wait()\n if self.rank == 0:\n with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):\n # This should timeout\n process_group.barrier().wait()\n\n def _run_invalid_nccl_blocking_wait_env(self, val):\n os.environ[\"NCCL_BLOCKING_WAIT\"] = val\n store = c10d.FileStore(self.file_name, self.world_size)\n with self.assertRaises(RuntimeError):\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(3)\n def test_invalid_nccl_blocking_wait_env(self):\n self._run_invalid_nccl_blocking_wait_env(\"abc\")\n self._run_invalid_nccl_blocking_wait_env(\"-1\")\n self._run_invalid_nccl_blocking_wait_env(\"2147483647\")\n self._run_invalid_nccl_blocking_wait_env(\"4294967295\")\n\n def _wait_for_comm_abort(self, process_group):\n \"\"\"\n Waits for the watchdog thread to abort communicators for the process group.\n \"\"\"\n while True:\n try:\n process_group.allreduce(torch.rand(10).cuda(self.rank))\n except Exception as e:\n if \"NCCL communicator was aborted\" in str(e):\n return\n else:\n raise e\n time.sleep(1)\n\n @with_nccl_blocking_wait\n @requires_nccl()\n @skip_if_lt_x_gpu(3)\n def test_nccl_timeout(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n\n # Initialize process_group.\n timeout = 1\n process_group = c10d.ProcessGroupNCCL(\n store, self.rank, self.world_size, timeout=timedelta(seconds=timeout)\n )\n process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()\n\n if self.rank == 0:\n # This should timeout in about 1 second.\n start = time.time()\n # Watchdog may 
abort timed out work resulting in NCCL error instead of operation timed out.\n with self.assertRaisesRegex(RuntimeError, self.blocking_wait_error_msg):\n process_group.allreduce(torch.rand(10).cuda(self.rank)).wait()\n else:\n # Sleep to ensure timeout.\n time.sleep(2 * timeout)\n\n self._wait_for_comm_abort(process_group)\n\n\nclass CommTest(test_c10d_common.AbstractCommTest, MultiProcessTestCase):\n def setUp(self):\n super(CommTest, self).setUp()\n # NCCL_BLOCKING_WAIT overrides NCCL_ASYNC_ERROR_HANDLING hence tests\n # that use NCCL_BLOCKING_WAIT will test it as expected.\n os.environ[\"NCCL_ASYNC_ERROR_HANDLING\"] = \"1\"\n self._spawn_processes()\n\n def tearDown(self):\n super(CommTest, self).tearDown()\n try:\n os.remove(self.file_name)\n except OSError:\n pass\n\n def _test_broadcast_coalesced(self, process_group, device, root_rank):\n half = torch.float16\n\n # No support for float16 for CPU tensors\n if device == torch.device(\"cpu\"):\n half = torch.float32\n\n target = torch.arange(60, dtype=half, device=device).chunk(5)\n target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)\n target += torch.arange(60, dtype=half, device=device).chunk(5)\n target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)\n target += torch.arange(60, dtype=half, device=device).chunk(5)\n target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)\n\n # The tensors to pass to broadcast are idential to the target\n # only on the process that is the root of the broadcast.\n if self.rank == root_rank:\n tensors = list(tensor.clone() for tensor in target)\n else:\n tensors = list(torch.zeros_like(tensor) for tensor in target)\n\n if self.rank != root_rank:\n self.assertNotEqual(tensors, target)\n\n c10d._broadcast_coalesced(\n process_group, tensors, buffer_size=256, src=root_rank\n )\n\n if self.rank != root_rank:\n self.assertEqual(tensors, target)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_broadcast_coalesced_nccl(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)\n device = torch.device(\"cuda:%d\" % self.rank)\n ranks = [0, 1]\n for root_rank in ranks:\n self._test_broadcast_coalesced(process_group, device, root_rank)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_sequence_num_set_default_pg_nccl(self):\n torch.cuda.set_device(self.rank)\n self._test_sequence_num_set_default_pg(backend=\"nccl\")\n\n @skip_if_lt_x_gpu(2)\n @requires_nccl()\n def test_sequence_num_incremented_nccl_default(self):\n self._test_sequence_num_incremented_default_group(\"nccl\")\n\n @skip_if_lt_x_gpu(4)\n @requires_nccl()\n def test_sequence_num_incremented_nccl_subgroup(self):\n if self.world_size < 4:\n return sandcastle_skip(\"Test requires world_size of at least 4\")\n self._test_sequence_num_incremented_subgroup(\"nccl\")\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_sequence_num_set_nccl_new_group(self):\n torch.cuda.set_device(self.rank)\n self._test_sequence_num_set_new_group(backend=\"nccl\")\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_pass_nccl_options_high_priority_stream(self):\n pg_opts = c10d.ProcessGroupNCCL.Options()\n pg_opts.is_high_priority_stream = True\n\n store = c10d.FileStore(self.file_name, self.world_size)\n # Test init_process_group accepts options\n dist.init_process_group(\n \"nccl\",\n world_size=self.world_size,\n rank=self.rank,\n store=store,\n pg_options=pg_opts,\n )\n\n # Test with new_group\n pg = c10d.new_group([0, 1], 
pg_options=pg_opts)\n # test if the process group constructed with high priority stream\n self.assertTrue(pg.options.is_high_priority_stream)\n # test the process group works as expected\n t = torch.tensor([self.rank + 1] * 10).cuda(self.rank)\n pg.allreduce(t).wait()\n expected_tensor = torch.tensor([3] * 10).cuda(self.rank)\n self.assertEqual(expected_tensor, t)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(4)\n def test_nccl_barrier(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n c10d.init_process_group(\n backend=\"nccl\", rank=self.rank, world_size=self.world_size, store=store\n )\n\n t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)\n c10d.all_reduce(t)\n expected_tensor = torch.tensor([3] * 10).cuda(2 * self.rank)\n self.assertEqual(expected_tensor, t)\n\n # Test with new_group\n pg = c10d.new_group([0, 1])\n t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)\n pg.allreduce(t).wait()\n self.assertEqual(expected_tensor, t)\n\n pg = c10d.new_group([0])\n if self.rank == 0:\n t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)\n expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)\n pg.allreduce(t).wait()\n self.assertEqual(expected_tensor, t)\n\n pg = c10d.new_group([1])\n if self.rank == 1:\n t = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)\n expected_tensor = torch.tensor([self.rank + 1] * 10).cuda(2 * self.rank)\n pg.allreduce(t).wait()\n self.assertEqual(expected_tensor, t)\n\n @requires_nccl()\n @skip_if_lt_x_gpu(4)\n def test_nccl_barrier_timeout(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n if self.rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, \"Timed out initializing process group\"\n ):\n c10d.init_process_group(\n backend=\"nccl\",\n rank=self.rank,\n world_size=self.world_size,\n store=store,\n timeout=timedelta(seconds=1),\n )\n\n @requires_nccl()\n @skip_if_lt_x_gpu(4)\n def test_nccl_barrier_timeout_new_group(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n c10d.init_process_group(\n backend=\"nccl\",\n rank=self.rank,\n world_size=self.world_size,\n store=store,\n timeout=timedelta(seconds=1),\n )\n\n if self.rank == 0:\n with self.assertRaisesRegex(\n RuntimeError, \"Timed out initializing process group\"\n ):\n c10d.new_group([0, 1], timeout=timedelta(seconds=1))\n\n with self.assertRaisesRegex(\n RuntimeError, \"Timed out initializing process group\"\n ):\n c10d.new_group([0], timeout=timedelta(seconds=1))\n\n @requires_nccl()\n @skip_if_lt_x_gpu(4)\n def test_nccl_barrier_timeout_new_group_non_member(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n c10d.init_process_group(\n backend=\"nccl\",\n rank=self.rank,\n world_size=self.world_size,\n store=store,\n timeout=timedelta(seconds=1),\n )\n\n if self.rank == 1:\n with self.assertRaisesRegex(\n RuntimeError, \"Timed out initializing process group\"\n ):\n c10d.new_group([0, 1], timeout=timedelta(seconds=1))\n\n with self.assertRaisesRegex(\n RuntimeError, \"Timed out initializing process group\"\n ):\n c10d.new_group([0], timeout=timedelta(seconds=1))\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_nccl_barrier_device_ids(self):\n store = c10d.FileStore(self.file_name, self.world_size)\n c10d.init_process_group(\n backend=\"nccl\", rank=self.rank, world_size=self.world_size, store=store\n )\n\n c10d.barrier(device_ids=[self.rank])\n\n @requires_nccl()\n @skip_if_lt_x_gpu(2)\n def test_nccl_barrier_device_ids_function_argument(self):\n store = c10d.FileStore(self.file_name, 
self.world_size)\n c10d.init_process_group(\n backend=\"nccl\", rank=self.rank, world_size=self.world_size, store=store\n )\n\n with self.assertRaisesRegex(RuntimeError, \"Invalid function argument\"):\n c10d.barrier(device_ids=self.rank)\n\n\nif __name__ == \"__main__\":\n assert (\n not torch.cuda._initialized\n ), \"test_distributed must not have initialized CUDA context on main process\"\n\n run_tests()\n", "from typing import Tuple, Dict, Callable, Any\n\nimport torch\nimport torch.fx\nimport torchvision.models as models\nimport torch.fx.passes.splitter_base as splitter_base\nimport torch.fx.passes.operator_support as op_support\nimport torch.fx.passes.net_min_base as net_min_base\nfrom torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule\n\n\n# The purpose of this example is to demonstrate the overall flow of lowering a PyTorch\n# model to TensorRT via FX with existing FX based tooling. The general lowering flow\n# would be like:\n#\n# 1. Use splitter to split the model if there're ops in the model that we don't want to\n# lower to TensorRT for some reasons like the ops are not supported in TensorRT or\n# running them on other backends provides better performance.\n# 2. Lower the model (or part of the model if splitter is used) to TensorRT via fx2trt.\n#\n# For this example, we use ResNet18 as example model and split out the linear layer to\n# not run on TensorRT just to demonstrate how the splitter works. At the end of this\n# example we did a benchmark for a model (named `split_mod`) with all the ops running\n# on TensorRT execpt linear layer running on PyTorch Cuda versus a model (named `rn18`)\n# fully on PyTorch Cuda.\n\n\ndef lower_mod_to_trt(mod: torch.fx.GraphModule, inputs: Tuple[torch.Tensor]):\n \"\"\"\n Helper function that given a GraphModule `mod` and its `inputs`, build a\n TRTModule that runs the original `mod` on TensorRT.\n \"\"\"\n interp = TRTInterpreter(mod, InputTensorSpec.from_tensors(inputs))\n engine, input_names, output_names = interp.run(*inputs)\n return TRTModule(engine, input_names, output_names)\n\n\nclass OpSupport(op_support.OperatorSupport):\n \"\"\"\n This class is used by splitter to determine which nodes are supported, i.e.\n should be split to the accelerator part (TensorRT).\n \"\"\"\n def is_node_supported(\n self, submodules: Dict[str, torch.nn.Module], node: torch.fx.Node\n ):\n \"\"\"\n Here we want linear layer to not run on TensorRT. 
Thus, we return\n False for linear layer and True for all other ops.\n \"\"\"\n target = op_support.get_node_target(submodules, node)\n\n if target == \"torch.nn.modules.linear.Linear\":\n return False\n\n return True\n\n\nclass TensorRTMinimizer(net_min_base._MinimizerBase):\n \"\"\"\n Need to define a Minimizer class for TensorRT because it's used in Splitter.\n \"\"\"\n def __init__(\n self,\n module: torch.fx.GraphModule,\n sample_input: Tuple[torch.Tensor],\n compare_fn: Callable[[Any, Any, Any], Tuple[float, bool]],\n settings: net_min_base._MinimizerSettingBase = None,\n ):\n if settings is None:\n settings = net_min_base._MinimizerSettingBase()\n\n super().__init__(module, sample_input, compare_fn, settings)\n\n def run_a(self, mod, inputs):\n \"\"\"\n The output of this function serves as an reference.\n \"\"\"\n mod.eval()\n with torch.no_grad():\n return mod(*inputs)\n\n def run_b(self, mod, inputs):\n \"\"\"\n Here we actually run mod on TensorRT return TensorRT result.\n \"\"\"\n mod.eval()\n try:\n mod = lower_mod_to_trt(mod, inputs)\n output = mod(*inputs)\n except RuntimeError as e:\n raise net_min_base.FxNetMinimizerRunFuncError(\n f\"Encounter an error when processing \\n{mod.graph}\\n {e}\"\n )\n else:\n return output\n\n\n# This in the future will be a global TensorRTSplitter and we don't need to create\n# it per example.\nclass TensorRTSplitter(splitter_base._SplitterBase):\n \"\"\"\n Splitter for TensorRT.\n \"\"\"\n def __init__(\n self,\n module: torch.fx.GraphModule,\n sample_input: Tuple[torch.Tensor],\n operator_support: op_support.OperatorSupport = None,\n settings: splitter_base._SplitterSettingBase = None\n ):\n if not operator_support:\n operator_support = op_support.OperatorSupport()\n\n if not settings:\n settings = splitter_base._SplitterSettingBase()\n settings.allow_non_tensor = True\n settings.skip_fusion = True\n\n super().__init__(module, sample_input, operator_support, settings)\n\n def _lower_model_to_backend(self, mod, inputs):\n \"\"\"\n Lower a GraphModule `mod` to TensorRT with `inputs`.\n \"\"\"\n mod = lower_mod_to_trt(mod, inputs)\n return mod\n\n def _find_culprit(self, mod, inputs):\n \"\"\"\n This function serves the preview functionality in Splitter. When previewing\n splitting result, if something wrong happens during lowering model to TensorRT\n or running a TensorRT model, this function will be called to find any culprit\n that is responsible for the error.\n \"\"\"\n # Since we don't care about accuracy here, we pass in a dummy compare function.\n minimizer = TensorRTMinimizer(mod, inputs, lambda a, b, c: (1, True))\n minimizer.settings.traverse_method = \"sequential\"\n minimizer.settings.find_all = True\n culprits = minimizer.minimize()\n\n if len(culprits) == 0:\n reports = \"Unable to find a culprit!\\n\"\n else:\n reports = \"Found some problematic nodes:\\n\"\n for node in culprits:\n reports += f\"{node.format_node()}\\n\"\n\n return reports\n\n\nif __name__ == \"__main__\":\n # Create ResNet18 `rn18` and inputs `x`\n rn18 = models.resnet18().eval().cuda()\n x = torch.randn(5, 3, 224, 224, device=\"cuda\")\n\n # Trace the model with FX.\n traced_rn18 = torch.fx.symbolic_trace(rn18)\n\n # Create a splitter which takes in traced ResNet18.\n splitter = TensorRTSplitter(traced_rn18, (x,), OpSupport())\n\n # node_support_preview() shows the details of node supporting information based\n # on the DummyOpSupport we created.\n #\n # In the output, we have supported node types\n # and unsupported node types. 
Nodes in the model with supported types will be\n # split into accelerator submodules while nodes with unsupported types will be\n # split into cpu submodules.\n splitter.node_support_preview()\n \"\"\"\n output:\n\n Supported node types in the model:\n torch.nn.modules.conv.Conv2d: ((torch.float32,), {})\n torch.nn.modules.batchnorm.BatchNorm2d: ((torch.float32,), {})\n torch.nn.modules.activation.ReLU: ((torch.float32,), {})\n torch.nn.modules.pooling.MaxPool2d: ((torch.float32,), {})\n _operator.add: ((torch.float32, torch.float32), {})\n torch.nn.modules.pooling.AdaptiveAvgPool2d: ((torch.float32,), {})\n torch.flatten: ((torch.float32,), {})\n\n Unsupported node types in the model:\n torch.nn.modules.linear.Linear: ((torch.float32,), {})\n \"\"\"\n\n # split_preview() shows the details of how the model looks like after split.\n # And for every accelerator module in the split model, it would run a check\n # by lowering and running the module. If any error is catched during the\n # checking process, it will try to find which nodes are causing the trouble\n # here with minimizer.\n #\n # Notice that after split, the model will have some submodules called either\n # `_run_on_acc_{}` or `_run_on_cpu_{}`. We have all the supported nodes in\n # `_run_on_acc_{}` modules and all other nodes in `_run_on_cpu_{}` modules.\n #\n # In the output, we can see it estimates the max qps based on PCIe bandwidth,\n # this is something we need to consider when lowering to acceleartors chips,\n # because the data will be flowing between cpu and accelerator which might not\n # matter in GPU case.\n splitter.split_preview()\n \"\"\"\n output:\n\n Before removing small acc subgraphs, total 2 subgraphs are created: 1 acc subgraphs and 1 cpu subgraphs.\n After removing small acc subgraphs, total 2 subgraphs are created: 1 acc subgraphs and 1 cpu subgraphs.\n _run_on_acc_0: 68 node(s)\n _run_on_cpu_1: 1 node(s)\n\n Processing acc submodule _run_on_acc_0\n Checking inputs...\n Checking outputs...\n Total input size in bytes is 3010560, total output size in bytes is 10240, theoretical max qps (bounds by PCIe bandwidth)\n for this submodule is 35665.85034013606.\n Lowering and running succeed!\n\n Theoretical max qps (bounds by PCIe bandwidth) for this model is 35665.85034013606, bottleneck is submodule _run_on_acc_0.\n \"\"\"\n\n # After split we have two submodules, one is `_run_on_acc_0` and one is `_run_on_cpu_1`.\n # We have only one op in `_run_on_cpu_1` which is a linear layer while all other ops are\n # in `_run_on_acc_0`.\n split_mod = splitter()\n print(split_mod.graph)\n \"\"\"\n output:\n\n graph():\n %x : torch.Tensor [#users=1] = placeholder[target=x]\n %_run_on_acc_0 : [#users=1] = call_module[target=_run_on_acc_0](args = (%x,), kwargs = {})\n %_run_on_cpu_1 : [#users=1] = call_module[target=_run_on_cpu_1](args = (%_run_on_acc_0,), kwargs = {})\n return _run_on_cpu_1\n \"\"\"\n\n # We want to lower _run_on_acc_0 to TensorRT.\n split_mod._run_on_acc_0 = lower_mod_to_trt(split_mod._run_on_acc_0, (x,)) # type: ignore[arg-type]\n\n # Assert results are equal with the original model.\n rn18 = rn18.cuda()\n torch.testing.assert_allclose(split_mod(x), rn18(x))\n\n import time\n NITER = 100\n\n s = time.time()\n for _ in range(NITER):\n split_mod(x)\n torch.cuda.synchronize()\n print('trt time (ms/iter)', (time.time() - s) / NITER * 1000)\n \"\"\"\n output:\n\n trt time (ms/iter) 1.978142261505127\n \"\"\"\n\n s = time.time()\n for _ in range(NITER):\n rn18(x)\n torch.cuda.synchronize()\n print('stock PyTorch 
time (ms/iter)', (time.time() - s) / NITER * 1000)\n \"\"\"\n output:\n\n stock PyTorch time (ms/iter) 3.8208484649658203\n \"\"\"\n" ]
[ [ "torch.testing._internal.common_distributed.with_dist_debug_levels", "torch.nn.functional.softmax", "torch.testing._internal.common_utils.find_free_port", "torch.randperm", "torch.nn.LazyLinear", "torch.distributed.ReduceScatterOptions", "torch.testing._internal.common_utils.sandcastle_skip", "torch.distributed.algorithms.ddp_comm_hooks.default_hooks._OptimizerHookState", "torch.distributed.distributed_c10d._get_default_group", "torch.no_grad", "torch.device", "torch.distributed.get_rank", "torch.full_like", "torch.distributed._broadcast_coalesced", "torch.nn.CrossEntropyLoss", "torch.ones", "torch.distributed.init_process_group", "torch.distributed.ReduceOptions", "torch.distributed._get_debug_mode", "torch.randn", "torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_wrapper", "torch.distributed.barrier", "torch.distributed.BroadcastOptions", "torch.distributed.FileStore", "torch.tensor", "torch.distributed.rendezvous", "torch.rand", "torch.arange", "torch.testing._internal.common_distributed.skip_if_lt_x_gpu", "torch.distributed.AllreduceOptions", "torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook.PowerSGDState", "torch.nn.Sequential", "torch.testing._internal.common_distributed.requires_nccl_version", "torch.distributed.algorithms.ddp_comm_hooks.default_hooks._hook_then_optimizer", "torch.isinf", "torch.empty", "torch.nn.init.constant_", "torch.nn.ReLU", "torch.testing._internal.common_distributed.requires_nccl", "torch.distributed.is_initialized", "torch.zeros_like", "torch.cuda.FloatTensor", "torch.nn.Linear", "torch.distributed.is_available", "torch.distributed.destroy_process_group", "torch.cuda.device_count", "torch.distributed.get_world_size", "torch.testing._internal.common_utils.run_tests", "torch.nn.parallel.DistributedDataParallel", "torch.distributed.optim.functional_optim_map.items", "torch.cuda.set_device", "torch.manual_seed", "torch.distributed.ProcessGroupNCCL.Options", "torch.utils.checkpoint.checkpoint", "torch.distributed.new_group", "torch.backends.cudnn.flags", "torch.distributed.ProcessGroupNCCL", "torch.distributed.all_reduce", "torch.nn.MSELoss" ], [ "torch.cuda.synchronize", "torch.randn", "torch.fx.experimental.fx2trt.fx2trt.InputTensorSpec.from_tensors", "torch.fx.passes.operator_support.get_node_target", "torch.fx.symbolic_trace", "torch.fx.passes.net_min_base._MinimizerSettingBase", "torch.no_grad", "torch.fx.experimental.fx2trt.fx2trt.TRTModule", "torch.fx.passes.operator_support.OperatorSupport", "torch.fx.passes.splitter_base._SplitterSettingBase", "torch.fx.passes.net_min_base.FxNetMinimizerRunFuncError" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
disanda/MSV
[ "066ed236a4c5df8b4b5e366020fe2954b7a6915a", "066ed236a4c5df8b4b5e366020fe2954b7a6915a" ]
[ "ablation-study/1.E_align_z.py", "ablation-study/7.E_align_x_AT1.py" ]
[ "import sys\nsys.path.append(\"..\")\nimport os\nimport math\nimport torch\nimport torchvision\nimport model.E.Ablation_Study.E_Blur_Z as BE\nfrom model.utils.custom_adam import LREQAdam\nimport metric.pytorch_ssim as pytorch_ssim\nimport lpips\nimport numpy as np\nimport tensorboardX\nimport argparse\nfrom model.stylegan1.net import Generator, Mapping #StyleGANv1\nfrom training_utils import *\n\ndef train(tensor_writer = None, args = None):\n type = args.mtype\n\n model_path = args.checkpoint_dir_GAN\n if type == 1: # StyleGAN1\n #model_path = './checkpoint/stylegan_v1/ffhq1024/'\n Gs = Generator(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)\n Gs.load_state_dict(torch.load(model_path+'Gs_dict.pth'))\n\n Gm = Mapping(num_layers=int(math.log(args.img_size,2)-1)*2, mapping_layers=8, latent_size=512, dlatent_size=512, mapping_fmaps=512) #num_layers: 14->256 / 16->512 / 18->1024\n Gm.load_state_dict(torch.load(model_path+'Gm_dict.pth'))\n\n Gm.buffer1 = torch.load(model_path+'./center_tensor.pt')\n const_ = Gs.const\n const1 = const_.repeat(args.batch_size,1,1,1).detach().clone().cuda()\n layer_num = int(math.log(args.img_size,2)-1)*2 # 14->256 / 16 -> 512 / 18->1024 \n layer_idx = torch.arange(layer_num)[np.newaxis, :, np.newaxis] # shape:[1,18,1], layer_idx = [0,1,2,3,4,5,6。。。,17]\n ones = torch.ones(layer_idx.shape, dtype=torch.float32) # shape:[1,18,1], ones = [1,1,1,1,1,1,1,1]\n coefs = torch.where(layer_idx < layer_num//2, 0.7 * ones, ones) # 18个变量前8个裁剪比例truncation_psi [0.7,0.7,...,1,1,1]\n coefs = coefs.cuda()\n\n Gs.cuda()\n Gm.cuda()\n\n E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)\n\n else:\n print('error')\n return\n\n if args.checkpoint_dir_E != None:\n E.load_state_dict(torch.load(args.checkpoint_dir_E))\n E.cuda()\n writer = tensor_writer\n\n E_optimizer = LREQAdam([{'params': E.parameters()},], lr=args.lr, betas=(args.beta_1, 0.99), weight_decay=0) \n loss_lpips = lpips.LPIPS(net='vgg').to('cuda')\n\n batch_size = args.batch_size\n it_d = 0\n for iteration in range(0,args.iterations):\n set_seed(iteration%30000)\n z_c1 = torch.randn(batch_size, args.z_dim).cuda() #[n, 512]\n\n if type == 1:\n w1 = Gm(z_c1,coefs_m=coefs) #[batch_size,18,512]\n imgs1 = Gs.forward(w1,int(math.log(args.img_size,2)-2)) # 7->512 / 6->256\n z_c2, _ = E(imgs1)\n z_c2 = z_c2.squeeze(-1).squeeze(-1)\n w2 = Gm(z_c2,coefs_m=coefs)\n imgs2 = Gs.forward(w2,int(math.log(args.img_size,2)-2))\n else:\n print('model type error')\n return\n\n E_optimizer.zero_grad()\n\n#loss Images\n loss_imgs, loss_imgs_info = space_loss(imgs1,imgs2,lpips_model=loss_lpips)\n\n loss_msiv = loss_imgs\n E_optimizer.zero_grad()\n loss_msiv.backward(retain_graph=True)\n E_optimizer.step()\n\n#Latent-Vectors\n\n## w\n #loss_w, loss_w_info = space_loss(w1,w2,image_space = False)\n## c\n loss_c, loss_c_info = space_loss(z_c1,z_c2,image_space = False)\n\n loss_mslv = loss_c*0.01\n E_optimizer.zero_grad()\n loss_mslv.backward()\n E_optimizer.step()\n\n\n print('ep_%d_iter_%d'%(iteration//30000,iteration%30000))\n print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]')\n print('---------ImageSpace--------')\n print('loss_imgs_info: %s'%loss_imgs_info)\n print('---------LatentSpace--------')\n print('loss_c_info: %s'%loss_c_info)\n\n it_d += 1\n writer.add_scalar('loss_imgs_mse', loss_imgs_info[0][0], global_step=it_d)\n 
writer.add_scalar('loss_imgs_mse_mean', loss_imgs_info[0][1], global_step=it_d)\n writer.add_scalar('loss_imgs_mse_std', loss_imgs_info[0][2], global_step=it_d)\n writer.add_scalar('loss_imgs_kl', loss_imgs_info[1], global_step=it_d)\n writer.add_scalar('loss_imgs_cosine', loss_imgs_info[2], global_step=it_d)\n writer.add_scalar('loss_imgs_ssim', loss_imgs_info[3], global_step=it_d)\n writer.add_scalar('loss_imgs_lpips', loss_imgs_info[4], global_step=it_d)\n\n writer.add_scalar('loss_c_mse', loss_c_info[0][0], global_step=it_d)\n writer.add_scalar('loss_c_mse_mean', loss_c_info[0][1], global_step=it_d)\n writer.add_scalar('loss_c_mse_std', loss_c_info[0][2], global_step=it_d)\n writer.add_scalar('loss_c_kl', loss_c_info[1], global_step=it_d)\n writer.add_scalar('loss_c_cosine', loss_c_info[2], global_step=it_d)\n writer.add_scalar('loss_c_ssim', loss_c_info[3], global_step=it_d)\n writer.add_scalar('loss_c_lpips', loss_c_info[4], global_step=it_d)\n\n writer.add_scalars('Latent Space C', {'loss_c_mse':loss_c_info[0][0],'loss_c_mse_mean':loss_c_info[0][1],'loss_c_mse_std':loss_c_info[0][2],'loss_c_kl':loss_c_info[1],'loss_c_cosine':loss_c_info[2]}, global_step=it_d)\n\n\n if iteration % 100 == 0:\n n_row = batch_size\n test_img = torch.cat((imgs1[:n_row],imgs2[:n_row]))*0.5+0.5\n torchvision.utils.save_image(test_img, resultPath1_1+'/ep%d_iter%d.jpg'%(iteration//30000,iteration%30000),nrow=n_row) # nrow=3\n with open(resultPath+'/Loss.txt', 'a+') as f:\n print('i_'+str(iteration),file=f)\n print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]',file=f)\n print('---------ImageSpace--------',file=f)\n print('loss_imgs_info: %s'%loss_imgs_info,file=f)\n print('---------LatentSpace--------',file=f)\n print('loss_c_info: %s'%loss_c_info,file=f)\n if iteration % 5000 == 0:\n torch.save(E.state_dict(), resultPath1_2+'/E_model_ep%d_iter%d.pth'%(iteration//30000,iteration%30000))\n #torch.save(Gm.buffer1,resultPath1_2+'/center_tensor_iter%d.pt'%iteration)\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='the training args')\n parser.add_argument('--iterations', type=int, default=60001) # epoch = iterations//30000\n parser.add_argument('--lr', type=float, default=0.0015)\n parser.add_argument('--beta_1', type=float, default=0.0)\n parser.add_argument('--batch_size', type=int, default=2)\n parser.add_argument('--experiment_dir', default=None) #None\n parser.add_argument('--checkpoint_dir_GAN', default='../checkpoint/stylegan_v1/ffhq1024/') #None ./checkpoint/stylegan_v1/ffhq1024/ or ./checkpoint/stylegan_v2/stylegan2_ffhq1024.pth or ./checkpoint/biggan/256/G-256.pt\n parser.add_argument('--config_dir', default='./checkpoint/biggan/256/biggan-deep-256-config.json') # BigGAN needs it\n parser.add_argument('--checkpoint_dir_E', default=None)\n parser.add_argument('--img_size',type=int, default=1024)\n parser.add_argument('--img_channels', type=int, default=3)# RGB:3 ,L:1\n parser.add_argument('--z_dim', type=int, default=512) # PGGAN , StyleGANs are 512. 
BIGGAN is 128\n parser.add_argument('--mtype', type=int, default=1) # StyleGANv1=1, StyleGANv2=2, PGGAN=3, BigGAN=4\n parser.add_argument('--start_features', type=int, default=16) # 16->1024 32->512 64->256\n args = parser.parse_args()\n\n if not os.path.exists('./result'): os.mkdir('./result')\n resultPath = args.experiment_dir\n if resultPath == None:\n resultPath = \"./result/StyleGANv1-AlationStudy-Z\"\n if not os.path.exists(resultPath): os.mkdir(resultPath)\n\n resultPath1_1 = resultPath+\"/imgs\"\n if not os.path.exists(resultPath1_1): os.mkdir(resultPath1_1)\n\n resultPath1_2 = resultPath+\"/models\"\n if not os.path.exists(resultPath1_2): os.mkdir(resultPath1_2)\n\n writer_path = os.path.join(resultPath, './summaries')\n if not os.path.exists(writer_path): os.mkdir(writer_path)\n writer = tensorboardX.SummaryWriter(writer_path)\n\n use_gpu = True\n device = torch.device(\"cuda\" if use_gpu else \"cpu\")\n\n train(tensor_writer=writer, args = args)\n", "import sys\nsys.path.append(\"..\")\nimport os\nimport math\nimport torch\nimport torchvision\nimport model.E.E_Blur as BE\nfrom model.utils.custom_adam import LREQAdam\nimport metric.pytorch_ssim as pytorch_ssim\nimport lpips\nimport numpy as np\nimport tensorboardX\nimport argparse\nfrom model.stylegan1.net import Generator, Mapping #StyleGANv1\nfrom training_utils import *\n\ndef train(tensor_writer = None, args = None):\n type = args.mtype\n\n model_path = args.checkpoint_dir_GAN\n if type == 1: # StyleGAN1\n #model_path = './checkpoint/stylegan_v1/ffhq1024/'\n Gs = Generator(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)\n Gs.load_state_dict(torch.load(model_path+'Gs_dict.pth'))\n\n Gm = Mapping(num_layers=int(math.log(args.img_size,2)-1)*2, mapping_layers=8, latent_size=512, dlatent_size=512, mapping_fmaps=512) #num_layers: 14->256 / 16->512 / 18->1024\n Gm.load_state_dict(torch.load(model_path+'Gm_dict.pth'))\n\n Gm.buffer1 = torch.load(model_path+'./center_tensor.pt')\n const_ = Gs.const\n const1 = const_.repeat(args.batch_size,1,1,1).detach().clone().cuda()\n layer_num = int(math.log(args.img_size,2)-1)*2 # 14->256 / 16 -> 512 / 18->1024 \n layer_idx = torch.arange(layer_num)[np.newaxis, :, np.newaxis] # shape:[1,18,1], layer_idx = [0,1,2,3,4,5,6。。。,17]\n ones = torch.ones(layer_idx.shape, dtype=torch.float32) # shape:[1,18,1], ones = [1,1,1,1,1,1,1,1]\n coefs = torch.where(layer_idx < layer_num//2, 0.7 * ones, ones) # 18个变量前8个裁剪比例truncation_psi [0.7,0.7,...,1,1,1]\n\n Gs.cuda()\n Gm.eval()\n\n E = BE.BE(startf=args.start_features, maxf=512, layer_count=int(math.log(args.img_size,2)-1), latent_size=512, channels=3)\n\n else:\n print('error')\n return\n\n if args.checkpoint_dir_E != None:\n E.load_state_dict(torch.load(args.checkpoint_dir_E))\n E.cuda()\n writer = tensor_writer\n\n E_optimizer = LREQAdam([{'params': E.parameters()},], lr=args.lr, betas=(args.beta_1, 0.99), weight_decay=0) \n loss_lpips = lpips.LPIPS(net='vgg').to('cuda')\n\n batch_size = args.batch_size\n it_d = 0\n for iteration in range(0,args.iterations):\n set_seed(iteration%30000)\n z = torch.randn(batch_size, args.z_dim) #[32, 512]\n\n if type == 1:\n with torch.no_grad(): #这里需要生成图片和变量\n w1 = Gm(z,coefs_m=coefs).cuda() #[batch_size,18,512]\n imgs1 = Gs.forward(w1,int(math.log(args.img_size,2)-2)) # 7->512 / 6->256\n const2,w2 = E(imgs1)\n imgs2 = Gs.forward(w2,int(math.log(args.img_size,2)-2))\n else:\n print('model type error')\n return\n\n E_optimizer.zero_grad()\n\n#loss Images\n 
loss_imgs, loss_imgs_info = space_loss(imgs1,imgs2,lpips_model=loss_lpips)\n E_optimizer.zero_grad()\n loss_imgs.backward(retain_graph=True)\n E_optimizer.step()\n\n#loss AT1\n imgs_medium_1 = imgs1[:,:,:,imgs1.shape[3]//8:-imgs1.shape[3]//8]\n imgs_medium_2 = imgs2[:,:,:,imgs2.shape[3]//8:-imgs2.shape[3]//8]\n loss_medium, loss_medium_info = space_loss(imgs_medium_1,imgs_medium_2,lpips_model=loss_lpips)\n\n loss_medium = 5*loss_medium \n E_optimizer.zero_grad()\n loss_medium.backward(retain_graph=True)\n E_optimizer.step()\n\n#Latent-Vectors\n\n## w\n loss_w, loss_w_info = space_loss(w1,w2,image_space = False)\n\n## c\n loss_c, loss_c_info = space_loss(const1,const2,image_space = False)\n\n loss_mslv = (loss_w + loss_c)*0.01\n E_optimizer.zero_grad()\n loss_mslv.backward()\n E_optimizer.step()\n\n\n print('ep_%d_iter_%d'%(iteration//30000,iteration%30000))\n print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]')\n print('---------ImageSpace--------')\n print('loss_medium_info: %s'%loss_medium_info)\n print('loss_imgs_info: %s'%loss_imgs_info)\n print('---------LatentSpace--------')\n print('loss_w_info: %s'%loss_w_info)\n print('loss_c_info: %s'%loss_c_info)\n\n\n it_d += 1\n\n writer.add_scalar('loss_medium_mse', loss_medium_info[0][0], global_step=it_d)\n writer.add_scalar('loss_medium_mse_mean', loss_medium_info[0][1], global_step=it_d)\n writer.add_scalar('loss_medium_mse_std', loss_medium_info[0][2], global_step=it_d)\n writer.add_scalar('loss_medium_kl', loss_medium_info[1], global_step=it_d)\n writer.add_scalar('loss_medium_cosine', loss_medium_info[2], global_step=it_d)\n writer.add_scalar('loss_medium_ssim', loss_medium_info[3], global_step=it_d)\n writer.add_scalar('loss_medium_lpips', loss_medium_info[4], global_step=it_d)\n\n writer.add_scalar('loss_imgs_mse', loss_imgs_info[0][0], global_step=it_d)\n writer.add_scalar('loss_imgs_mse_mean', loss_imgs_info[0][1], global_step=it_d)\n writer.add_scalar('loss_imgs_mse_std', loss_imgs_info[0][2], global_step=it_d)\n writer.add_scalar('loss_imgs_kl', loss_imgs_info[1], global_step=it_d)\n writer.add_scalar('loss_imgs_cosine', loss_imgs_info[2], global_step=it_d)\n writer.add_scalar('loss_imgs_ssim', loss_imgs_info[3], global_step=it_d)\n writer.add_scalar('loss_imgs_lpips', loss_imgs_info[4], global_step=it_d)\n\n writer.add_scalar('loss_w_mse', loss_w_info[0][0], global_step=it_d)\n writer.add_scalar('loss_w_mse_mean', loss_w_info[0][1], global_step=it_d)\n writer.add_scalar('loss_w_mse_std', loss_w_info[0][2], global_step=it_d)\n writer.add_scalar('loss_w_kl', loss_w_info[1], global_step=it_d)\n writer.add_scalar('loss_w_cosine', loss_w_info[2], global_step=it_d)\n writer.add_scalar('loss_w_ssim', loss_w_info[3], global_step=it_d)\n writer.add_scalar('loss_w_lpips', loss_w_info[4], global_step=it_d)\n\n writer.add_scalar('loss_c_mse', loss_c_info[0][0], global_step=it_d)\n writer.add_scalar('loss_c_mse_mean', loss_c_info[0][1], global_step=it_d)\n writer.add_scalar('loss_c_mse_std', loss_c_info[0][2], global_step=it_d)\n writer.add_scalar('loss_c_kl', loss_c_info[1], global_step=it_d)\n writer.add_scalar('loss_c_cosine', loss_c_info[2], global_step=it_d)\n writer.add_scalar('loss_c_ssim', loss_c_info[3], global_step=it_d)\n writer.add_scalar('loss_c_lpips', loss_c_info[4], global_step=it_d)\n\n writer.add_scalars('Latent Space W', 
{'loss_w_mse':loss_w_info[0][0],'loss_w_mse_mean':loss_w_info[0][1],'loss_w_mse_std':loss_w_info[0][2],'loss_w_kl':loss_w_info[1],'loss_w_cosine':loss_w_info[2]}, global_step=it_d)\n writer.add_scalars('Latent Space C', {'loss_c_mse':loss_c_info[0][0],'loss_c_mse_mean':loss_c_info[0][1],'loss_c_mse_std':loss_c_info[0][2],'loss_c_kl':loss_c_info[1],'loss_c_cosine':loss_c_info[2]}, global_step=it_d)\n\n\n if iteration % 100 == 0:\n n_row = batch_size\n test_img = torch.cat((imgs1[:n_row],imgs2[:n_row]))*0.5+0.5\n torchvision.utils.save_image(test_img, resultPath1_1+'/ep%d_iter%d.jpg'%(iteration//30000,iteration%30000),nrow=n_row) # nrow=3\n with open(resultPath+'/Loss.txt', 'a+') as f:\n print('i_'+str(iteration),file=f)\n print('[loss_imgs_mse[img,img_mean,img_std], loss_imgs_kl, loss_imgs_cosine, loss_imgs_ssim, loss_imgs_lpips]',file=f)\n print('---------ImageSpace--------',file=f)\n print('loss_medium_info: %s'%loss_medium_info,file=f)\n print('loss_imgs_info: %s'%loss_imgs_info,file=f)\n print('---------LatentSpace--------',file=f)\n print('loss_w_info: %s'%loss_w_info,file=f)\n print('loss_c_info: %s'%loss_c_info,file=f)\n if iteration % 5000 == 0:\n torch.save(E.state_dict(), resultPath1_2+'/E_model_ep%d_iter%d.pth'%(iteration//30000,iteration%30000))\n #torch.save(Gm.buffer1,resultPath1_2+'/center_tensor_iter%d.pt'%iteration)\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='the training args')\n parser.add_argument('--iterations', type=int, default=60001) # epoch = iterations//30000\n parser.add_argument('--lr', type=float, default=0.0015)\n parser.add_argument('--beta_1', type=float, default=0.0)\n parser.add_argument('--batch_size', type=int, default=2)\n parser.add_argument('--experiment_dir', default=None) #None\n parser.add_argument('--checkpoint_dir_GAN', default='../checkpoint/stylegan_v1/ffhq1024/') #None ./checkpoint/stylegan_v1/ffhq1024/ or ./checkpoint/stylegan_v2/stylegan2_ffhq1024.pth or ./checkpoint/biggan/256/G-256.pt\n parser.add_argument('--config_dir', default='./checkpoint/biggan/256/biggan-deep-256-config.json') # BigGAN needs it\n parser.add_argument('--checkpoint_dir_E', default=None)\n parser.add_argument('--img_size',type=int, default=1024)\n parser.add_argument('--img_channels', type=int, default=3)# RGB:3 ,L:1\n parser.add_argument('--z_dim', type=int, default=512) # PGGAN , StyleGANs are 512. BIGGAN is 128\n parser.add_argument('--mtype', type=int, default=1) # StyleGANv1=1, StyleGANv2=2, PGGAN=3, BigGAN=4\n parser.add_argument('--start_features', type=int, default=16) # 16->1024 32->512 64->256\n args = parser.parse_args()\n\n if not os.path.exists('./result'): os.mkdir('./result')\n resultPath = args.experiment_dir\n if resultPath == None:\n resultPath = \"./result/StyleGANv1-AlationStudy-x-x1\"\n if not os.path.exists(resultPath): os.mkdir(resultPath)\n\n resultPath1_1 = resultPath+\"/imgs\"\n if not os.path.exists(resultPath1_1): os.mkdir(resultPath1_1)\n\n resultPath1_2 = resultPath+\"/models\"\n if not os.path.exists(resultPath1_2): os.mkdir(resultPath1_2)\n\n writer_path = os.path.join(resultPath, './summaries')\n if not os.path.exists(writer_path): os.mkdir(writer_path)\n writer = tensorboardX.SummaryWriter(writer_path)\n\n use_gpu = True\n device = torch.device(\"cuda\" if use_gpu else \"cpu\")\n\n train(tensor_writer=writer, args = args)\n" ]
[ [ "torch.ones", "torch.load", "torch.cat", "torch.randn", "torch.where", "torch.arange", "torch.device" ], [ "torch.ones", "torch.load", "torch.cat", "torch.randn", "torch.no_grad", "torch.where", "torch.arange", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jcharlong/scipy
[ "153467a9174b0c6f4b90ffeed5871e5018658108", "153467a9174b0c6f4b90ffeed5871e5018658108", "153467a9174b0c6f4b90ffeed5871e5018658108", "153467a9174b0c6f4b90ffeed5871e5018658108", "153467a9174b0c6f4b90ffeed5871e5018658108" ]
[ "benchmarks/benchmarks/linalg.py", "scipy/stats/tests/test_mstats_basic.py", "scipy/optimize/tests/test_optimize.py", "scipy/fft/__init__.py", "scipy/linalg/tests/test_misc.py" ]
[ "import math\n\nimport numpy.linalg as nl\n\nimport numpy as np\nfrom numpy.testing import assert_\nfrom numpy.random import rand\n\nfrom .common import Benchmark, safe_import\n\nwith safe_import():\n import scipy.linalg as sl\n\n\ndef random(size):\n return rand(*size)\n\n\nclass Bench(Benchmark):\n params = [\n [20, 100, 500, 1000],\n ['contig', 'nocont'],\n ['numpy', 'scipy']\n ]\n param_names = ['size', 'contiguous', 'module']\n\n def __init__(self):\n # likely not useful to benchmark svd for large sizes\n self.time_svd.__func__.params = [[20, 100, 500]] + self.params[1:]\n\n def setup(self, size, contig, module):\n if module == 'numpy' and size >= 200:\n # skip: slow, and not useful to benchmark numpy\n raise NotImplementedError()\n\n a = random([size, size])\n # larger diagonal ensures non-singularity:\n for i in range(size):\n a[i, i] = 10*(.1+a[i, i])\n b = random([size])\n\n if contig != 'contig':\n a = a[-1::-1, -1::-1] # turn into a non-contiguous array\n assert_(not a.flags['CONTIGUOUS'])\n\n self.a = a\n self.b = b\n\n def time_solve(self, size, contig, module):\n if module == 'numpy':\n nl.solve(self.a, self.b)\n else:\n sl.solve(self.a, self.b)\n\n def time_solve_triangular(self, size, contig, module):\n # treats self.a as a lower-triangular matrix by ignoring the strictly\n # upper-triangular part\n if module == 'numpy':\n pass\n else:\n sl.solve_triangular(self.a, self.b, lower=True)\n\n def time_inv(self, size, contig, module):\n if module == 'numpy':\n nl.inv(self.a)\n else:\n sl.inv(self.a)\n\n def time_det(self, size, contig, module):\n if module == 'numpy':\n nl.det(self.a)\n else:\n sl.det(self.a)\n\n def time_eigvals(self, size, contig, module):\n if module == 'numpy':\n nl.eigvals(self.a)\n else:\n sl.eigvals(self.a)\n\n def time_svd(self, size, contig, module):\n if module == 'numpy':\n nl.svd(self.a)\n else:\n sl.svd(self.a)\n\n # Retain old benchmark results (remove this if changing the benchmark)\n time_det.version = \"87e530ee50eb6b6c06c7a8abe51c2168e133d5cbd486f4c1c2b9cedc5a078325\"\n time_eigvals.version = \"9d68d3a6b473df9bdda3d3fd25c7f9aeea7d5cee869eec730fb2a2bcd1dfb907\"\n time_inv.version = \"20beee193c84a5713da9749246a7c40ef21590186c35ed00a4fe854cce9e153b\"\n time_solve.version = \"1fe788070f1c9132cbe78a47fdb4cce58266427fc636d2aa9450e3c7d92c644c\"\n time_svd.version = \"0ccbda456d096e459d4a6eefc6c674a815179e215f83931a81cfa8c18e39d6e3\"\n\n\nclass Norm(Benchmark):\n params = [\n [(20, 20), (100, 100), (1000, 1000), (20, 1000), (1000, 20)],\n ['contig', 'nocont'],\n ['numpy', 'scipy']\n ]\n param_names = ['shape', 'contiguous', 'module']\n\n def setup(self, shape, contig, module):\n a = np.random.randn(*shape)\n if contig != 'contig':\n a = a[-1::-1,-1::-1] # turn into a non-contiguous array\n assert_(not a.flags['CONTIGUOUS'])\n self.a = a\n\n def time_1_norm(self, size, contig, module):\n if module == 'numpy':\n nl.norm(self.a, ord=1)\n else:\n sl.norm(self.a, ord=1)\n\n def time_inf_norm(self, size, contig, module):\n if module == 'numpy':\n nl.norm(self.a, ord=np.inf)\n else:\n sl.norm(self.a, ord=np.inf)\n\n def time_frobenius_norm(self, size, contig, module):\n if module == 'numpy':\n nl.norm(self.a)\n else:\n sl.norm(self.a)\n\n\nclass Lstsq(Benchmark):\n \"\"\"\n Test the speed of four least-squares solvers on not full rank matrices.\n Also check the difference in the solutions.\n\n The matrix has the size ``(m, 2/3*m)``; the rank is ``1/2 * m``.\n Matrix values are random in the range (-5, 5), the same is for the right\n hand side. 
The complex matrix is the sum of real and imaginary matrices.\n \"\"\"\n\n param_names = ['dtype', 'size', 'driver']\n params = [\n [np.float64, np.complex128],\n [10, 100, 1000],\n ['gelss', 'gelsy', 'gelsd', 'numpy'],\n ]\n\n def setup(self, dtype, size, lapack_driver):\n if lapack_driver == 'numpy' and size >= 200:\n # skip: slow, and not useful to benchmark numpy\n raise NotImplementedError()\n\n rng = np.random.default_rng(1234)\n n = math.ceil(2./3. * size)\n k = math.ceil(1./2. * size)\n m = size\n\n if dtype is np.complex128:\n A = ((10 * rng.random((m,k)) - 5) +\n 1j*(10 * rng.random((m,k)) - 5))\n temp = ((10 * rng.random((k,n)) - 5) +\n 1j*(10 * rng.random((k,n)) - 5))\n b = ((10 * rng.random((m,1)) - 5) +\n 1j*(10 * rng.random((m,1)) - 5))\n else:\n A = (10 * rng.random((m,k)) - 5)\n temp = 10 * rng.random((k,n)) - 5\n b = 10 * rng.random((m,1)) - 5\n\n self.A = A.dot(temp)\n self.b = b\n\n def time_lstsq(self, dtype, size, lapack_driver):\n if lapack_driver == 'numpy':\n np.linalg.lstsq(self.A, self.b,\n rcond=np.finfo(self.A.dtype).eps * 100)\n else:\n sl.lstsq(self.A, self.b, cond=None, overwrite_a=False,\n overwrite_b=False, check_finite=False,\n lapack_driver=lapack_driver)\n\n # Retain old benchmark results (remove this if changing the benchmark)\n time_lstsq.version = \"15ee0be14a0a597c7d1c9a3dab2c39e15c8ac623484410ffefa406bf6b596ebe\"\n\n\nclass SpecialMatrices(Benchmark):\n param_names = ['size']\n params = [[4, 128]]\n\n def setup(self, size):\n self.x = np.arange(1, size + 1).astype(float)\n self.small_blocks = [np.ones([2, 2])] * (size//2)\n self.big_blocks = [np.ones([size//2, size//2]),\n np.ones([size//2, size//2])]\n\n def time_block_diag_small(self, size):\n sl.block_diag(*self.small_blocks)\n\n def time_block_diag_big(self, size):\n sl.block_diag(*self.big_blocks)\n\n def time_circulant(self, size):\n sl.circulant(self.x)\n\n def time_companion(self, size):\n sl.companion(self.x)\n\n def time_dft(self, size):\n sl.dft(size)\n\n def time_hadamard(self, size):\n sl.hadamard(size)\n\n def time_hankel(self, size):\n sl.hankel(self.x)\n\n def time_helmert(self, size):\n sl.helmert(size)\n\n def time_hilbert(self, size):\n sl.hilbert(size)\n\n def time_invhilbert(self, size):\n sl.invhilbert(size)\n\n def time_leslie(self, size):\n sl.leslie(self.x, self.x[1:])\n\n def time_pascal(self, size):\n sl.pascal(size)\n\n def time_invpascal(self, size):\n sl.invpascal(size)\n\n def time_toeplitz(self, size):\n sl.toeplitz(self.x)\n\n def time_tri(self, size):\n sl.tri(size)\n\n\nclass GetFuncs(Benchmark):\n def setup(self):\n self.x = np.eye(1)\n\n def time_get_blas_funcs(self):\n sl.blas.get_blas_funcs('gemm', dtype=float)\n\n def time_get_blas_funcs_2(self):\n sl.blas.get_blas_funcs(('gemm', 'axpy'), (self.x, self.x))\n\n def time_small_cholesky(self):\n sl.cholesky(self.x)\n", "\"\"\"\nTests for the stats.mstats module (support for masked arrays)\n\"\"\"\nimport warnings\nimport platform\n\nimport numpy as np\nfrom numpy import nan\nimport numpy.ma as ma\nfrom numpy.ma import masked, nomask\n\nimport scipy.stats.mstats as mstats\nfrom scipy import stats\nfrom .common_tests import check_named_results\nimport pytest\nfrom pytest import raises as assert_raises\nfrom numpy.ma.testutils import (assert_equal, assert_almost_equal,\n assert_array_almost_equal, assert_array_almost_equal_nulp, assert_,\n assert_allclose, assert_array_equal)\nfrom numpy.testing import suppress_warnings\nfrom scipy.stats import mstats_basic\n\nclass TestMquantiles:\n def 
test_mquantiles_limit_keyword(self):\n # Regression test for Trac ticket #867\n data = np.array([[6., 7., 1.],\n [47., 15., 2.],\n [49., 36., 3.],\n [15., 39., 4.],\n [42., 40., -999.],\n [41., 41., -999.],\n [7., -999., -999.],\n [39., -999., -999.],\n [43., -999., -999.],\n [40., -999., -999.],\n [36., -999., -999.]])\n desired = [[19.2, 14.6, 1.45],\n [40.0, 37.5, 2.5],\n [42.8, 40.05, 3.55]]\n quants = mstats.mquantiles(data, axis=0, limit=(0, 50))\n assert_almost_equal(quants, desired)\n\n\ndef check_equal_gmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):\n # Note this doesn't test when axis is not specified\n x = mstats.gmean(array_like, axis=axis, dtype=dtype)\n assert_allclose(x, desired, rtol=rtol)\n assert_equal(x.dtype, dtype)\n\ndef check_equal_hmean(array_like, desired, axis=None, dtype=None, rtol=1e-7):\n x = stats.hmean(array_like, axis=axis, dtype=dtype)\n assert_allclose(x, desired, rtol=rtol)\n assert_equal(x.dtype, dtype)\n\n\nclass TestGeoMean:\n def test_1d(self):\n a = [1, 2, 3, 4]\n desired = np.power(1*2*3*4, 1./4.)\n check_equal_gmean(a, desired, rtol=1e-14)\n\n def test_1d_ma(self):\n # Test a 1d masked array\n a = ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n desired = 45.2872868812\n check_equal_gmean(a, desired)\n\n a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])\n desired = np.power(1*2*3, 1./3.)\n check_equal_gmean(a, desired, rtol=1e-14)\n\n def test_1d_ma_value(self):\n # Test a 1d masked array with a masked value\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100], mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\n desired = 41.4716627439\n check_equal_gmean(a, desired)\n\n def test_1d_ma0(self):\n # Test a 1d masked array with zero element\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 0])\n desired = 41.4716627439\n with np.errstate(divide='ignore'):\n check_equal_gmean(a, desired)\n\n def test_1d_ma_inf(self):\n # Test a 1d masked array with negative element\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, -1])\n desired = 41.4716627439\n with np.errstate(invalid='ignore'):\n check_equal_gmean(a, desired)\n\n @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping')\n def test_1d_float96(self):\n a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])\n desired_dt = np.power(1*2*3, 1./3.).astype(np.float96)\n check_equal_gmean(a, desired_dt, dtype=np.float96, rtol=1e-14)\n\n def test_2d_ma(self):\n a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],\n mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])\n desired = np.array([1, 2, 3, 4])\n check_equal_gmean(a, desired, axis=0, rtol=1e-14)\n\n desired = ma.array([np.power(1*2*3*4, 1./4.),\n np.power(2*3, 1./2.),\n np.power(1*4, 1./2.)])\n check_equal_gmean(a, desired, axis=-1, rtol=1e-14)\n\n # Test a 2d masked array\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n desired = 52.8885199\n check_equal_gmean(np.ma.array(a), desired)\n\n\nclass TestHarMean:\n def test_1d(self):\n a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])\n desired = 3. 
/ (1./1 + 1./2 + 1./3)\n check_equal_hmean(a, desired, rtol=1e-14)\n\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n desired = 34.1417152147\n check_equal_hmean(a, desired)\n\n a = np.ma.array([10, 20, 30, 40, 50, 60, 70, 80, 90, 100],\n mask=[0, 0, 0, 0, 0, 0, 0, 0, 0, 1])\n desired = 31.8137186141\n check_equal_hmean(a, desired)\n\n @pytest.mark.skipif(not hasattr(np, 'float96'), reason='cannot find float96 so skipping')\n def test_1d_float96(self):\n a = ma.array([1, 2, 3, 4], mask=[0, 0, 0, 1])\n desired_dt = np.asarray(3. / (1./1 + 1./2 + 1./3), dtype=np.float96)\n check_equal_hmean(a, desired_dt, dtype=np.float96)\n\n def test_2d(self):\n a = ma.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],\n mask=[[0, 0, 0, 0], [1, 0, 0, 1], [0, 1, 1, 0]])\n desired = ma.array([1, 2, 3, 4])\n check_equal_hmean(a, desired, axis=0, rtol=1e-14)\n\n desired = [4./(1/1.+1/2.+1/3.+1/4.), 2./(1/2.+1/3.), 2./(1/1.+1/4.)]\n check_equal_hmean(a, desired, axis=-1, rtol=1e-14)\n\n a = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n desired = 38.6696271841\n check_equal_hmean(np.ma.array(a), desired)\n\n\nclass TestRanking:\n def test_ranking(self):\n x = ma.array([0,1,1,1,2,3,4,5,5,6,])\n assert_almost_equal(mstats.rankdata(x),\n [1,3,3,3,5,6,7,8.5,8.5,10])\n x[[3,4]] = masked\n assert_almost_equal(mstats.rankdata(x),\n [1,2.5,2.5,0,0,4,5,6.5,6.5,8])\n assert_almost_equal(mstats.rankdata(x, use_missing=True),\n [1,2.5,2.5,4.5,4.5,4,5,6.5,6.5,8])\n x = ma.array([0,1,5,1,2,4,3,5,1,6,])\n assert_almost_equal(mstats.rankdata(x),\n [1,3,8.5,3,5,7,6,8.5,3,10])\n x = ma.array([[0,1,1,1,2], [3,4,5,5,6,]])\n assert_almost_equal(mstats.rankdata(x),\n [[1,3,3,3,5], [6,7,8.5,8.5,10]])\n assert_almost_equal(mstats.rankdata(x, axis=1),\n [[1,3,3,3,5], [1,2,3.5,3.5,5]])\n assert_almost_equal(mstats.rankdata(x,axis=0),\n [[1,1,1,1,1], [2,2,2,2,2,]])\n\n\nclass TestCorr:\n def test_pearsonr(self):\n # Tests some computations of Pearson's r\n x = ma.arange(10)\n with warnings.catch_warnings():\n # The tests in this context are edge cases, with perfect\n # correlation or anticorrelation, or totally masked data.\n # None of these should trigger a RuntimeWarning.\n warnings.simplefilter(\"error\", RuntimeWarning)\n\n assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)\n assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)\n\n x = ma.array(x, mask=True)\n pr = mstats.pearsonr(x, x)\n assert_(pr[0] is masked)\n assert_(pr[1] is masked)\n\n x1 = ma.array([-1.0, 0.0, 1.0])\n y1 = ma.array([0, 0, 3])\n r, p = mstats.pearsonr(x1, y1)\n assert_almost_equal(r, np.sqrt(3)/2)\n assert_almost_equal(p, 1.0/3)\n\n # (x2, y2) have the same unmasked data as (x1, y1).\n mask = [False, False, False, True]\n x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)\n y2 = ma.array([0, 0, 3, -1], mask=mask)\n r, p = mstats.pearsonr(x2, y2)\n assert_almost_equal(r, np.sqrt(3)/2)\n assert_almost_equal(p, 1.0/3)\n\n def test_pearsonr_misaligned_mask(self):\n mx = np.ma.masked_array([1, 2, 3, 4, 5, 6], mask=[0, 1, 0, 0, 0, 0])\n my = np.ma.masked_array([9, 8, 7, 6, 5, 9], mask=[0, 0, 1, 0, 0, 0])\n x = np.array([1, 4, 5, 6])\n y = np.array([9, 6, 5, 9])\n mr, mp = mstats.pearsonr(mx, my)\n r, p = stats.pearsonr(x, y)\n assert_equal(mr, r)\n assert_equal(mp, p)\n\n def test_spearmanr(self):\n # Tests some computations of Spearman's rho\n (x, y) = ([5.05,6.75,3.21,2.66], [1.65,2.64,2.64,6.95])\n assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)\n (x, y) = ([5.05,6.75,3.21,2.66,np.nan],[1.65,2.64,2.64,6.95,np.nan])\n (x, y) 
= (ma.fix_invalid(x), ma.fix_invalid(y))\n assert_almost_equal(mstats.spearmanr(x,y)[0], -0.6324555)\n\n x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,\n 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]\n y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,\n 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]\n assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)\n x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,\n 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7, np.nan]\n y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,\n 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4, np.nan]\n (x, y) = (ma.fix_invalid(x), ma.fix_invalid(y))\n assert_almost_equal(mstats.spearmanr(x,y)[0], 0.6887299)\n # Next test is to make sure calculation uses sufficient precision.\n # The denominator's value is ~n^3 and used to be represented as an\n # int. 2000**3 > 2**32 so these arrays would cause overflow on\n # some machines.\n x = list(range(2000))\n y = list(range(2000))\n y[0], y[9] = y[9], y[0]\n y[10], y[434] = y[434], y[10]\n y[435], y[1509] = y[1509], y[435]\n # rho = 1 - 6 * (2 * (9^2 + 424^2 + 1074^2))/(2000 * (2000^2 - 1))\n # = 1 - (1 / 500)\n # = 0.998\n assert_almost_equal(mstats.spearmanr(x,y)[0], 0.998)\n\n # test for namedtuple attributes\n res = mstats.spearmanr(x, y)\n attributes = ('correlation', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n def test_spearmanr_alternative(self):\n # check against R\n # options(digits=16)\n # cor.test(c(2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,\n # 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7),\n # c(22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,\n # 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4),\n # alternative='two.sided', method='spearman')\n x = [2.0, 47.4, 42.0, 10.8, 60.1, 1.7, 64.0, 63.1,\n 1.0, 1.4, 7.9, 0.3, 3.9, 0.3, 6.7]\n y = [22.6, 8.3, 44.4, 11.9, 24.6, 0.6, 5.7, 41.6,\n 0.0, 0.6, 6.7, 3.8, 1.0, 1.2, 1.4]\n\n r_exp = 0.6887298747763864 # from cor.test\n\n r, p = mstats.spearmanr(x, y)\n assert_allclose(r, r_exp)\n assert_allclose(p, 0.004519192910756)\n\n r, p = mstats.spearmanr(x, y, alternative='greater')\n assert_allclose(r, r_exp)\n assert_allclose(p, 0.002259596455378)\n\n r, p = mstats.spearmanr(x, y, alternative='less')\n assert_allclose(r, r_exp)\n assert_allclose(p, 0.9977404035446)\n\n # intuitive test (with obvious positive correlation)\n n = 100\n x = np.linspace(0, 5, n)\n y = 0.1*x + np.random.rand(n) # y is positively correlated w/ x\n\n stat1, p1 = mstats.spearmanr(x, y)\n\n stat2, p2 = mstats.spearmanr(x, y, alternative=\"greater\")\n assert_allclose(p2, p1 / 2) # positive correlation -> small p\n\n stat3, p3 = mstats.spearmanr(x, y, alternative=\"less\")\n assert_allclose(p3, 1 - p1 / 2) # positive correlation -> large p\n\n assert stat1 == stat2 == stat3\n\n with pytest.raises(ValueError, match=\"alternative must be 'less'...\"):\n mstats.spearmanr(x, y, alternative=\"ekki-ekki\")\n\n @pytest.mark.skipif(platform.machine() == 'ppc64le',\n reason=\"fails/crashes on ppc64le\")\n def test_kendalltau(self):\n # check case with with maximum disorder and p=1\n x = ma.array(np.array([9, 2, 5, 6]))\n y = ma.array(np.array([4, 7, 9, 11]))\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = [0.0, 1.0]\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)\n\n # simple case without ties\n x = ma.array(np.arange(10))\n y = ma.array(np.arange(10))\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = [1.0, 5.511463844797e-07]\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), 
expected)\n\n # check exception in case of invalid method keyword\n assert_raises(ValueError, mstats.kendalltau, x, y, method='banana')\n\n # swap a couple of values\n b = y[1]\n y[1] = y[2]\n y[2] = b\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = [0.9555555555555556, 5.511463844797e-06]\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)\n\n # swap a couple more\n b = y[5]\n y[5] = y[6]\n y[6] = b\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = [0.9111111111111111, 2.976190476190e-05]\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)\n\n # same in opposite direction\n x = ma.array(np.arange(10))\n y = ma.array(np.arange(10)[::-1])\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = [-1.0, 5.511463844797e-07]\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)\n\n # swap a couple of values\n b = y[1]\n y[1] = y[2]\n y[2] = b\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = [-0.9555555555555556, 5.511463844797e-06]\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)\n\n # swap a couple more\n b = y[5]\n y[5] = y[6]\n y[6] = b\n # Cross-check with exact result from R:\n # cor.test(x,y,method=\"kendall\",exact=1)\n expected = [-0.9111111111111111, 2.976190476190e-05]\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y)), expected)\n\n # Tests some computations of Kendall's tau\n x = ma.fix_invalid([5.05, 6.75, 3.21, 2.66, np.nan])\n y = ma.fix_invalid([1.65, 26.5, -5.93, 7.96, np.nan])\n z = ma.fix_invalid([1.65, 2.64, 2.64, 6.95, np.nan])\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y)),\n [+0.3333333, 0.75])\n assert_almost_equal(np.asarray(mstats.kendalltau(x, y, method='asymptotic')),\n [+0.3333333, 0.4969059])\n assert_almost_equal(np.asarray(mstats.kendalltau(x, z)),\n [-0.5477226, 0.2785987])\n #\n x = ma.fix_invalid([0, 0, 0, 0, 20, 20, 0, 60, 0, 20,\n 10, 10, 0, 40, 0, 20, 0, 0, 0, 0, 0, np.nan])\n y = ma.fix_invalid([0, 80, 80, 80, 10, 33, 60, 0, 67, 27,\n 25, 80, 80, 80, 80, 80, 80, 0, 10, 45, np.nan, 0])\n result = mstats.kendalltau(x, y)\n assert_almost_equal(np.asarray(result), [-0.1585188, 0.4128009])\n\n # test for namedtuple attributes\n attributes = ('correlation', 'pvalue')\n check_named_results(result, attributes, ma=True)\n\n @pytest.mark.skipif(platform.machine() == 'ppc64le',\n reason=\"fails/crashes on ppc64le\")\n @pytest.mark.slow\n def test_kendalltau_large(self):\n # make sure internal variable use correct precision with\n # larger arrays\n x = np.arange(2000, dtype=float)\n x = ma.masked_greater(x, 1995)\n y = np.arange(2000, dtype=float)\n y = np.concatenate((y[1000:], y[:1000]))\n assert_(np.isfinite(mstats.kendalltau(x, y)[1]))\n\n\n def test_kendalltau_seasonal(self):\n # Tests the seasonal Kendall tau.\n x = [[nan, nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],\n [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],\n [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],\n [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]\n x = ma.fix_invalid(x).T\n output = mstats.kendalltau_seasonal(x)\n assert_almost_equal(output['global p-value (indep)'], 0.008, 3)\n assert_almost_equal(output['seasonal p-value'].round(2),\n [0.18,0.53,0.20,0.04])\n\n def test_kendall_p_exact_medium(self):\n # Test for the exact method with medium samples (some n >= 171)\n # expected values generated using SymPy\n expectations = {(100, 
2393): 0.62822615287956040664,\n (101, 2436): 0.60439525773513602669,\n (170, 0): 2.755801935583541e-307,\n (171, 0): 0.0,\n (171, 1): 2.755801935583541e-307,\n (172, 1): 0.0,\n (200, 9797): 0.74753983745929675209,\n (201, 9656): 0.40959218958120363618}\n for nc, expected in expectations.items():\n res = mstats_basic._kendall_p_exact(nc[0], nc[1])\n assert_almost_equal(res, expected)\n\n @pytest.mark.slow\n def test_kendall_p_exact_large(self):\n # Test for the exact method with large samples (n >= 171)\n # expected values generated using SymPy\n expectations = {(400, 38965): 0.48444283672113314099,\n (401, 39516): 0.66363159823474837662,\n (800, 156772): 0.42265448483120932055,\n (801, 157849): 0.53437553412194416236,\n (1600, 637472): 0.84200727400323538419,\n (1601, 630304): 0.34465255088058593946}\n\n for nc, expected in expectations.items():\n res = mstats_basic._kendall_p_exact(nc[0], nc[1])\n assert_almost_equal(res, expected)\n\n\n def test_pointbiserial(self):\n x = [1,0,1,1,1,1,0,1,0,0,0,1,1,0,0,0,1,1,1,0,0,0,0,0,0,0,0,1,0,\n 0,0,0,0,1,-1]\n y = [14.8,13.8,12.4,10.1,7.1,6.1,5.8,4.6,4.3,3.5,3.3,3.2,3.0,\n 2.8,2.8,2.5,2.4,2.3,2.1,1.7,1.7,1.5,1.3,1.3,1.2,1.2,1.1,\n 0.8,0.7,0.6,0.5,0.2,0.2,0.1,np.nan]\n assert_almost_equal(mstats.pointbiserialr(x, y)[0], 0.36149, 5)\n\n # test for namedtuple attributes\n res = mstats.pointbiserialr(x, y)\n attributes = ('correlation', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n\nclass TestTrimming:\n\n def test_trim(self):\n a = ma.arange(10)\n assert_equal(mstats.trim(a), [0,1,2,3,4,5,6,7,8,9])\n a = ma.arange(10)\n assert_equal(mstats.trim(a,(2,8)), [None,None,2,3,4,5,6,7,8,None])\n a = ma.arange(10)\n assert_equal(mstats.trim(a,limits=(2,8),inclusive=(False,False)),\n [None,None,None,3,4,5,6,7,None,None])\n a = ma.arange(10)\n assert_equal(mstats.trim(a,limits=(0.1,0.2),relative=True),\n [None,1,2,3,4,5,6,7,None,None])\n\n a = ma.arange(12)\n a[[0,-1]] = a[5] = masked\n assert_equal(mstats.trim(a, (2,8)),\n [None, None, 2, 3, 4, None, 6, 7, 8, None, None, None])\n\n x = ma.arange(100).reshape(10, 10)\n expected = [1]*10 + [0]*70 + [1]*20\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)\n assert_equal(trimx._mask.ravel(), expected)\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)\n assert_equal(trimx._mask.ravel(), expected)\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=-1)\n assert_equal(trimx._mask.T.ravel(), expected)\n\n # same as above, but with an extra masked row inserted\n x = ma.arange(110).reshape(11, 10)\n x[1] = masked\n expected = [1]*20 + [0]*70 + [1]*20\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=None)\n assert_equal(trimx._mask.ravel(), expected)\n trimx = mstats.trim(x, (0.1,0.2), relative=True, axis=0)\n assert_equal(trimx._mask.ravel(), expected)\n trimx = mstats.trim(x.T, (0.1,0.2), relative=True, axis=-1)\n assert_equal(trimx.T._mask.ravel(), expected)\n\n def test_trim_old(self):\n x = ma.arange(100)\n assert_equal(mstats.trimboth(x).count(), 60)\n assert_equal(mstats.trimtail(x,tail='r').count(), 80)\n x[50:70] = masked\n trimx = mstats.trimboth(x)\n assert_equal(trimx.count(), 48)\n assert_equal(trimx._mask, [1]*16 + [0]*34 + [1]*20 + [0]*14 + [1]*16)\n x._mask = nomask\n x.shape = (10,10)\n assert_equal(mstats.trimboth(x).count(), 60)\n assert_equal(mstats.trimtail(x).count(), 80)\n\n def test_trimr(self):\n x = ma.arange(10)\n result = mstats.trimr(x, limits=(0.15, 0.14), inclusive=(False, False))\n expected = ma.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n mask=[1, 
1, 0, 0, 0, 0, 0, 0, 0, 1])\n assert_equal(result, expected)\n assert_equal(result.mask, expected.mask)\n\n def test_trimmedmean(self):\n data = ma.array([77, 87, 88,114,151,210,219,246,253,262,\n 296,299,306,376,428,515,666,1310,2611])\n assert_almost_equal(mstats.trimmed_mean(data,0.1), 343, 0)\n assert_almost_equal(mstats.trimmed_mean(data,(0.1,0.1)), 343, 0)\n assert_almost_equal(mstats.trimmed_mean(data,(0.2,0.2)), 283, 0)\n\n def test_trimmed_stde(self):\n data = ma.array([77, 87, 88,114,151,210,219,246,253,262,\n 296,299,306,376,428,515,666,1310,2611])\n assert_almost_equal(mstats.trimmed_stde(data,(0.2,0.2)), 56.13193, 5)\n assert_almost_equal(mstats.trimmed_stde(data,0.2), 56.13193, 5)\n\n def test_winsorization(self):\n data = ma.array([77, 87, 88,114,151,210,219,246,253,262,\n 296,299,306,376,428,515,666,1310,2611])\n assert_almost_equal(mstats.winsorize(data,(0.2,0.2)).var(ddof=1),\n 21551.4, 1)\n assert_almost_equal(\n mstats.winsorize(data, (0.2,0.2),(False,False)).var(ddof=1),\n 11887.3, 1)\n data[5] = masked\n winsorized = mstats.winsorize(data)\n assert_equal(winsorized.mask, data.mask)\n\n def test_winsorization_nan(self):\n data = ma.array([np.nan, np.nan, 0, 1, 2])\n assert_raises(ValueError, mstats.winsorize, data, (0.05, 0.05),\n nan_policy='raise')\n # Testing propagate (default behavior)\n assert_equal(mstats.winsorize(data, (0.4, 0.4)),\n ma.array([2, 2, 2, 2, 2]))\n assert_equal(mstats.winsorize(data, (0.8, 0.8)),\n ma.array([np.nan, np.nan, np.nan, np.nan, np.nan]))\n assert_equal(mstats.winsorize(data, (0.4, 0.4), nan_policy='omit'),\n ma.array([np.nan, np.nan, 2, 2, 2]))\n assert_equal(mstats.winsorize(data, (0.8, 0.8), nan_policy='omit'),\n ma.array([np.nan, np.nan, 2, 2, 2]))\n\nclass TestMoments:\n # Comparison numbers are found using R v.1.5.1\n # note that length(testcase) = 4\n # testmathworks comes from documentation for the\n # Statistics Toolbox for Matlab and can be found at both\n # https://www.mathworks.com/help/stats/kurtosis.html\n # https://www.mathworks.com/help/stats/skewness.html\n # Note that both test cases came from here.\n testcase = [1,2,3,4]\n testmathworks = ma.fix_invalid([1.165, 0.6268, 0.0751, 0.3516, -0.6965,\n np.nan])\n testcase_2d = ma.array(\n np.array([[0.05245846, 0.50344235, 0.86589117, 0.36936353, 0.46961149],\n [0.11574073, 0.31299969, 0.45925772, 0.72618805, 0.75194407],\n [0.67696689, 0.91878127, 0.09769044, 0.04645137, 0.37615733],\n [0.05903624, 0.29908861, 0.34088298, 0.66216337, 0.83160998],\n [0.64619526, 0.94894632, 0.27855892, 0.0706151, 0.39962917]]),\n mask=np.array([[True, False, False, True, False],\n [True, True, True, False, True],\n [False, False, False, False, False],\n [True, True, True, True, True],\n [False, False, True, False, False]], dtype=bool))\n\n def _assert_equal(self, actual, expect, *, shape=None, dtype=None):\n expect = np.asarray(expect)\n if shape is not None:\n expect = np.broadcast_to(expect, shape)\n assert_array_equal(actual, expect)\n if dtype is None:\n dtype = expect.dtype\n assert actual.dtype == dtype\n\n def test_moment(self):\n y = mstats.moment(self.testcase,1)\n assert_almost_equal(y,0.0,10)\n y = mstats.moment(self.testcase,2)\n assert_almost_equal(y,1.25)\n y = mstats.moment(self.testcase,3)\n assert_almost_equal(y,0.0)\n y = mstats.moment(self.testcase,4)\n assert_almost_equal(y,2.5625)\n\n # check array_like input for moment\n y = mstats.moment(self.testcase, [1, 2, 3, 4])\n assert_allclose(y, [0, 1.25, 0, 2.5625])\n\n # check moment input consists only of integers\n y = 
mstats.moment(self.testcase, 0.0)\n assert_allclose(y, 1.0)\n assert_raises(ValueError, mstats.moment, self.testcase, 1.2)\n y = mstats.moment(self.testcase, [1.0, 2, 3, 4.0])\n assert_allclose(y, [0, 1.25, 0, 2.5625])\n\n # test empty input\n y = mstats.moment([])\n self._assert_equal(y, np.nan, dtype=np.float64)\n y = mstats.moment(np.array([], dtype=np.float32))\n self._assert_equal(y, np.nan, dtype=np.float32)\n y = mstats.moment(np.zeros((1, 0)), axis=0)\n self._assert_equal(y, [], shape=(0,), dtype=np.float64)\n y = mstats.moment([[]], axis=1)\n self._assert_equal(y, np.nan, shape=(1,), dtype=np.float64)\n y = mstats.moment([[]], moment=[0, 1], axis=0)\n self._assert_equal(y, [], shape=(2, 0))\n\n x = np.arange(10.)\n x[9] = np.nan\n assert_equal(mstats.moment(x, 2), ma.masked) # NaN value is ignored\n\n def test_variation(self):\n y = mstats.variation(self.testcase)\n assert_almost_equal(y,0.44721359549996, 10)\n\n def test_variation_ddof(self):\n # test variation with delta degrees of freedom\n # regression test for gh-13341\n a = np.array([1, 2, 3, 4, 5])\n y = mstats.variation(a, ddof=1)\n assert_almost_equal(y, 0.5270462766947299)\n\n def test_skewness(self):\n y = mstats.skew(self.testmathworks)\n assert_almost_equal(y,-0.29322304336607,10)\n y = mstats.skew(self.testmathworks,bias=0)\n assert_almost_equal(y,-0.437111105023940,10)\n y = mstats.skew(self.testcase)\n assert_almost_equal(y,0.0,10)\n\n def test_kurtosis(self):\n # Set flags for axis = 0 and fisher=0 (Pearson's definition of kurtosis\n # for compatibility with Matlab)\n y = mstats.kurtosis(self.testmathworks, 0, fisher=0, bias=1)\n assert_almost_equal(y, 2.1658856802973, 10)\n # Note that MATLAB has confusing docs for the following case\n # kurtosis(x,0) gives an unbiased estimate of Pearson's skewness\n # kurtosis(x) gives a biased estimate of Fisher's skewness (Pearson-3)\n # The MATLAB docs imply that both should give Fisher's\n y = mstats.kurtosis(self.testmathworks, fisher=0, bias=0)\n assert_almost_equal(y, 3.663542721189047, 10)\n y = mstats.kurtosis(self.testcase, 0, 0)\n assert_almost_equal(y, 1.64)\n\n # test that kurtosis works on multidimensional masked arrays\n correct_2d = ma.array(np.array([-1.5, -3., -1.47247052385, 0.,\n -1.26979517952]),\n mask=np.array([False, False, False, True,\n False], dtype=bool))\n assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1),\n correct_2d)\n for i, row in enumerate(self.testcase_2d):\n assert_almost_equal(mstats.kurtosis(row), correct_2d[i])\n\n correct_2d_bias_corrected = ma.array(\n np.array([-1.5, -3., -1.88988209538, 0., -0.5234638463918877]),\n mask=np.array([False, False, False, True, False], dtype=bool))\n assert_array_almost_equal(mstats.kurtosis(self.testcase_2d, 1,\n bias=False),\n correct_2d_bias_corrected)\n for i, row in enumerate(self.testcase_2d):\n assert_almost_equal(mstats.kurtosis(row, bias=False),\n correct_2d_bias_corrected[i])\n\n # Check consistency between stats and mstats implementations\n assert_array_almost_equal_nulp(mstats.kurtosis(self.testcase_2d[2, :]),\n stats.kurtosis(self.testcase_2d[2, :]),\n nulp=4)\n\n def test_mode(self):\n a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]\n a2 = np.reshape(a1, (3,5))\n a3 = np.array([1,2,3,4,5,6])\n a4 = np.reshape(a3, (3,2))\n ma1 = ma.masked_where(ma.array(a1) > 2, a1)\n ma2 = ma.masked_where(a2 > 2, a2)\n ma3 = ma.masked_where(a3 < 2, a3)\n ma4 = ma.masked_where(ma.array(a4) < 2, a4)\n assert_equal(mstats.mode(a1, axis=None), (3,4))\n assert_equal(mstats.mode(a1, axis=0), (3,4))\n 
assert_equal(mstats.mode(ma1, axis=None), (0,3))\n assert_equal(mstats.mode(a2, axis=None), (3,4))\n assert_equal(mstats.mode(ma2, axis=None), (0,3))\n assert_equal(mstats.mode(a3, axis=None), (1,1))\n assert_equal(mstats.mode(ma3, axis=None), (2,1))\n assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))\n assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))\n assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))\n assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))\n assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))\n assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))\n\n a1_res = mstats.mode(a1, axis=None)\n\n # test for namedtuple attributes\n attributes = ('mode', 'count')\n check_named_results(a1_res, attributes, ma=True)\n\n def test_mode_modifies_input(self):\n # regression test for gh-6428: mode(..., axis=None) may not modify\n # the input array\n im = np.zeros((100, 100))\n im[:50, :] += 1\n im[:, :50] += 1\n cp = im.copy()\n mstats.mode(im, None)\n assert_equal(im, cp)\n\n\nclass TestPercentile:\n def setup_method(self):\n self.a1 = [3, 4, 5, 10, -3, -5, 6]\n self.a2 = [3, -6, -2, 8, 7, 4, 2, 1]\n self.a3 = [3., 4, 5, 10, -3, -5, -6, 7.0]\n\n def test_percentile(self):\n x = np.arange(8) * 0.5\n assert_equal(mstats.scoreatpercentile(x, 0), 0.)\n assert_equal(mstats.scoreatpercentile(x, 100), 3.5)\n assert_equal(mstats.scoreatpercentile(x, 50), 1.75)\n\n def test_2D(self):\n x = ma.array([[1, 1, 1],\n [1, 1, 1],\n [4, 4, 3],\n [1, 1, 1],\n [1, 1, 1]])\n assert_equal(mstats.scoreatpercentile(x, 50), [1, 1, 1])\n\n\nclass TestVariability:\n \"\"\" Comparison numbers are found using R v.1.5.1\n note that length(testcase) = 4\n \"\"\"\n testcase = ma.fix_invalid([1,2,3,4,np.nan])\n\n def test_sem(self):\n # This is not in R, so used: sqrt(var(testcase)*3/4) / sqrt(3)\n y = mstats.sem(self.testcase)\n assert_almost_equal(y, 0.6454972244)\n n = self.testcase.count()\n assert_allclose(mstats.sem(self.testcase, ddof=0) * np.sqrt(n/(n-2)),\n mstats.sem(self.testcase, ddof=2))\n\n def test_zmap(self):\n # This is not in R, so tested by using:\n # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)\n y = mstats.zmap(self.testcase, self.testcase)\n desired_unmaskedvals = ([-1.3416407864999, -0.44721359549996,\n 0.44721359549996, 1.3416407864999])\n assert_array_almost_equal(desired_unmaskedvals,\n y.data[y.mask == False], decimal=12)\n\n def test_zscore(self):\n # This is not in R, so tested by using:\n # (testcase[i]-mean(testcase,axis=0)) / sqrt(var(testcase)*3/4)\n y = mstats.zscore(self.testcase)\n desired = ma.fix_invalid([-1.3416407864999, -0.44721359549996,\n 0.44721359549996, 1.3416407864999, np.nan])\n assert_almost_equal(desired, y, decimal=12)\n\n\nclass TestMisc:\n\n def test_obrientransform(self):\n args = [[5]*5+[6]*11+[7]*9+[8]*3+[9]*2+[10]*2,\n [6]+[7]*2+[8]*4+[9]*9+[10]*16]\n result = [5*[3.1828]+11*[0.5591]+9*[0.0344]+3*[1.6086]+2*[5.2817]+2*[11.0538],\n [10.4352]+2*[4.8599]+4*[1.3836]+9*[0.0061]+16*[0.7277]]\n assert_almost_equal(np.round(mstats.obrientransform(*args).T, 4),\n result, 4)\n\n def test_ks_2samp(self):\n x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],\n [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],\n [3, 2, 5, 6, 18, 4, 9, 1, 1, nan, 1, 1, nan],\n [nan, 6, 11, 4, 17, nan, 6, 1, 1, 2, 5, 1, 1]]\n x = ma.fix_invalid(x).T\n (winter, spring, summer, fall) = x.T\n\n assert_almost_equal(np.round(mstats.ks_2samp(winter, spring), 4),\n (0.1818, 0.9628))\n 
assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'g'), 4),\n (0.1469, 0.6886))\n assert_almost_equal(np.round(mstats.ks_2samp(winter, spring, 'l'), 4),\n (0.1818, 0.6011))\n\n def test_friedmanchisq(self):\n # No missing values\n args = ([9.0,9.5,5.0,7.5,9.5,7.5,8.0,7.0,8.5,6.0],\n [7.0,6.5,7.0,7.5,5.0,8.0,6.0,6.5,7.0,7.0],\n [6.0,8.0,4.0,6.0,7.0,6.5,6.0,4.0,6.5,3.0])\n result = mstats.friedmanchisquare(*args)\n assert_almost_equal(result[0], 10.4737, 4)\n assert_almost_equal(result[1], 0.005317, 6)\n # Missing values\n x = [[nan,nan, 4, 2, 16, 26, 5, 1, 5, 1, 2, 3, 1],\n [4, 3, 5, 3, 2, 7, 3, 1, 1, 2, 3, 5, 3],\n [3, 2, 5, 6, 18, 4, 9, 1, 1,nan, 1, 1,nan],\n [nan, 6, 11, 4, 17,nan, 6, 1, 1, 2, 5, 1, 1]]\n x = ma.fix_invalid(x)\n result = mstats.friedmanchisquare(*x)\n assert_almost_equal(result[0], 2.0156, 4)\n assert_almost_equal(result[1], 0.5692, 4)\n\n # test for namedtuple attributes\n attributes = ('statistic', 'pvalue')\n check_named_results(result, attributes, ma=True)\n\n\ndef test_regress_simple():\n # Regress a line with sinusoidal noise. Test for #1273.\n x = np.linspace(0, 100, 100)\n y = 0.2 * np.linspace(0, 100, 100) + 10\n y += np.sin(np.linspace(0, 20, 100))\n\n result = mstats.linregress(x, y)\n\n # Result is of a correct class and with correct fields\n lr = stats._stats_mstats_common.LinregressResult\n assert_(isinstance(result, lr))\n attributes = ('slope', 'intercept', 'rvalue', 'pvalue', 'stderr')\n check_named_results(result, attributes, ma=True)\n assert 'intercept_stderr' in dir(result)\n\n # Slope and intercept are estimated correctly\n assert_almost_equal(result.slope, 0.19644990055858422)\n assert_almost_equal(result.intercept, 10.211269918932341)\n assert_almost_equal(result.stderr, 0.002395781449783862)\n assert_almost_equal(result.intercept_stderr, 0.13866936078570702)\n\ndef test_theilslopes():\n # Test for basic slope and intercept.\n slope, intercept, lower, upper = mstats.theilslopes([0, 1, 1])\n assert_almost_equal(slope, 0.5)\n assert_almost_equal(intercept, 0.5)\n\n # Test for correct masking.\n y = np.ma.array([0, 1, 100, 1], mask=[False, False, True, False])\n slope, intercept, lower, upper = mstats.theilslopes(y)\n assert_almost_equal(slope, 1./3)\n assert_almost_equal(intercept, 2./3)\n\n # Test of confidence intervals from example in Sen (1968).\n x = [1, 2, 3, 4, 10, 12, 18]\n y = [9, 15, 19, 20, 45, 55, 78]\n slope, intercept, lower, upper = mstats.theilslopes(y, x, 0.07)\n assert_almost_equal(slope, 4)\n assert_almost_equal(upper, 4.38, decimal=2)\n assert_almost_equal(lower, 3.71, decimal=2)\n\n\ndef test_siegelslopes():\n # method should be exact for straight line\n y = 2 * np.arange(10) + 0.5\n assert_equal(mstats.siegelslopes(y), (2.0, 0.5))\n assert_equal(mstats.siegelslopes(y, method='separate'), (2.0, 0.5))\n\n x = 2 * np.arange(10)\n y = 5 * x - 3.0\n assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))\n assert_equal(mstats.siegelslopes(y, x, method='separate'), (5.0, -3.0))\n\n # method is robust to outliers: brekdown point of 50%\n y[:4] = 1000\n assert_equal(mstats.siegelslopes(y, x), (5.0, -3.0))\n\n # if there are no outliers, results should be comparble to linregress\n x = np.arange(10)\n y = -2.3 + 0.3*x + stats.norm.rvs(size=10, random_state=231)\n slope_ols, intercept_ols, _, _, _ = stats.linregress(x, y)\n\n slope, intercept = mstats.siegelslopes(y, x)\n assert_allclose(slope, slope_ols, rtol=0.1)\n assert_allclose(intercept, intercept_ols, rtol=0.1)\n\n slope, intercept = mstats.siegelslopes(y, x, 
method='separate')\n assert_allclose(slope, slope_ols, rtol=0.1)\n assert_allclose(intercept, intercept_ols, rtol=0.1)\n\n\ndef test_plotting_positions():\n # Regression test for #1256\n pos = mstats.plotting_positions(np.arange(3), 0, 0)\n assert_array_almost_equal(pos.data, np.array([0.25, 0.5, 0.75]))\n\n\nclass TestNormalitytests():\n\n def test_vs_nonmasked(self):\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\n assert_array_almost_equal(mstats.normaltest(x),\n stats.normaltest(x))\n assert_array_almost_equal(mstats.skewtest(x),\n stats.skewtest(x))\n assert_array_almost_equal(mstats.kurtosistest(x),\n stats.kurtosistest(x))\n\n funcs = [stats.normaltest, stats.skewtest, stats.kurtosistest]\n mfuncs = [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]\n x = [1, 2, 3, 4]\n for func, mfunc in zip(funcs, mfuncs):\n assert_raises(ValueError, func, x)\n assert_raises(ValueError, mfunc, x)\n\n def test_axis_None(self):\n # Test axis=None (equal to axis=0 for 1-D input)\n x = np.array((-2,-1,0,1,2,3)*4)**2\n assert_allclose(mstats.normaltest(x, axis=None), mstats.normaltest(x))\n assert_allclose(mstats.skewtest(x, axis=None), mstats.skewtest(x))\n assert_allclose(mstats.kurtosistest(x, axis=None),\n mstats.kurtosistest(x))\n\n def test_maskedarray_input(self):\n # Add some masked values, test result doesn't change\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\n xm = np.ma.array(np.r_[np.inf, x, 10],\n mask=np.r_[True, [False] * x.size, True])\n assert_allclose(mstats.normaltest(xm), stats.normaltest(x))\n assert_allclose(mstats.skewtest(xm), stats.skewtest(x))\n assert_allclose(mstats.kurtosistest(xm), stats.kurtosistest(x))\n\n def test_nd_input(self):\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\n x_2d = np.vstack([x] * 2).T\n for func in [mstats.normaltest, mstats.skewtest, mstats.kurtosistest]:\n res_1d = func(x)\n res_2d = func(x_2d)\n assert_allclose(res_2d[0], [res_1d[0]] * 2)\n assert_allclose(res_2d[1], [res_1d[1]] * 2)\n\n def test_normaltest_result_attributes(self):\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\n res = mstats.normaltest(x)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n def test_kurtosistest_result_attributes(self):\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\n res = mstats.kurtosistest(x)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n def regression_test_9033(self):\n # x cleary non-normal but power of negtative denom needs\n # to be handled correctly to reject normality\n counts = [128, 0, 58, 7, 0, 41, 16, 0, 0, 167]\n x = np.hstack([np.full(c, i) for i, c in enumerate(counts)])\n assert_equal(mstats.kurtosistest(x)[1] < 0.01, True)\n\n @pytest.mark.parametrize(\"test\", [\"skewtest\", \"kurtosistest\"])\n @pytest.mark.parametrize(\"alternative\", [\"less\", \"greater\"])\n def test_alternative(self, test, alternative):\n x = stats.norm.rvs(loc=10, scale=2.5, size=30, random_state=123)\n\n stats_test = getattr(stats, test)\n mstats_test = getattr(mstats, test)\n\n z_ex, p_ex = stats_test(x, alternative=alternative)\n z, p = mstats_test(x, alternative=alternative)\n assert_allclose(z, z_ex, atol=1e-12)\n assert_allclose(p, p_ex, atol=1e-12)\n\n # test with masked arrays\n x[1:5] = np.nan\n x = np.ma.masked_array(x, mask=np.isnan(x))\n z_ex, p_ex = stats_test(x.compressed(), alternative=alternative)\n z, p = mstats_test(x, alternative=alternative)\n assert_allclose(z, z_ex, atol=1e-12)\n assert_allclose(p, p_ex, atol=1e-12)\n\n def test_bad_alternative(self):\n x = stats.norm.rvs(size=20, 
random_state=123)\n msg = r\"alternative must be 'less', 'greater' or 'two-sided'\"\n\n with pytest.raises(ValueError, match=msg):\n mstats.skewtest(x, alternative='error')\n\n with pytest.raises(ValueError, match=msg):\n mstats.kurtosistest(x, alternative='error')\n\n\nclass TestFOneway():\n def test_result_attributes(self):\n a = np.array([655, 788], dtype=np.uint16)\n b = np.array([789, 772], dtype=np.uint16)\n res = mstats.f_oneway(a, b)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n\nclass TestMannwhitneyu():\n # data from gh-1428\n x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1.])\n\n y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,\n 2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,\n 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,\n 2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,\n 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,\n 2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,\n 1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,\n 1., 1., 1., 1.])\n\n def test_result_attributes(self):\n res = mstats.mannwhitneyu(self.x, self.y)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n def test_against_stats(self):\n # gh-4641 reported that stats.mannwhitneyu returned half the p-value\n # of mstats.mannwhitneyu. 
Default alternative of stats.mannwhitneyu\n # is now two-sided, so they match.\n res1 = mstats.mannwhitneyu(self.x, self.y)\n res2 = stats.mannwhitneyu(self.x, self.y)\n assert res1.statistic == res2.statistic\n assert_allclose(res1.pvalue, res2.pvalue)\n\n\nclass TestKruskal():\n def test_result_attributes(self):\n x = [1, 3, 5, 7, 9]\n y = [2, 4, 6, 8, 10]\n\n res = mstats.kruskal(x, y)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n\n# TODO: for all ttest functions, add tests with masked array inputs\nclass TestTtest_rel():\n def test_vs_nonmasked(self):\n np.random.seed(1234567)\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\n\n # 1-D inputs\n res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1])\n res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])\n assert_allclose(res1, res2)\n\n # 2-D inputs\n res1 = stats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)\n res2 = mstats.ttest_rel(outcome[:, 0], outcome[:, 1], axis=None)\n assert_allclose(res1, res2)\n res1 = stats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)\n res2 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:], axis=0)\n assert_allclose(res1, res2)\n\n # Check default is axis=0\n res3 = mstats.ttest_rel(outcome[:, :2], outcome[:, 2:])\n assert_allclose(res2, res3)\n\n def test_fully_masked(self):\n np.random.seed(1234567)\n outcome = ma.masked_array(np.random.randn(3, 2),\n mask=[[1, 1, 1], [0, 0, 0]])\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in absolute\")\n for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:\n t, p = mstats.ttest_rel(*pair)\n assert_array_equal(t, (np.nan, np.nan))\n assert_array_equal(p, (np.nan, np.nan))\n\n def test_result_attributes(self):\n np.random.seed(1234567)\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\n\n res = mstats.ttest_rel(outcome[:, 0], outcome[:, 1])\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n def test_invalid_input_size(self):\n assert_raises(ValueError, mstats.ttest_rel,\n np.arange(10), np.arange(11))\n x = np.arange(24)\n assert_raises(ValueError, mstats.ttest_rel,\n x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=1)\n assert_raises(ValueError, mstats.ttest_rel,\n x.reshape(2, 3, 4), x.reshape(2, 4, 3), axis=2)\n\n def test_empty(self):\n res1 = mstats.ttest_rel([], [])\n assert_(np.all(np.isnan(res1)))\n\n def test_zero_division(self):\n t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])\n assert_equal((np.abs(t), p), (np.inf, 0))\n\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in absolute\")\n t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])\n assert_array_equal(t, np.array([np.nan, np.nan]))\n assert_array_equal(p, np.array([np.nan, np.nan]))\n\n def test_bad_alternative(self):\n msg = r\"alternative must be 'less', 'greater' or 'two-sided'\"\n with pytest.raises(ValueError, match=msg):\n mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')\n\n @pytest.mark.parametrize(\"alternative\", [\"less\", \"greater\"])\n def test_alternative(self, alternative):\n x = stats.norm.rvs(loc=10, scale=5, size=25, random_state=42)\n y = stats.norm.rvs(loc=8, scale=2, size=25, random_state=42)\n\n t_ex, p_ex = stats.ttest_rel(x, y, alternative=alternative)\n t, p = mstats.ttest_rel(x, y, alternative=alternative)\n assert_allclose(t, t_ex, rtol=1e-14)\n assert_allclose(p, p_ex, rtol=1e-14)\n\n # test with masked arrays\n x[1:10] = np.nan\n y[1:10] = np.nan\n x = 
np.ma.masked_array(x, mask=np.isnan(x))\n y = np.ma.masked_array(y, mask=np.isnan(y))\n t, p = mstats.ttest_rel(x, y, alternative=alternative)\n t_ex, p_ex = stats.ttest_rel(x.compressed(), y.compressed(),\n alternative=alternative)\n assert_allclose(t, t_ex, rtol=1e-14)\n assert_allclose(p, p_ex, rtol=1e-14)\n\n\nclass TestTtest_ind():\n def test_vs_nonmasked(self):\n np.random.seed(1234567)\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\n\n # 1-D inputs\n res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1])\n res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])\n assert_allclose(res1, res2)\n\n # 2-D inputs\n res1 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)\n res2 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], axis=None)\n assert_allclose(res1, res2)\n res1 = stats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)\n res2 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:], axis=0)\n assert_allclose(res1, res2)\n\n # Check default is axis=0\n res3 = mstats.ttest_ind(outcome[:, :2], outcome[:, 2:])\n assert_allclose(res2, res3)\n\n # Check equal_var\n res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)\n res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=True)\n assert_allclose(res4, res5)\n res4 = stats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)\n res5 = mstats.ttest_ind(outcome[:, 0], outcome[:, 1], equal_var=False)\n assert_allclose(res4, res5)\n\n def test_fully_masked(self):\n np.random.seed(1234567)\n outcome = ma.masked_array(np.random.randn(3, 2), mask=[[1, 1, 1], [0, 0, 0]])\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in absolute\")\n for pair in [(outcome[:, 0], outcome[:, 1]), ([np.nan, np.nan], [1.0, 2.0])]:\n t, p = mstats.ttest_ind(*pair)\n assert_array_equal(t, (np.nan, np.nan))\n assert_array_equal(p, (np.nan, np.nan))\n\n def test_result_attributes(self):\n np.random.seed(1234567)\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\n\n res = mstats.ttest_ind(outcome[:, 0], outcome[:, 1])\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n def test_empty(self):\n res1 = mstats.ttest_ind([], [])\n assert_(np.all(np.isnan(res1)))\n\n def test_zero_division(self):\n t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1])\n assert_equal((np.abs(t), p), (np.inf, 0))\n\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in absolute\")\n t, p = mstats.ttest_ind([0, 0, 0], [0, 0, 0])\n assert_array_equal(t, (np.nan, np.nan))\n assert_array_equal(p, (np.nan, np.nan))\n\n t, p = mstats.ttest_ind([0, 0, 0], [1, 1, 1], equal_var=False)\n assert_equal((np.abs(t), p), (np.inf, 0))\n assert_array_equal(mstats.ttest_ind([0, 0, 0], [0, 0, 0],\n equal_var=False), (np.nan, np.nan))\n\n def test_bad_alternative(self):\n msg = r\"alternative must be 'less', 'greater' or 'two-sided'\"\n with pytest.raises(ValueError, match=msg):\n mstats.ttest_ind([1, 2, 3], [4, 5, 6], alternative='foo')\n\n @pytest.mark.parametrize(\"alternative\", [\"less\", \"greater\"])\n def test_alternative(self, alternative):\n x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)\n y = stats.norm.rvs(loc=8, scale=2, size=100, random_state=123)\n\n t_ex, p_ex = stats.ttest_ind(x, y, alternative=alternative)\n t, p = mstats.ttest_ind(x, y, alternative=alternative)\n assert_allclose(t, t_ex, rtol=1e-14)\n assert_allclose(p, p_ex, rtol=1e-14)\n\n # test with masked arrays\n x[1:10] = np.nan\n y[80:90] = np.nan\n x = np.ma.masked_array(x, 
mask=np.isnan(x))\n y = np.ma.masked_array(y, mask=np.isnan(y))\n t_ex, p_ex = stats.ttest_ind(x.compressed(), y.compressed(),\n alternative=alternative)\n t, p = mstats.ttest_ind(x, y, alternative=alternative)\n assert_allclose(t, t_ex, rtol=1e-14)\n assert_allclose(p, p_ex, rtol=1e-14)\n\n\nclass TestTtest_1samp():\n def test_vs_nonmasked(self):\n np.random.seed(1234567)\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\n\n # 1-D inputs\n res1 = stats.ttest_1samp(outcome[:, 0], 1)\n res2 = mstats.ttest_1samp(outcome[:, 0], 1)\n assert_allclose(res1, res2)\n\n # 2-D inputs\n res1 = stats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)\n res2 = mstats.ttest_1samp(outcome[:, 0], outcome[:, 1], axis=None)\n assert_allclose(res1, res2)\n\n res1 = stats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)\n res2 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:], axis=0)\n assert_allclose(res1, res2, atol=1e-15)\n\n # Check default is axis=0\n res3 = mstats.ttest_1samp(outcome[:, :2], outcome[:, 2:])\n assert_allclose(res2, res3)\n\n def test_fully_masked(self):\n np.random.seed(1234567)\n outcome = ma.masked_array(np.random.randn(3), mask=[1, 1, 1])\n expected = (np.nan, np.nan)\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in absolute\")\n for pair in [((np.nan, np.nan), 0.0), (outcome, 0.0)]:\n t, p = mstats.ttest_1samp(*pair)\n assert_array_equal(p, expected)\n assert_array_equal(t, expected)\n\n def test_result_attributes(self):\n np.random.seed(1234567)\n outcome = np.random.randn(20, 4) + [0, 0, 1, 2]\n\n res = mstats.ttest_1samp(outcome[:, 0], 1)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n def test_empty(self):\n res1 = mstats.ttest_1samp([], 1)\n assert_(np.all(np.isnan(res1)))\n\n def test_zero_division(self):\n t, p = mstats.ttest_1samp([0, 0, 0], 1)\n assert_equal((np.abs(t), p), (np.inf, 0))\n\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in absolute\")\n t, p = mstats.ttest_1samp([0, 0, 0], 0)\n assert_(np.isnan(t))\n assert_array_equal(p, (np.nan, np.nan))\n\n def test_bad_alternative(self):\n msg = r\"alternative must be 'less', 'greater' or 'two-sided'\"\n with pytest.raises(ValueError, match=msg):\n mstats.ttest_1samp([1, 2, 3], 4, alternative='foo')\n\n @pytest.mark.parametrize(\"alternative\", [\"less\", \"greater\"])\n def test_alternative(self, alternative):\n x = stats.norm.rvs(loc=10, scale=2, size=100, random_state=123)\n\n t_ex, p_ex = stats.ttest_1samp(x, 9, alternative=alternative)\n t, p = mstats.ttest_1samp(x, 9, alternative=alternative)\n assert_allclose(t, t_ex, rtol=1e-14)\n assert_allclose(p, p_ex, rtol=1e-14)\n\n # test with masked arrays\n x[1:10] = np.nan\n x = np.ma.masked_array(x, mask=np.isnan(x))\n t_ex, p_ex = stats.ttest_1samp(x.compressed(), 9,\n alternative=alternative)\n t, p = mstats.ttest_1samp(x, 9, alternative=alternative)\n assert_allclose(t, t_ex, rtol=1e-14)\n assert_allclose(p, p_ex, rtol=1e-14)\n\n\nclass TestDescribe:\n \"\"\"\n Tests for mstats.describe.\n\n Note that there are also tests for `mstats.describe` in the\n class TestCompareWithStats.\n \"\"\"\n def test_basic_with_axis(self):\n # This is a basic test that is also a regression test for gh-7303.\n a = np.ma.masked_array([[0, 1, 2, 3, 4, 9],\n [5, 5, 0, 9, 3, 3]],\n mask=[[0, 0, 0, 0, 0, 1],\n [0, 0, 1, 1, 0, 0]])\n result = mstats.describe(a, axis=1)\n assert_equal(result.nobs, [5, 4])\n amin, amax = result.minmax\n assert_equal(amin, 
[0, 3])\n assert_equal(amax, [4, 5])\n assert_equal(result.mean, [2.0, 4.0])\n assert_equal(result.variance, [2.0, 1.0])\n assert_equal(result.skewness, [0.0, 0.0])\n assert_allclose(result.kurtosis, [-1.3, -2.0])\n\n\nclass TestCompareWithStats:\n \"\"\"\n Class to compare mstats results with stats results.\n\n It is in general assumed that scipy.stats is at a more mature stage than\n stats.mstats. If a routine in mstats results in similar results like in\n scipy.stats, this is considered also as a proper validation of scipy.mstats\n routine.\n\n Different sample sizes are used for testing, as some problems between stats\n and mstats are dependent on sample size.\n\n Author: Alexander Loew\n\n NOTE that some tests fail. This might be caused by\n a) actual differences or bugs between stats and mstats\n b) numerical inaccuracies\n c) different definitions of routine interfaces\n\n These failures need to be checked. Current workaround is to have disabled these tests,\n but issuing reports on scipy-dev\n\n \"\"\"\n def get_n(self):\n \"\"\" Returns list of sample sizes to be used for comparison. \"\"\"\n return [1000, 100, 10, 5]\n\n def generate_xy_sample(self, n):\n # This routine generates numpy arrays and corresponding masked arrays\n # with the same data, but additional masked values\n np.random.seed(1234567)\n x = np.random.randn(n)\n y = x + np.random.randn(n)\n xm = np.full(len(x) + 5, 1e16)\n ym = np.full(len(y) + 5, 1e16)\n xm[0:len(x)] = x\n ym[0:len(y)] = y\n mask = xm > 9e15\n xm = np.ma.array(xm, mask=mask)\n ym = np.ma.array(ym, mask=mask)\n return x, y, xm, ym\n\n def generate_xy_sample2D(self, n, nx):\n x = np.full((n, nx), np.nan)\n y = np.full((n, nx), np.nan)\n xm = np.full((n+5, nx), np.nan)\n ym = np.full((n+5, nx), np.nan)\n\n for i in range(nx):\n x[:, i], y[:, i], dx, dy = self.generate_xy_sample(n)\n\n xm[0:n, :] = x[0:n]\n ym[0:n, :] = y[0:n]\n xm = np.ma.array(xm, mask=np.isnan(xm))\n ym = np.ma.array(ym, mask=np.isnan(ym))\n return x, y, xm, ym\n\n def test_linregress(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n result1 = stats.linregress(x, y)\n result2 = stats.mstats.linregress(xm, ym)\n assert_allclose(np.asarray(result1), np.asarray(result2))\n\n def test_pearsonr(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n r, p = stats.pearsonr(x, y)\n rm, pm = stats.mstats.pearsonr(xm, ym)\n\n assert_almost_equal(r, rm, decimal=14)\n assert_almost_equal(p, pm, decimal=14)\n\n def test_spearmanr(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n r, p = stats.spearmanr(x, y)\n rm, pm = stats.mstats.spearmanr(xm, ym)\n assert_almost_equal(r, rm, 14)\n assert_almost_equal(p, pm, 14)\n\n def test_spearmanr_backcompat_useties(self):\n # A regression test to ensure we don't break backwards compat\n # more than we have to (see gh-9204).\n x = np.arange(6)\n assert_raises(ValueError, mstats.spearmanr, x, x, False)\n\n def test_gmean(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n r = stats.gmean(abs(x))\n rm = stats.mstats.gmean(abs(xm))\n assert_allclose(r, rm, rtol=1e-13)\n\n r = stats.gmean(abs(y))\n rm = stats.mstats.gmean(abs(ym))\n assert_allclose(r, rm, rtol=1e-13)\n\n def test_hmean(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n\n r = stats.hmean(abs(x))\n rm = stats.mstats.hmean(abs(xm))\n assert_almost_equal(r, rm, 10)\n\n r = stats.hmean(abs(y))\n rm = stats.mstats.hmean(abs(ym))\n assert_almost_equal(r, rm, 10)\n\n def 
test_skew(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n\n r = stats.skew(x)\n rm = stats.mstats.skew(xm)\n assert_almost_equal(r, rm, 10)\n\n r = stats.skew(y)\n rm = stats.mstats.skew(ym)\n assert_almost_equal(r, rm, 10)\n\n def test_moment(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n\n r = stats.moment(x)\n rm = stats.mstats.moment(xm)\n assert_almost_equal(r, rm, 10)\n\n r = stats.moment(y)\n rm = stats.mstats.moment(ym)\n assert_almost_equal(r, rm, 10)\n\n def test_zscore(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n\n # reference solution\n zx = (x - x.mean()) / x.std()\n zy = (y - y.mean()) / y.std()\n\n # validate stats\n assert_allclose(stats.zscore(x), zx, rtol=1e-10)\n assert_allclose(stats.zscore(y), zy, rtol=1e-10)\n\n # compare stats and mstats\n assert_allclose(stats.zscore(x), stats.mstats.zscore(xm[0:len(x)]),\n rtol=1e-10)\n assert_allclose(stats.zscore(y), stats.mstats.zscore(ym[0:len(y)]),\n rtol=1e-10)\n\n def test_kurtosis(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n r = stats.kurtosis(x)\n rm = stats.mstats.kurtosis(xm)\n assert_almost_equal(r, rm, 10)\n\n r = stats.kurtosis(y)\n rm = stats.mstats.kurtosis(ym)\n assert_almost_equal(r, rm, 10)\n\n def test_sem(self):\n # example from stats.sem doc\n a = np.arange(20).reshape(5, 4)\n am = np.ma.array(a)\n r = stats.sem(a, ddof=1)\n rm = stats.mstats.sem(am, ddof=1)\n\n assert_allclose(r, 2.82842712, atol=1e-5)\n assert_allclose(rm, 2.82842712, atol=1e-5)\n\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=0),\n stats.sem(x, axis=None, ddof=0), decimal=13)\n assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=0),\n stats.sem(y, axis=None, ddof=0), decimal=13)\n assert_almost_equal(stats.mstats.sem(xm, axis=None, ddof=1),\n stats.sem(x, axis=None, ddof=1), decimal=13)\n assert_almost_equal(stats.mstats.sem(ym, axis=None, ddof=1),\n stats.sem(y, axis=None, ddof=1), decimal=13)\n\n def test_describe(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n r = stats.describe(x, ddof=1)\n rm = stats.mstats.describe(xm, ddof=1)\n for ii in range(6):\n assert_almost_equal(np.asarray(r[ii]),\n np.asarray(rm[ii]),\n decimal=12)\n\n def test_describe_result_attributes(self):\n actual = mstats.describe(np.arange(5))\n attributes = ('nobs', 'minmax', 'mean', 'variance', 'skewness',\n 'kurtosis')\n check_named_results(actual, attributes, ma=True)\n\n def test_rankdata(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n r = stats.rankdata(x)\n rm = stats.mstats.rankdata(x)\n assert_allclose(r, rm)\n\n def test_tmean(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n assert_almost_equal(stats.tmean(x),stats.mstats.tmean(xm), 14)\n assert_almost_equal(stats.tmean(y),stats.mstats.tmean(ym), 14)\n\n def test_tmax(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n assert_almost_equal(stats.tmax(x,2.),\n stats.mstats.tmax(xm,2.), 10)\n assert_almost_equal(stats.tmax(y,2.),\n stats.mstats.tmax(ym,2.), 10)\n\n assert_almost_equal(stats.tmax(x, upperlimit=3.),\n stats.mstats.tmax(xm, upperlimit=3.), 10)\n assert_almost_equal(stats.tmax(y, upperlimit=3.),\n stats.mstats.tmax(ym, upperlimit=3.), 10)\n\n def test_tmin(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n assert_equal(stats.tmin(x), 
stats.mstats.tmin(xm))\n assert_equal(stats.tmin(y), stats.mstats.tmin(ym))\n\n assert_almost_equal(stats.tmin(x, lowerlimit=-1.),\n stats.mstats.tmin(xm, lowerlimit=-1.), 10)\n assert_almost_equal(stats.tmin(y, lowerlimit=-1.),\n stats.mstats.tmin(ym, lowerlimit=-1.), 10)\n\n def test_zmap(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n z = stats.zmap(x, y)\n zm = stats.mstats.zmap(xm, ym)\n assert_allclose(z, zm[0:len(z)], atol=1e-10)\n\n def test_variation(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n assert_almost_equal(stats.variation(x), stats.mstats.variation(xm),\n decimal=12)\n assert_almost_equal(stats.variation(y), stats.mstats.variation(ym),\n decimal=12)\n\n def test_tvar(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n assert_almost_equal(stats.tvar(x), stats.mstats.tvar(xm),\n decimal=12)\n assert_almost_equal(stats.tvar(y), stats.mstats.tvar(ym),\n decimal=12)\n\n def test_trimboth(self):\n a = np.arange(20)\n b = stats.trimboth(a, 0.1)\n bm = stats.mstats.trimboth(a, 0.1)\n assert_allclose(np.sort(b), bm.data[~bm.mask])\n\n def test_tsem(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n assert_almost_equal(stats.tsem(x), stats.mstats.tsem(xm),\n decimal=14)\n assert_almost_equal(stats.tsem(y), stats.mstats.tsem(ym),\n decimal=14)\n assert_almost_equal(stats.tsem(x, limits=(-2., 2.)),\n stats.mstats.tsem(xm, limits=(-2., 2.)),\n decimal=14)\n\n def test_skewtest(self):\n # this test is for 1D data\n for n in self.get_n():\n if n > 8:\n x, y, xm, ym = self.generate_xy_sample(n)\n r = stats.skewtest(x)\n rm = stats.mstats.skewtest(xm)\n assert_allclose(r, rm)\n\n def test_skewtest_result_attributes(self):\n x = np.array((-2, -1, 0, 1, 2, 3)*4)**2\n res = mstats.skewtest(x)\n attributes = ('statistic', 'pvalue')\n check_named_results(res, attributes, ma=True)\n\n def test_skewtest_2D_notmasked(self):\n # a normal ndarray is passed to the masked function\n x = np.random.random((20, 2)) * 20.\n r = stats.skewtest(x)\n rm = stats.mstats.skewtest(x)\n assert_allclose(np.asarray(r), np.asarray(rm))\n\n def test_skewtest_2D_WithMask(self):\n nx = 2\n for n in self.get_n():\n if n > 8:\n x, y, xm, ym = self.generate_xy_sample2D(n, nx)\n r = stats.skewtest(x)\n rm = stats.mstats.skewtest(xm)\n\n assert_equal(r[0][0], rm[0][0])\n assert_equal(r[0][1], rm[0][1])\n\n def test_normaltest(self):\n with np.errstate(over='raise'), suppress_warnings() as sup:\n sup.filter(UserWarning, \"kurtosistest only valid for n>=20\")\n for n in self.get_n():\n if n > 8:\n x, y, xm, ym = self.generate_xy_sample(n)\n r = stats.normaltest(x)\n rm = stats.mstats.normaltest(xm)\n assert_allclose(np.asarray(r), np.asarray(rm))\n\n def test_find_repeats(self):\n x = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4]).astype('float')\n tmp = np.asarray([1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5]).astype('float')\n mask = (tmp == 5.)\n xm = np.ma.array(tmp, mask=mask)\n x_orig, xm_orig = x.copy(), xm.copy()\n\n r = stats.find_repeats(x)\n rm = stats.mstats.find_repeats(xm)\n\n assert_equal(r, rm)\n assert_equal(x, x_orig)\n assert_equal(xm, xm_orig)\n\n # This crazy behavior is expected by count_tied_groups, but is not\n # in the docstring...\n _, counts = stats.mstats.find_repeats([])\n assert_equal(counts, np.array(0, dtype=np.intp))\n\n def test_kendalltau(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n r = stats.kendalltau(x, y)\n rm = stats.mstats.kendalltau(xm, 
ym)\n assert_almost_equal(r[0], rm[0], decimal=10)\n assert_almost_equal(r[1], rm[1], decimal=7)\n\n def test_obrientransform(self):\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n r = stats.obrientransform(x)\n rm = stats.mstats.obrientransform(xm)\n assert_almost_equal(r.T, rm[0:len(x)])\n\n def test_ks_1samp(self):\n \"\"\"Checks that mstats.ks_1samp and stats.ks_1samp agree on masked arrays.\"\"\"\n for mode in ['auto', 'exact', 'asymp']:\n with suppress_warnings() as sup:\n for alternative in ['less', 'greater', 'two-sided']:\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n res1 = stats.ks_1samp(x, stats.norm.cdf, alternative=alternative, mode=mode)\n res2 = stats.mstats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)\n assert_equal(np.asarray(res1), np.asarray(res2))\n res3 = stats.ks_1samp(xm, stats.norm.cdf, alternative=alternative, mode=mode)\n assert_equal(np.asarray(res1), np.asarray(res3))\n\n def test_kstest_1samp(self):\n \"\"\"Checks that 1-sample mstats.kstest and stats.kstest agree on masked arrays.\"\"\"\n for mode in ['auto', 'exact', 'asymp']:\n with suppress_warnings() as sup:\n for alternative in ['less', 'greater', 'two-sided']:\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n res1 = stats.kstest(x, 'norm', alternative=alternative, mode=mode)\n res2 = stats.mstats.kstest(xm, 'norm', alternative=alternative, mode=mode)\n assert_equal(np.asarray(res1), np.asarray(res2))\n res3 = stats.kstest(xm, 'norm', alternative=alternative, mode=mode)\n assert_equal(np.asarray(res1), np.asarray(res3))\n\n def test_ks_2samp(self):\n \"\"\"Checks that mstats.ks_2samp and stats.ks_2samp agree on masked arrays.\n gh-8431\"\"\"\n for mode in ['auto', 'exact', 'asymp']:\n with suppress_warnings() as sup:\n if mode in ['auto', 'exact']:\n sup.filter(RuntimeWarning,\n \"ks_2samp: Exact calculation unsuccessful. Switching to mode=asymp.\")\n for alternative in ['less', 'greater', 'two-sided']:\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n res1 = stats.ks_2samp(x, y, alternative=alternative, mode=mode)\n res2 = stats.mstats.ks_2samp(xm, ym, alternative=alternative, mode=mode)\n assert_equal(np.asarray(res1), np.asarray(res2))\n res3 = stats.ks_2samp(xm, y, alternative=alternative, mode=mode)\n assert_equal(np.asarray(res1), np.asarray(res3))\n\n def test_kstest_2samp(self):\n \"\"\"Checks that 2-sample mstats.kstest and stats.kstest agree on masked arrays.\"\"\"\n for mode in ['auto', 'exact', 'asymp']:\n with suppress_warnings() as sup:\n if mode in ['auto', 'exact']:\n sup.filter(RuntimeWarning,\n \"ks_2samp: Exact calculation unsuccessful. 
Switching to mode=asymp.\")\n for alternative in ['less', 'greater', 'two-sided']:\n for n in self.get_n():\n x, y, xm, ym = self.generate_xy_sample(n)\n res1 = stats.kstest(x, y, alternative=alternative, mode=mode)\n res2 = stats.mstats.kstest(xm, ym, alternative=alternative, mode=mode)\n assert_equal(np.asarray(res1), np.asarray(res2))\n res3 = stats.kstest(xm, y, alternative=alternative, mode=mode)\n assert_equal(np.asarray(res1), np.asarray(res3))\n\n def test_nametuples_agree(self):\n result = stats.kstest([1, 2], [3, 4])\n assert_(isinstance(result, stats.stats.KstestResult))\n result2 = stats.stats.Ks_2sampResult(result.statistic, result.pvalue)\n assert_(isinstance(result2, stats.stats.Ks_2sampResult))\n assert_equal(result, result2)\n\n\nclass TestBrunnerMunzel:\n # Data from (Lumley, 1996)\n X = np.ma.masked_invalid([1, 2, 1, 1, 1, np.nan, 1, 1,\n 1, 1, 1, 2, 4, 1, 1, np.nan])\n Y = np.ma.masked_invalid([3, 3, 4, 3, np.nan, 1, 2, 3, 1, 1, 5, 4])\n significant = 14\n\n def test_brunnermunzel_one_sided(self):\n # Results are compared with R's lawstat package.\n u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='less')\n u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='greater')\n u3, p3 = mstats.brunnermunzel(self.X, self.Y, alternative='greater')\n u4, p4 = mstats.brunnermunzel(self.Y, self.X, alternative='less')\n\n assert_almost_equal(p1, p2, decimal=self.significant)\n assert_almost_equal(p3, p4, decimal=self.significant)\n assert_(p1 != p3)\n assert_almost_equal(u1, 3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(u2, -3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(u3, 3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(u4, -3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(p1, 0.0028931043330757342,\n decimal=self.significant)\n assert_almost_equal(p3, 0.99710689566692423,\n decimal=self.significant)\n\n def test_brunnermunzel_two_sided(self):\n # Results are compared with R's lawstat package.\n u1, p1 = mstats.brunnermunzel(self.X, self.Y, alternative='two-sided')\n u2, p2 = mstats.brunnermunzel(self.Y, self.X, alternative='two-sided')\n\n assert_almost_equal(p1, p2, decimal=self.significant)\n assert_almost_equal(u1, 3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(u2, -3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(p1, 0.0057862086661515377,\n decimal=self.significant)\n\n def test_brunnermunzel_default(self):\n # The default value for alternative is two-sided\n u1, p1 = mstats.brunnermunzel(self.X, self.Y)\n u2, p2 = mstats.brunnermunzel(self.Y, self.X)\n\n assert_almost_equal(p1, p2, decimal=self.significant)\n assert_almost_equal(u1, 3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(u2, -3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(p1, 0.0057862086661515377,\n decimal=self.significant)\n\n def test_brunnermunzel_alternative_error(self):\n alternative = \"error\"\n distribution = \"t\"\n assert_(alternative not in [\"two-sided\", \"greater\", \"less\"])\n assert_raises(ValueError,\n mstats.brunnermunzel,\n self.X,\n self.Y,\n alternative,\n distribution)\n\n def test_brunnermunzel_distribution_norm(self):\n u1, p1 = mstats.brunnermunzel(self.X, self.Y, distribution=\"normal\")\n u2, p2 = mstats.brunnermunzel(self.Y, self.X, distribution=\"normal\")\n assert_almost_equal(p1, p2, decimal=self.significant)\n assert_almost_equal(u1, 3.1374674823029505,\n decimal=self.significant)\n 
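# With distribution="normal" the statistic matches the default "t" case above;\n        # only the p-value changes, because it is taken from the standard normal\n        # rather than a t distribution. Rough illustrative check (not executed by\n        # the test): 2 * stats.norm.sf(3.1374674823029505) is approximately 0.0017,\n        # which is the p1 value asserted below.\n        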
assert_almost_equal(u2, -3.1374674823029505,\n decimal=self.significant)\n assert_almost_equal(p1, 0.0017041417600383024,\n decimal=self.significant)\n\n def test_brunnermunzel_distribution_error(self):\n alternative = \"two-sided\"\n distribution = \"error\"\n assert_(alternative not in [\"t\", \"normal\"])\n assert_raises(ValueError,\n mstats.brunnermunzel,\n self.X,\n self.Y,\n alternative,\n distribution)\n\n def test_brunnermunzel_empty_imput(self):\n u1, p1 = mstats.brunnermunzel(self.X, [])\n u2, p2 = mstats.brunnermunzel([], self.Y)\n u3, p3 = mstats.brunnermunzel([], [])\n\n assert_(np.isnan(u1))\n assert_(np.isnan(p1))\n assert_(np.isnan(u2))\n assert_(np.isnan(p2))\n assert_(np.isnan(u3))\n assert_(np.isnan(p3))\n", "\"\"\"\nUnit tests for optimization routines from optimize.py\n\nAuthors:\n Ed Schofield, Nov 2005\n Andrew Straw, April 2008\n\nTo run it in its simplest form::\n nosetests test_optimize.py\n\n\"\"\"\nimport itertools\nimport numpy as np\nfrom numpy.testing import (assert_allclose, assert_equal,\n assert_, assert_almost_equal,\n assert_no_warnings, assert_warns,\n assert_array_less, suppress_warnings)\nimport pytest\nfrom pytest import raises as assert_raises\n\nfrom scipy import optimize\nfrom scipy.optimize._minimize import MINIMIZE_METHODS, MINIMIZE_SCALAR_METHODS\nfrom scipy.optimize._linprog import LINPROG_METHODS\nfrom scipy.optimize._root import ROOT_METHODS\nfrom scipy.optimize._root_scalar import ROOT_SCALAR_METHODS\nfrom scipy.optimize._qap import QUADRATIC_ASSIGNMENT_METHODS\nfrom scipy.optimize._differentiable_functions import ScalarFunction\nfrom scipy.optimize.optimize import MemoizeJac, show_options\n\n\ndef test_check_grad():\n # Verify if check_grad is able to estimate the derivative of the\n # logistic function.\n\n def logit(x):\n return 1 / (1 + np.exp(-x))\n\n def der_logit(x):\n return np.exp(-x) / (1 + np.exp(-x))**2\n\n x0 = np.array([1.5])\n\n r = optimize.check_grad(logit, der_logit, x0)\n assert_almost_equal(r, 0)\n\n r = optimize.check_grad(logit, der_logit, x0, epsilon=1e-6)\n assert_almost_equal(r, 0)\n\n # Check if the epsilon parameter is being considered.\n r = abs(optimize.check_grad(logit, der_logit, x0, epsilon=1e-1) - 0)\n assert_(r > 1e-7)\n\n\nclass CheckOptimize:\n \"\"\" Base test case for a simple constrained entropy maximization problem\n (the machine translation example of Berger et al in\n Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)\n \"\"\"\n\n def setup_method(self):\n self.F = np.array([[1, 1, 1],\n [1, 1, 0],\n [1, 0, 1],\n [1, 0, 0],\n [1, 0, 0]])\n self.K = np.array([1., 0.3, 0.5])\n self.startparams = np.zeros(3, np.float64)\n self.solution = np.array([0., -0.524869316, 0.487525860])\n self.maxiter = 1000\n self.funccalls = 0\n self.gradcalls = 0\n self.trace = []\n\n def func(self, x):\n self.funccalls += 1\n if self.funccalls > 6000:\n raise RuntimeError(\"too many iterations in optimization routine\")\n log_pdot = np.dot(self.F, x)\n logZ = np.log(sum(np.exp(log_pdot)))\n f = logZ - np.dot(self.K, x)\n self.trace.append(np.copy(x))\n return f\n\n def grad(self, x):\n self.gradcalls += 1\n log_pdot = np.dot(self.F, x)\n logZ = np.log(sum(np.exp(log_pdot)))\n p = np.exp(log_pdot - logZ)\n return np.dot(self.F.transpose(), p) - self.K\n\n def hess(self, x):\n log_pdot = np.dot(self.F, x)\n logZ = np.log(sum(np.exp(log_pdot)))\n p = np.exp(log_pdot - logZ)\n return np.dot(self.F.T,\n np.dot(np.diag(p), self.F - np.dot(self.F.T, p)))\n\n def hessp(self, x, p):\n return np.dot(self.hess(x), 
p)\n\n\nclass CheckOptimizeParameterized(CheckOptimize):\n\n def test_cg(self):\n # conjugate gradient optimization routine\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n res = optimize.minimize(self.func, self.startparams, args=(),\n method='CG', jac=self.grad,\n options=opts)\n params, fopt, func_calls, grad_calls, warnflag = \\\n res['x'], res['fun'], res['nfev'], res['njev'], res['status']\n else:\n retval = optimize.fmin_cg(self.func, self.startparams,\n self.grad, (), maxiter=self.maxiter,\n full_output=True, disp=self.disp,\n retall=False)\n (params, fopt, func_calls, grad_calls, warnflag) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.7.0. Don't allow them to increase.\n assert_(self.funccalls == 9, self.funccalls)\n assert_(self.gradcalls == 7, self.gradcalls)\n\n # Ensure that the function behaves the same; this is from SciPy 0.7.0\n assert_allclose(self.trace[2:4],\n [[0, -0.5, 0.5],\n [0, -5.05700028e-01, 4.95985862e-01]],\n atol=1e-14, rtol=1e-7)\n\n def test_cg_cornercase(self):\n def f(r):\n return 2.5 * (1 - np.exp(-1.5*(r - 0.5)))**2\n\n # Check several initial guesses. (Too far away from the\n # minimum, the function ends up in the flat region of exp.)\n for x0 in np.linspace(-0.75, 3, 71):\n sol = optimize.minimize(f, [x0], method='CG')\n assert_(sol.success)\n assert_allclose(sol.x, [0.5], rtol=1e-5)\n\n def test_bfgs(self):\n # Broyden-Fletcher-Goldfarb-Shanno optimization routine\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n res = optimize.minimize(self.func, self.startparams,\n jac=self.grad, method='BFGS', args=(),\n options=opts)\n\n params, fopt, gopt, Hopt, func_calls, grad_calls, warnflag = (\n res['x'], res['fun'], res['jac'], res['hess_inv'],\n res['nfev'], res['njev'], res['status'])\n else:\n retval = optimize.fmin_bfgs(self.func, self.startparams, self.grad,\n args=(), maxiter=self.maxiter,\n full_output=True, disp=self.disp,\n retall=False)\n (params, fopt, gopt, Hopt,\n func_calls, grad_calls, warnflag) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.7.0. Don't allow them to increase.\n assert_(self.funccalls == 10, self.funccalls)\n assert_(self.gradcalls == 8, self.gradcalls)\n\n # Ensure that the function behaves the same; this is from SciPy 0.7.0\n assert_allclose(self.trace[6:8],\n [[0, -5.25060743e-01, 4.87748473e-01],\n [0, -5.24885582e-01, 4.87530347e-01]],\n atol=1e-14, rtol=1e-7)\n\n def test_bfgs_infinite(self):\n # Test corner case where -Inf is the minimum. 
See gh-2019.\n func = lambda x: -np.e**-x\n fprime = lambda x: -func(x)\n x0 = [0]\n with np.errstate(over='ignore'):\n if self.use_wrapper:\n opts = {'disp': self.disp}\n x = optimize.minimize(func, x0, jac=fprime, method='BFGS',\n args=(), options=opts)['x']\n else:\n x = optimize.fmin_bfgs(func, x0, fprime, disp=self.disp)\n assert_(not np.isfinite(func(x)))\n\n def test_powell(self):\n # Powell (direction set) optimization routine\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n res = optimize.minimize(self.func, self.startparams, args=(),\n method='Powell', options=opts)\n params, fopt, direc, numiter, func_calls, warnflag = (\n res['x'], res['fun'], res['direc'], res['nit'],\n res['nfev'], res['status'])\n else:\n retval = optimize.fmin_powell(self.func, self.startparams,\n args=(), maxiter=self.maxiter,\n full_output=True, disp=self.disp,\n retall=False)\n (params, fopt, direc, numiter, func_calls, warnflag) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n # params[0] does not affect the objective function\n assert_allclose(params[1:], self.solution[1:], atol=5e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.7.0. Don't allow them to increase.\n #\n # However, some leeway must be added: the exact evaluation\n # count is sensitive to numerical error, and floating-point\n # computations are not bit-for-bit reproducible across\n # machines, and when using e.g., MKL, data alignment\n # etc., affect the rounding error.\n #\n assert_(self.funccalls <= 116 + 20, self.funccalls)\n assert_(self.gradcalls == 0, self.gradcalls)\n\n\n @pytest.mark.xfail(reason=\"This part of test_powell fails on some \"\n \"platforms, but the solution returned by powell is \"\n \"still valid.\")\n def test_powell_gh14014(self):\n # This part of test_powell started failing on some CI platforms;\n # see gh-14014. 
Since the solution is still correct and the comments\n # in test_powell suggest that small differences in the bits are known\n # to change the \"trace\" of the solution, seems safe to xfail to get CI\n # green now and investigate later.\n\n # Powell (direction set) optimization routine\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n res = optimize.minimize(self.func, self.startparams, args=(),\n method='Powell', options=opts)\n params, fopt, direc, numiter, func_calls, warnflag = (\n res['x'], res['fun'], res['direc'], res['nit'],\n res['nfev'], res['status'])\n else:\n retval = optimize.fmin_powell(self.func, self.startparams,\n args=(), maxiter=self.maxiter,\n full_output=True, disp=self.disp,\n retall=False)\n (params, fopt, direc, numiter, func_calls, warnflag) = retval\n\n # Ensure that the function behaves the same; this is from SciPy 0.7.0\n assert_allclose(self.trace[34:39],\n [[0.72949016, -0.44156936, 0.47100962],\n [0.72949016, -0.44156936, 0.48052496],\n [1.45898031, -0.88313872, 0.95153458],\n [0.72949016, -0.44156936, 0.47576729],\n [1.72949016, -0.44156936, 0.47576729]],\n atol=1e-14, rtol=1e-7)\n\n def test_powell_bounded(self):\n # Powell (direction set) optimization routine\n # same as test_powell above, but with bounds\n bounds = [(-np.pi, np.pi) for _ in self.startparams]\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n res = optimize.minimize(self.func, self.startparams, args=(),\n bounds=bounds,\n method='Powell', options=opts)\n params, fopt, direc, numiter, func_calls, warnflag = (\n res['x'], res['fun'], res['direc'], res['nit'],\n res['nfev'], res['status'])\n\n assert func_calls == self.funccalls\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'.\n # Generally, this takes 131 function calls. However, on some CI\n # checks it finds 138 funccalls. This 20 call leeway was also\n # included in the test_powell function.\n # The exact evaluation count is sensitive to numerical error, and\n # floating-point computations are not bit-for-bit reproducible\n # across machines, and when using e.g. MKL, data alignment etc.\n # affect the rounding error.\n assert self.funccalls <= 131 + 20\n assert self.gradcalls == 0\n\n def test_neldermead(self):\n # Nelder-Mead simplex algorithm\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n res = optimize.minimize(self.func, self.startparams, args=(),\n method='Nelder-mead', options=opts)\n params, fopt, numiter, func_calls, warnflag = (\n res['x'], res['fun'], res['nit'], res['nfev'],\n res['status'])\n else:\n retval = optimize.fmin(self.func, self.startparams,\n args=(), maxiter=self.maxiter,\n full_output=True, disp=self.disp,\n retall=False)\n (params, fopt, numiter, func_calls, warnflag) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.7.0. 
Don't allow them to increase.\n assert_(self.funccalls == 167, self.funccalls)\n assert_(self.gradcalls == 0, self.gradcalls)\n\n # Ensure that the function behaves the same; this is from SciPy 0.7.0\n assert_allclose(self.trace[76:78],\n [[0.1928968, -0.62780447, 0.35166118],\n [0.19572515, -0.63648426, 0.35838135]],\n atol=1e-14, rtol=1e-7)\n\n def test_neldermead_initial_simplex(self):\n # Nelder-Mead simplex algorithm\n simplex = np.zeros((4, 3))\n simplex[...] = self.startparams\n for j in range(3):\n simplex[j+1, j] += 0.1\n\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': False,\n 'return_all': True, 'initial_simplex': simplex}\n res = optimize.minimize(self.func, self.startparams, args=(),\n method='Nelder-mead', options=opts)\n params, fopt, numiter, func_calls, warnflag = (res['x'],\n res['fun'],\n res['nit'],\n res['nfev'],\n res['status'])\n assert_allclose(res['allvecs'][0], simplex[0])\n else:\n retval = optimize.fmin(self.func, self.startparams,\n args=(), maxiter=self.maxiter,\n full_output=True, disp=False, retall=False,\n initial_simplex=simplex)\n\n (params, fopt, numiter, func_calls, warnflag) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.17.0. Don't allow them to increase.\n assert_(self.funccalls == 100, self.funccalls)\n assert_(self.gradcalls == 0, self.gradcalls)\n\n # Ensure that the function behaves the same; this is from SciPy 0.15.0\n assert_allclose(self.trace[50:52],\n [[0.14687474, -0.5103282, 0.48252111],\n [0.14474003, -0.5282084, 0.48743951]],\n atol=1e-14, rtol=1e-7)\n\n def test_neldermead_initial_simplex_bad(self):\n # Check it fails with a bad simplices\n bad_simplices = []\n\n simplex = np.zeros((3, 2))\n simplex[...] = self.startparams[:2]\n for j in range(2):\n simplex[j+1, j] += 0.1\n bad_simplices.append(simplex)\n\n simplex = np.zeros((3, 3))\n bad_simplices.append(simplex)\n\n for simplex in bad_simplices:\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': False,\n 'return_all': False, 'initial_simplex': simplex}\n assert_raises(ValueError,\n optimize.minimize,\n self.func,\n self.startparams,\n args=(),\n method='Nelder-mead',\n options=opts)\n else:\n assert_raises(ValueError, optimize.fmin,\n self.func, self.startparams,\n args=(), maxiter=self.maxiter,\n full_output=True, disp=False, retall=False,\n initial_simplex=simplex)\n\n def test_ncg_negative_maxiter(self):\n # Regression test for gh-8241\n opts = {'maxiter': -1}\n result = optimize.minimize(self.func, self.startparams,\n method='Newton-CG', jac=self.grad,\n args=(), options=opts)\n assert_(result.status == 1)\n\n def test_ncg(self):\n # line-search Newton conjugate gradient optimization routine\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n retval = optimize.minimize(self.func, self.startparams,\n method='Newton-CG', jac=self.grad,\n args=(), options=opts)['x']\n else:\n retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,\n args=(), maxiter=self.maxiter,\n full_output=False, disp=self.disp,\n retall=False)\n\n params = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.7.0. 
Don't allow them to increase.\n assert_(self.funccalls == 7, self.funccalls)\n assert_(self.gradcalls <= 22, self.gradcalls) # 0.13.0\n # assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0\n # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0\n # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0\n\n # Ensure that the function behaves the same; this is from SciPy 0.7.0\n assert_allclose(self.trace[3:5],\n [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],\n [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],\n atol=1e-6, rtol=1e-7)\n\n def test_ncg_hess(self):\n # Newton conjugate gradient with Hessian\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n retval = optimize.minimize(self.func, self.startparams,\n method='Newton-CG', jac=self.grad,\n hess=self.hess,\n args=(), options=opts)['x']\n else:\n retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,\n fhess=self.hess,\n args=(), maxiter=self.maxiter,\n full_output=False, disp=self.disp,\n retall=False)\n\n params = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.7.0. Don't allow them to increase.\n assert_(self.funccalls <= 7, self.funccalls) # gh10673\n assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0\n # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0\n # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0\n\n # Ensure that the function behaves the same; this is from SciPy 0.7.0\n assert_allclose(self.trace[3:5],\n [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],\n [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],\n atol=1e-6, rtol=1e-7)\n\n def test_ncg_hessp(self):\n # Newton conjugate gradient with Hessian times a vector p.\n if self.use_wrapper:\n opts = {'maxiter': self.maxiter, 'disp': self.disp,\n 'return_all': False}\n retval = optimize.minimize(self.func, self.startparams,\n method='Newton-CG', jac=self.grad,\n hessp=self.hessp,\n args=(), options=opts)['x']\n else:\n retval = optimize.fmin_ncg(self.func, self.startparams, self.grad,\n fhess_p=self.hessp,\n args=(), maxiter=self.maxiter,\n full_output=False, disp=self.disp,\n retall=False)\n\n params = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.7.0. 
Don't allow them to increase.\n assert_(self.funccalls <= 7, self.funccalls) # gh10673\n assert_(self.gradcalls <= 18, self.gradcalls) # 0.9.0\n # assert_(self.gradcalls == 18, self.gradcalls) # 0.8.0\n # assert_(self.gradcalls == 22, self.gradcalls) # 0.7.0\n\n # Ensure that the function behaves the same; this is from SciPy 0.7.0\n assert_allclose(self.trace[3:5],\n [[-4.35700753e-07, -5.24869435e-01, 4.87527480e-01],\n [-4.35700753e-07, -5.24869401e-01, 4.87527774e-01]],\n atol=1e-6, rtol=1e-7)\n\n\ndef test_obj_func_returns_scalar():\n match = (\"The user-provided \"\n \"objective function must \"\n \"return a scalar value.\")\n with assert_raises(ValueError, match=match):\n optimize.minimize(lambda x: x, np.array([1, 1]), method='BFGS')\n\ndef test_neldermead_xatol_fatol():\n # gh4484\n # test we can call with fatol, xatol specified\n func = lambda x: x[0]**2 + x[1]**2\n\n optimize._minimize._minimize_neldermead(func, [1, 1], maxiter=2,\n xatol=1e-3, fatol=1e-3)\n assert_warns(DeprecationWarning,\n optimize._minimize._minimize_neldermead,\n func, [1, 1], xtol=1e-3, ftol=1e-3, maxiter=2)\n\n\ndef test_neldermead_adaptive():\n func = lambda x: np.sum(x**2)\n p0 = [0.15746215, 0.48087031, 0.44519198, 0.4223638, 0.61505159,\n 0.32308456, 0.9692297, 0.4471682, 0.77411992, 0.80441652,\n 0.35994957, 0.75487856, 0.99973421, 0.65063887, 0.09626474]\n\n res = optimize.minimize(func, p0, method='Nelder-Mead')\n assert_equal(res.success, False)\n\n res = optimize.minimize(func, p0, method='Nelder-Mead',\n options={'adaptive': True})\n assert_equal(res.success, True)\n\n\ndef test_bounded_powell_outsidebounds():\n # With the bounded Powell method if you start outside the bounds the final\n # should still be within the bounds (provided that the user doesn't make a\n # bad choice for the `direc` argument).\n func = lambda x: np.sum(x**2)\n bounds = (-1, 1), (-1, 1), (-1, 1)\n x0 = [-4, .5, -.8]\n\n # we're starting outside the bounds, so we should get a warning\n with assert_warns(optimize.OptimizeWarning):\n res = optimize.minimize(func, x0, bounds=bounds, method=\"Powell\")\n assert_allclose(res.x, np.array([0.] * len(x0)), atol=1e-6)\n assert_equal(res.success, True)\n assert_equal(res.status, 0)\n\n # However, now if we change the `direc` argument such that the\n # set of vectors does not span the parameter space, then we may\n # not end up back within the bounds. 
Here we see that the first\n # parameter cannot be updated!\n direc = [[0, 0, 0], [0, 1, 0], [0, 0, 1]]\n # we're starting outside the bounds, so we should get a warning\n with assert_warns(optimize.OptimizeWarning):\n res = optimize.minimize(func, x0,\n bounds=bounds, method=\"Powell\",\n options={'direc': direc})\n assert_allclose(res.x, np.array([-4., 0, 0]), atol=1e-6)\n assert_equal(res.success, False)\n assert_equal(res.status, 4)\n\n\ndef test_bounded_powell_vs_powell():\n # here we test an example where the bounded Powell method\n # will return a different result than the standard Powell\n # method.\n\n # first we test a simple example where the minimum is at\n # the origin and the minimum that is within the bounds is\n # larger than the minimum at the origin.\n func = lambda x: np.sum(x**2)\n bounds = (-5, -1), (-10, -0.1), (1, 9.2), (-4, 7.6), (-15.9, -2)\n x0 = [-2.1, -5.2, 1.9, 0, -2]\n\n options = {'ftol': 1e-10, 'xtol': 1e-10}\n\n res_powell = optimize.minimize(func, x0, method=\"Powell\", options=options)\n assert_allclose(res_powell.x, 0., atol=1e-6)\n assert_allclose(res_powell.fun, 0., atol=1e-6)\n\n res_bounded_powell = optimize.minimize(func, x0, options=options,\n bounds=bounds,\n method=\"Powell\")\n p = np.array([-1, -0.1, 1, 0, -2])\n assert_allclose(res_bounded_powell.x, p, atol=1e-6)\n assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)\n\n # now we test bounded Powell but with a mix of inf bounds.\n bounds = (None, -1), (-np.inf, -.1), (1, np.inf), (-4, None), (-15.9, -2)\n res_bounded_powell = optimize.minimize(func, x0, options=options,\n bounds=bounds,\n method=\"Powell\")\n p = np.array([-1, -0.1, 1, 0, -2])\n assert_allclose(res_bounded_powell.x, p, atol=1e-6)\n assert_allclose(res_bounded_powell.fun, func(p), atol=1e-6)\n\n # next we test an example where the global minimum is within\n # the bounds, but the bounded Powell method performs better\n # than the standard Powell method.\n def func(x):\n t = np.sin(-x[0]) * np.cos(x[1]) * np.sin(-x[0] * x[1]) * np.cos(x[1])\n t -= np.cos(np.sin(x[1] * x[2]) * np.cos(x[2]))\n return t**2\n\n bounds = [(-2, 5)] * 3\n x0 = [-0.5, -0.5, -0.5]\n\n res_powell = optimize.minimize(func, x0, method=\"Powell\")\n res_bounded_powell = optimize.minimize(func, x0,\n bounds=bounds,\n method=\"Powell\")\n assert_allclose(res_powell.fun, 0.007136253919761627, atol=1e-6)\n assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)\n\n # next we test the previous example where the we provide Powell\n # with (-inf, inf) bounds, and compare it to providing Powell\n # with no bounds. They should end up the same.\n bounds = [(-np.inf, np.inf)] * 3\n\n res_bounded_powell = optimize.minimize(func, x0,\n bounds=bounds,\n method=\"Powell\")\n assert_allclose(res_powell.fun, res_bounded_powell.fun, atol=1e-6)\n assert_allclose(res_powell.nfev, res_bounded_powell.nfev, atol=1e-6)\n assert_allclose(res_powell.x, res_bounded_powell.x, atol=1e-6)\n\n # now test when x0 starts outside of the bounds.\n x0 = [45.46254415, -26.52351498, 31.74830248]\n bounds = [(-2, 5)] * 3\n # we're starting outside the bounds, so we should get a warning\n with assert_warns(optimize.OptimizeWarning):\n res_bounded_powell = optimize.minimize(func, x0,\n bounds=bounds,\n method=\"Powell\")\n assert_allclose(res_bounded_powell.fun, 0, atol=1e-6)\n\n\ndef test_onesided_bounded_powell_stability():\n # When the Powell method is bounded on only one side, a\n # np.tan transform is done in order to convert it into a\n # completely bounded problem. 
Here we do some simple tests\n # of one-sided bounded Powell where the optimal solutions\n # are large to test the stability of the transformation.\n kwargs = {'method': 'Powell',\n 'bounds': [(-np.inf, 1e6)] * 3,\n 'options': {'ftol': 1e-8, 'xtol': 1e-8}}\n x0 = [1, 1, 1]\n\n # df/dx is constant.\n f = lambda x: -np.sum(x)\n res = optimize.minimize(f, x0, **kwargs)\n assert_allclose(res.fun, -3e6, atol=1e-4)\n\n # df/dx gets smaller and smaller.\n def f(x):\n return -np.abs(np.sum(x)) ** (0.1) * (1 if np.all(x > 0) else -1)\n\n res = optimize.minimize(f, x0, **kwargs)\n assert_allclose(res.fun, -(3e6) ** (0.1))\n\n # df/dx gets larger and larger.\n def f(x):\n return -np.abs(np.sum(x)) ** 10 * (1 if np.all(x > 0) else -1)\n\n res = optimize.minimize(f, x0, **kwargs)\n assert_allclose(res.fun, -(3e6) ** 10, rtol=1e-7)\n\n # df/dx gets larger for some of the variables and smaller for others.\n def f(x):\n t = -np.abs(np.sum(x[:2])) ** 5 - np.abs(np.sum(x[2:])) ** (0.1)\n t *= (1 if np.all(x > 0) else -1)\n return t\n\n kwargs['bounds'] = [(-np.inf, 1e3)] * 3\n res = optimize.minimize(f, x0, **kwargs)\n assert_allclose(res.fun, -(2e3) ** 5 - (1e6) ** (0.1), rtol=1e-7)\n\n\nclass TestOptimizeWrapperDisp(CheckOptimizeParameterized):\n use_wrapper = True\n disp = True\n\n\nclass TestOptimizeWrapperNoDisp(CheckOptimizeParameterized):\n use_wrapper = True\n disp = False\n\n\nclass TestOptimizeNoWrapperDisp(CheckOptimizeParameterized):\n use_wrapper = False\n disp = True\n\n\nclass TestOptimizeNoWrapperNoDisp(CheckOptimizeParameterized):\n use_wrapper = False\n disp = False\n\n\nclass TestOptimizeSimple(CheckOptimize):\n\n def test_bfgs_nan(self):\n # Test corner case where nan is fed to optimizer. See gh-2067.\n func = lambda x: x\n fprime = lambda x: np.ones_like(x)\n x0 = [np.nan]\n with np.errstate(over='ignore', invalid='ignore'):\n x = optimize.fmin_bfgs(func, x0, fprime, disp=False)\n assert_(np.isnan(func(x)))\n\n def test_bfgs_nan_return(self):\n # Test corner cases where fun returns NaN. 
See gh-4793.\n\n # First case: NaN from first call.\n func = lambda x: np.nan\n with np.errstate(invalid='ignore'):\n result = optimize.minimize(func, 0)\n\n assert_(np.isnan(result['fun']))\n assert_(result['success'] is False)\n\n # Second case: NaN from second call.\n func = lambda x: 0 if x == 0 else np.nan\n fprime = lambda x: np.ones_like(x) # Steer away from zero.\n with np.errstate(invalid='ignore'):\n result = optimize.minimize(func, 0, jac=fprime)\n\n assert_(np.isnan(result['fun']))\n assert_(result['success'] is False)\n\n def test_bfgs_numerical_jacobian(self):\n # BFGS with numerical Jacobian and a vector epsilon parameter.\n # define the epsilon parameter using a random vector\n epsilon = np.sqrt(np.spacing(1.)) * np.random.rand(len(self.solution))\n\n params = optimize.fmin_bfgs(self.func, self.startparams,\n epsilon=epsilon, args=(),\n maxiter=self.maxiter, disp=False)\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n def test_finite_differences(self):\n methods = ['BFGS', 'CG', 'TNC']\n jacs = ['2-point', '3-point', None]\n for method, jac in itertools.product(methods, jacs):\n result = optimize.minimize(self.func, self.startparams,\n method=method, jac=jac)\n assert_allclose(self.func(result.x), self.func(self.solution),\n atol=1e-6)\n\n def test_bfgs_gh_2169(self):\n def f(x):\n if x < 0:\n return 1.79769313e+308\n else:\n return x + 1./x\n xs = optimize.fmin_bfgs(f, [10.], disp=False)\n assert_allclose(xs, 1.0, rtol=1e-4, atol=1e-4)\n\n def test_bfgs_double_evaluations(self):\n # check BFGS does not evaluate twice in a row at same point\n def f(x):\n xp = float(x)\n assert xp not in seen\n seen.add(xp)\n return 10*x**2, 20*x\n\n seen = set()\n optimize.minimize(f, -100, method='bfgs', jac=True, tol=1e-7)\n\n def test_l_bfgs_b(self):\n # limited-memory bound-constrained BFGS algorithm\n retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,\n self.grad, args=(),\n maxiter=self.maxiter)\n\n (params, fopt, d) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n # Ensure that function call counts are 'known good'; these are from\n # SciPy 0.7.0. 
Don't allow them to increase.\n assert_(self.funccalls == 7, self.funccalls)\n assert_(self.gradcalls == 5, self.gradcalls)\n\n # Ensure that the function behaves the same; this is from SciPy 0.7.0\n # test fixed in gh10673\n assert_allclose(self.trace[3:5],\n [[8.117083e-16, -5.196198e-01, 4.897617e-01],\n [0., -0.52489628, 0.48753042]],\n atol=1e-14, rtol=1e-7)\n\n def test_l_bfgs_b_numjac(self):\n # L-BFGS-B with numerical Jacobian\n retval = optimize.fmin_l_bfgs_b(self.func, self.startparams,\n approx_grad=True,\n maxiter=self.maxiter)\n\n (params, fopt, d) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n def test_l_bfgs_b_funjac(self):\n # L-BFGS-B with combined objective function and Jacobian\n def fun(x):\n return self.func(x), self.grad(x)\n\n retval = optimize.fmin_l_bfgs_b(fun, self.startparams,\n maxiter=self.maxiter)\n\n (params, fopt, d) = retval\n\n assert_allclose(self.func(params), self.func(self.solution),\n atol=1e-6)\n\n def test_l_bfgs_b_maxiter(self):\n # gh7854\n # Ensure that not more than maxiters are ever run.\n class Callback:\n def __init__(self):\n self.nit = 0\n self.fun = None\n self.x = None\n\n def __call__(self, x):\n self.x = x\n self.fun = optimize.rosen(x)\n self.nit += 1\n\n c = Callback()\n res = optimize.minimize(optimize.rosen, [0., 0.], method='l-bfgs-b',\n callback=c, options={'maxiter': 5})\n\n assert_equal(res.nit, 5)\n assert_almost_equal(res.x, c.x)\n assert_almost_equal(res.fun, c.fun)\n assert_equal(res.status, 1)\n assert_(res.success is False)\n assert_equal(res.message,\n 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT')\n\n def test_minimize_l_bfgs_b(self):\n # Minimize with L-BFGS-B method\n opts = {'disp': False, 'maxiter': self.maxiter}\n r = optimize.minimize(self.func, self.startparams,\n method='L-BFGS-B', jac=self.grad,\n options=opts)\n assert_allclose(self.func(r.x), self.func(self.solution),\n atol=1e-6)\n assert self.gradcalls == r.njev\n\n self.funccalls = self.gradcalls = 0\n # approximate jacobian\n ra = optimize.minimize(self.func, self.startparams,\n method='L-BFGS-B', options=opts)\n # check that function evaluations in approximate jacobian are counted\n # assert_(ra.nfev > r.nfev)\n assert self.funccalls == ra.nfev\n assert_allclose(self.func(ra.x), self.func(self.solution),\n atol=1e-6)\n\n self.funccalls = self.gradcalls = 0\n # approximate jacobian\n ra = optimize.minimize(self.func, self.startparams, jac='3-point',\n method='L-BFGS-B', options=opts)\n assert self.funccalls == ra.nfev\n assert_allclose(self.func(ra.x), self.func(self.solution),\n atol=1e-6)\n\n def test_minimize_l_bfgs_b_ftol(self):\n # Check that the `ftol` parameter in l_bfgs_b works as expected\n v0 = None\n for tol in [1e-1, 1e-4, 1e-7, 1e-10]:\n opts = {'disp': False, 'maxiter': self.maxiter, 'ftol': tol}\n sol = optimize.minimize(self.func, self.startparams,\n method='L-BFGS-B', jac=self.grad,\n options=opts)\n v = self.func(sol.x)\n\n if v0 is None:\n v0 = v\n else:\n assert_(v < v0)\n\n assert_allclose(v, self.func(self.solution), rtol=tol)\n\n def test_minimize_l_bfgs_maxls(self):\n # check that the maxls is passed down to the Fortran routine\n sol = optimize.minimize(optimize.rosen, np.array([-1.2, 1.0]),\n method='L-BFGS-B', jac=optimize.rosen_der,\n options={'disp': False, 'maxls': 1})\n assert_(not sol.success)\n\n def test_minimize_l_bfgs_b_maxfun_interruption(self):\n # gh-6162\n f = optimize.rosen\n g = optimize.rosen_der\n values = []\n x0 = np.full(7, 1000)\n\n def objfun(x):\n value = f(x)\n 
values.append(value)\n return value\n\n # Look for an interesting test case.\n # Request a maxfun that stops at a particularly bad function\n # evaluation somewhere between 100 and 300 evaluations.\n low, medium, high = 30, 100, 300\n optimize.fmin_l_bfgs_b(objfun, x0, fprime=g, maxfun=high)\n v, k = max((y, i) for i, y in enumerate(values[medium:]))\n maxfun = medium + k\n # If the minimization strategy is reasonable,\n # the minimize() result should not be worse than the best\n # of the first 30 function evaluations.\n target = min(values[:low])\n xmin, fmin, d = optimize.fmin_l_bfgs_b(f, x0, fprime=g, maxfun=maxfun)\n assert_array_less(fmin, target)\n\n def test_custom(self):\n # This function comes from the documentation example.\n def custmin(fun, x0, args=(), maxfev=None, stepsize=0.1,\n maxiter=100, callback=None, **options):\n bestx = x0\n besty = fun(x0)\n funcalls = 1\n niter = 0\n improved = True\n stop = False\n\n while improved and not stop and niter < maxiter:\n improved = False\n niter += 1\n for dim in range(np.size(x0)):\n for s in [bestx[dim] - stepsize, bestx[dim] + stepsize]:\n testx = np.copy(bestx)\n testx[dim] = s\n testy = fun(testx, *args)\n funcalls += 1\n if testy < besty:\n besty = testy\n bestx = testx\n improved = True\n if callback is not None:\n callback(bestx)\n if maxfev is not None and funcalls >= maxfev:\n stop = True\n break\n\n return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,\n nfev=funcalls, success=(niter > 1))\n\n x0 = [1.35, 0.9, 0.8, 1.1, 1.2]\n res = optimize.minimize(optimize.rosen, x0, method=custmin,\n options=dict(stepsize=0.05))\n assert_allclose(res.x, 1.0, rtol=1e-4, atol=1e-4)\n\n def test_gh10771(self):\n # check that minimize passes bounds and constraints to a custom\n # minimizer without altering them.\n bounds = [(-2, 2), (0, 3)]\n constraints = 'constraints'\n\n def custmin(fun, x0, **options):\n assert options['bounds'] is bounds\n assert options['constraints'] is constraints\n return optimize.OptimizeResult()\n\n x0 = [1, 1]\n optimize.minimize(optimize.rosen, x0, method=custmin,\n bounds=bounds, constraints=constraints)\n\n def test_minimize_tol_parameter(self):\n # Check that the minimize() tol= argument does something\n def func(z):\n x, y = z\n return x**2*y**2 + x**4 + 1\n\n def dfunc(z):\n x, y = z\n return np.array([2*x*y**2 + 4*x**3, 2*x**2*y])\n\n for method in ['nelder-mead', 'powell', 'cg', 'bfgs',\n 'newton-cg', 'l-bfgs-b', 'tnc',\n 'cobyla', 'slsqp']:\n if method in ('nelder-mead', 'powell', 'cobyla'):\n jac = None\n else:\n jac = dfunc\n\n sol1 = optimize.minimize(func, [1, 1], jac=jac, tol=1e-10,\n method=method)\n sol2 = optimize.minimize(func, [1, 1], jac=jac, tol=1.0,\n method=method)\n assert_(func(sol1.x) < func(sol2.x),\n \"%s: %s vs. 
%s\" % (method, func(sol1.x), func(sol2.x)))\n\n @pytest.mark.parametrize('method',\n ['fmin', 'fmin_powell', 'fmin_cg', 'fmin_bfgs',\n 'fmin_ncg', 'fmin_l_bfgs_b', 'fmin_tnc',\n 'fmin_slsqp'] + MINIMIZE_METHODS)\n def test_minimize_callback_copies_array(self, method):\n # Check that arrays passed to callbacks are not modified\n # inplace by the optimizer afterward\n\n # cobyla doesn't have callback\n if method == 'cobyla':\n return\n\n if method in ('fmin_tnc', 'fmin_l_bfgs_b'):\n func = lambda x: (optimize.rosen(x), optimize.rosen_der(x))\n else:\n func = optimize.rosen\n jac = optimize.rosen_der\n hess = optimize.rosen_hess\n\n x0 = np.zeros(10)\n\n # Set options\n kwargs = {}\n if method.startswith('fmin'):\n routine = getattr(optimize, method)\n if method == 'fmin_slsqp':\n kwargs['iter'] = 5\n elif method == 'fmin_tnc':\n kwargs['maxfun'] = 100\n else:\n kwargs['maxiter'] = 5\n else:\n def routine(*a, **kw):\n kw['method'] = method\n return optimize.minimize(*a, **kw)\n\n if method == 'tnc':\n kwargs['options'] = dict(maxfun=100)\n else:\n kwargs['options'] = dict(maxiter=5)\n\n if method in ('fmin_ncg',):\n kwargs['fprime'] = jac\n elif method in ('newton-cg',):\n kwargs['jac'] = jac\n elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',\n 'trust-constr'):\n kwargs['jac'] = jac\n kwargs['hess'] = hess\n\n # Run with callback\n results = []\n\n def callback(x, *args, **kwargs):\n results.append((x, np.copy(x)))\n\n routine(func, x0, callback=callback, **kwargs)\n\n # Check returned arrays coincide with their copies\n # and have no memory overlap\n assert_(len(results) > 2)\n assert_(all(np.all(x == y) for x, y in results))\n assert_(not any(np.may_share_memory(x[0], y[0])\n for x, y in itertools.combinations(results, 2)))\n\n @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg',\n 'bfgs', 'newton-cg', 'l-bfgs-b',\n 'tnc', 'cobyla', 'slsqp'])\n def test_no_increase(self, method):\n # Check that the solver doesn't return a value worse than the\n # initial point.\n\n def func(x):\n return (x - 1)**2\n\n def bad_grad(x):\n # purposefully invalid gradient function, simulates a case\n # where line searches start failing\n return 2*(x - 1) * (-1) - 2\n\n x0 = np.array([2.0])\n f0 = func(x0)\n jac = bad_grad\n if method in ['nelder-mead', 'powell', 'cobyla']:\n jac = None\n sol = optimize.minimize(func, x0, jac=jac, method=method,\n options=dict(maxiter=20))\n assert_equal(func(sol.x), sol.fun)\n\n if method == 'slsqp':\n pytest.xfail(\"SLSQP returns slightly worse\")\n assert_(func(sol.x) <= f0)\n\n def test_slsqp_respect_bounds(self):\n # Regression test for gh-3108\n def f(x):\n return sum((x - np.array([1., 2., 3., 4.]))**2)\n\n def cons(x):\n a = np.array([[-1, -1, -1, -1], [-3, -3, -2, -1]])\n return np.concatenate([np.dot(a, x) + np.array([5, 10]), x])\n\n x0 = np.array([0.5, 1., 1.5, 2.])\n res = optimize.minimize(f, x0, method='slsqp',\n constraints={'type': 'ineq', 'fun': cons})\n assert_allclose(res.x, np.array([0., 2, 5, 8])/3, atol=1e-12)\n\n @pytest.mark.parametrize('method', ['Nelder-Mead', 'Powell', 'CG', 'BFGS',\n 'Newton-CG', 'L-BFGS-B', 'SLSQP',\n 'trust-constr', 'dogleg', 'trust-ncg',\n 'trust-exact', 'trust-krylov'])\n def test_respect_maxiter(self, method):\n # Check that the number of iterations equals max_iter, assuming\n # convergence doesn't establish before\n MAXITER = 4\n\n x0 = np.zeros(10)\n\n sf = ScalarFunction(optimize.rosen, x0, (), optimize.rosen_der,\n optimize.rosen_hess, None, None)\n\n # Set options\n kwargs = {'method': 
method, 'options': dict(maxiter=MAXITER)}\n\n if method in ('Newton-CG',):\n kwargs['jac'] = sf.grad\n elif method in ('trust-krylov', 'trust-exact', 'trust-ncg', 'dogleg',\n 'trust-constr'):\n kwargs['jac'] = sf.grad\n kwargs['hess'] = sf.hess\n\n sol = optimize.minimize(sf.fun, x0, **kwargs)\n assert sol.nit == MAXITER\n assert sol.nfev >= sf.nfev\n if hasattr(sol, 'njev'):\n assert sol.njev >= sf.ngev\n\n # method specific tests\n if method == 'SLSQP':\n assert sol.status == 9 # Iteration limit reached\n\n def test_respect_maxiter_trust_constr_ineq_constraints(self):\n # special case of minimization with trust-constr and inequality\n # constraints to check maxiter limit is obeyed when using internal\n # method 'tr_interior_point'\n MAXITER = 4\n f = optimize.rosen\n jac = optimize.rosen_der\n hess = optimize.rosen_hess\n\n fun = lambda x: np.array([0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])\n cons = ({'type': 'ineq',\n 'fun': fun},)\n\n x0 = np.zeros(10)\n sol = optimize.minimize(f, x0, constraints=cons, jac=jac, hess=hess,\n method='trust-constr',\n options=dict(maxiter=MAXITER))\n assert sol.nit == MAXITER\n\n def test_minimize_automethod(self):\n def f(x):\n return x**2\n\n def cons(x):\n return x - 2\n\n x0 = np.array([10.])\n sol_0 = optimize.minimize(f, x0)\n sol_1 = optimize.minimize(f, x0, constraints=[{'type': 'ineq',\n 'fun': cons}])\n sol_2 = optimize.minimize(f, x0, bounds=[(5, 10)])\n sol_3 = optimize.minimize(f, x0,\n constraints=[{'type': 'ineq', 'fun': cons}],\n bounds=[(5, 10)])\n sol_4 = optimize.minimize(f, x0,\n constraints=[{'type': 'ineq', 'fun': cons}],\n bounds=[(1, 10)])\n for sol in [sol_0, sol_1, sol_2, sol_3, sol_4]:\n assert_(sol.success)\n assert_allclose(sol_0.x, 0, atol=1e-7)\n assert_allclose(sol_1.x, 2, atol=1e-7)\n assert_allclose(sol_2.x, 5, atol=1e-7)\n assert_allclose(sol_3.x, 5, atol=1e-7)\n assert_allclose(sol_4.x, 2, atol=1e-7)\n\n def test_minimize_coerce_args_param(self):\n # Regression test for gh-3503\n def Y(x, c):\n return np.sum((x-c)**2)\n\n def dY_dx(x, c=None):\n return 2*(x-c)\n\n c = np.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5])\n xinit = np.random.randn(len(c))\n optimize.minimize(Y, xinit, jac=dY_dx, args=(c), method=\"BFGS\")\n\n def test_initial_step_scaling(self):\n # Check that optimizer initial step is not huge even if the\n # function and gradients are\n\n scales = [1e-50, 1, 1e50]\n methods = ['CG', 'BFGS', 'L-BFGS-B', 'Newton-CG']\n\n def f(x):\n if first_step_size[0] is None and x[0] != x0[0]:\n first_step_size[0] = abs(x[0] - x0[0])\n if abs(x).max() > 1e4:\n raise AssertionError(\"Optimization stepped far away!\")\n return scale*(x[0] - 1)**2\n\n def g(x):\n return np.array([scale*(x[0] - 1)])\n\n for scale, method in itertools.product(scales, methods):\n if method in ('CG', 'BFGS'):\n options = dict(gtol=scale*1e-8)\n else:\n options = dict()\n\n if scale < 1e-10 and method in ('L-BFGS-B', 'Newton-CG'):\n # XXX: return initial point if they see small gradient\n continue\n\n x0 = [-1.0]\n first_step_size = [None]\n res = optimize.minimize(f, x0, jac=g, method=method,\n options=options)\n\n err_msg = \"{0} {1}: {2}: {3}\".format(method, scale,\n first_step_size,\n res)\n\n assert_(res.success, err_msg)\n assert_allclose(res.x, [1.0], err_msg=err_msg)\n assert_(res.nit <= 3, err_msg)\n\n if scale > 1e-10:\n if method in ('CG', 'BFGS'):\n assert_allclose(first_step_size[0], 1.01, err_msg=err_msg)\n else:\n # Newton-CG and L-BFGS-B use different logic for the first\n # step, but are both scaling invariant with step sizes ~ 1\n 
assert_(first_step_size[0] > 0.5 and\n first_step_size[0] < 3, err_msg)\n else:\n # step size has upper bound of ||grad||, so line\n # search makes many small steps\n pass\n\n @pytest.mark.parametrize('method', ['nelder-mead', 'powell', 'cg', 'bfgs',\n 'newton-cg', 'l-bfgs-b', 'tnc',\n 'cobyla', 'slsqp', 'trust-constr',\n 'dogleg', 'trust-ncg', 'trust-exact',\n 'trust-krylov'])\n def test_nan_values(self, method):\n # Check nan values result to failed exit status\n np.random.seed(1234)\n\n count = [0]\n\n def func(x):\n return np.nan\n\n def func2(x):\n count[0] += 1\n if count[0] > 2:\n return np.nan\n else:\n return np.random.rand()\n\n def grad(x):\n return np.array([1.0])\n\n def hess(x):\n return np.array([[1.0]])\n\n x0 = np.array([1.0])\n\n needs_grad = method in ('newton-cg', 'trust-krylov', 'trust-exact',\n 'trust-ncg', 'dogleg')\n needs_hess = method in ('trust-krylov', 'trust-exact', 'trust-ncg',\n 'dogleg')\n\n funcs = [func, func2]\n grads = [grad] if needs_grad else [grad, None]\n hesss = [hess] if needs_hess else [hess, None]\n\n with np.errstate(invalid='ignore'), suppress_warnings() as sup:\n sup.filter(UserWarning, \"delta_grad == 0.*\")\n sup.filter(RuntimeWarning, \".*does not use Hessian.*\")\n sup.filter(RuntimeWarning, \".*does not use gradient.*\")\n\n for f, g, h in itertools.product(funcs, grads, hesss):\n count = [0]\n sol = optimize.minimize(f, x0, jac=g, hess=h, method=method,\n options=dict(maxiter=20))\n assert_equal(sol.success, False)\n\n @pytest.mark.parametrize('method', ['nelder-mead', 'cg', 'bfgs',\n 'l-bfgs-b', 'tnc',\n 'cobyla', 'slsqp', 'trust-constr',\n 'dogleg', 'trust-ncg', 'trust-exact',\n 'trust-krylov'])\n def test_duplicate_evaluations(self, method):\n # check that there are no duplicate evaluations for any methods\n jac = hess = None\n if method in ('newton-cg', 'trust-krylov', 'trust-exact',\n 'trust-ncg', 'dogleg'):\n jac = self.grad\n if method in ('trust-krylov', 'trust-exact', 'trust-ncg',\n 'dogleg'):\n hess = self.hess\n\n with np.errstate(invalid='ignore'), suppress_warnings() as sup:\n # for trust-constr\n sup.filter(UserWarning, \"delta_grad == 0.*\")\n optimize.minimize(self.func, self.startparams,\n method=method, jac=jac, hess=hess)\n\n for i in range(1, len(self.trace)):\n if np.array_equal(self.trace[i - 1], self.trace[i]):\n raise RuntimeError(\n \"Duplicate evaluations made by {}\".format(method))\n\n\nclass TestLBFGSBBounds:\n def setup_method(self):\n self.bounds = ((1, None), (None, None))\n self.solution = (1, 0)\n\n def fun(self, x, p=2.0):\n return 1.0 / p * (x[0]**p + x[1]**p)\n\n def jac(self, x, p=2.0):\n return x**(p - 1)\n\n def fj(self, x, p=2.0):\n return self.fun(x, p), self.jac(x, p)\n\n def test_l_bfgs_b_bounds(self):\n x, f, d = optimize.fmin_l_bfgs_b(self.fun, [0, -1],\n fprime=self.jac,\n bounds=self.bounds)\n assert_(d['warnflag'] == 0, d['task'])\n assert_allclose(x, self.solution, atol=1e-6)\n\n def test_l_bfgs_b_funjac(self):\n # L-BFGS-B with fun and jac combined and extra arguments\n x, f, d = optimize.fmin_l_bfgs_b(self.fj, [0, -1], args=(2.0, ),\n bounds=self.bounds)\n assert_(d['warnflag'] == 0, d['task'])\n assert_allclose(x, self.solution, atol=1e-6)\n\n def test_minimize_l_bfgs_b_bounds(self):\n # Minimize with method='L-BFGS-B' with bounds\n res = optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',\n jac=self.jac, bounds=self.bounds)\n assert_(res['success'], res['message'])\n assert_allclose(res.x, self.solution, atol=1e-6)\n\n @pytest.mark.parametrize('bounds', [\n ([(10, 1), (1, 10)]),\n 
([(1, 10), (10, 1)]),\n ([(10, 1), (10, 1)])\n ])\n def test_minimize_l_bfgs_b_incorrect_bounds(self, bounds):\n with pytest.raises(ValueError, match='.*bounds.*'):\n optimize.minimize(self.fun, [0, -1], method='L-BFGS-B',\n jac=self.jac, bounds=bounds)\n\n def test_minimize_l_bfgs_b_bounds_FD(self):\n # test that initial starting value outside bounds doesn't raise\n # an error (done with clipping).\n # test all different finite differences combos, with and without args\n\n jacs = ['2-point', '3-point', None]\n argss = [(2.,), ()]\n for jac, args in itertools.product(jacs, argss):\n res = optimize.minimize(self.fun, [0, -1], args=args,\n method='L-BFGS-B',\n jac=jac, bounds=self.bounds,\n options={'finite_diff_rel_step': None})\n assert_(res['success'], res['message'])\n assert_allclose(res.x, self.solution, atol=1e-6)\n\n\nclass TestOptimizeScalar:\n def setup_method(self):\n self.solution = 1.5\n\n def fun(self, x, a=1.5):\n \"\"\"Objective function\"\"\"\n return (x - a)**2 - 0.8\n\n def test_brent(self):\n x = optimize.brent(self.fun)\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.brent(self.fun, brack=(-3, -2))\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.brent(self.fun, full_output=True)\n assert_allclose(x[0], self.solution, atol=1e-6)\n\n x = optimize.brent(self.fun, brack=(-15, -1, 15))\n assert_allclose(x, self.solution, atol=1e-6)\n\n def test_golden(self):\n x = optimize.golden(self.fun)\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.golden(self.fun, brack=(-3, -2))\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.golden(self.fun, full_output=True)\n assert_allclose(x[0], self.solution, atol=1e-6)\n\n x = optimize.golden(self.fun, brack=(-15, -1, 15))\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.golden(self.fun, tol=0)\n assert_allclose(x, self.solution)\n\n maxiter_test_cases = [0, 1, 5]\n for maxiter in maxiter_test_cases:\n x0 = optimize.golden(self.fun, maxiter=0, full_output=True)\n x = optimize.golden(self.fun, maxiter=maxiter, full_output=True)\n nfev0, nfev = x0[2], x[2]\n assert_equal(nfev - nfev0, maxiter)\n\n def test_fminbound(self):\n x = optimize.fminbound(self.fun, 0, 1)\n assert_allclose(x, 1, atol=1e-4)\n\n x = optimize.fminbound(self.fun, 1, 5)\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.fminbound(self.fun, np.array([1]), np.array([5]))\n assert_allclose(x, self.solution, atol=1e-6)\n assert_raises(ValueError, optimize.fminbound, self.fun, 5, 1)\n\n def test_fminbound_scalar(self):\n with pytest.raises(ValueError, match='.*must be scalar.*'):\n optimize.fminbound(self.fun, np.zeros((1, 2)), 1)\n\n x = optimize.fminbound(self.fun, 1, np.array(5))\n assert_allclose(x, self.solution, atol=1e-6)\n\n def test_gh11207(self):\n def fun(x):\n return x**2\n optimize.fminbound(fun, 0, 0)\n\n def test_minimize_scalar(self):\n # combine all tests above for the minimize_scalar wrapper\n x = optimize.minimize_scalar(self.fun).x\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.minimize_scalar(self.fun, method='Brent')\n assert_(x.success)\n\n x = optimize.minimize_scalar(self.fun, method='Brent',\n options=dict(maxiter=3))\n assert_(not x.success)\n\n x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),\n args=(1.5, ), method='Brent').x\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.minimize_scalar(self.fun, method='Brent',\n args=(1.5,)).x\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.minimize_scalar(self.fun, 
bracket=(-15, -1, 15),\n args=(1.5, ), method='Brent').x\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.minimize_scalar(self.fun, bracket=(-3, -2),\n args=(1.5, ), method='golden').x\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.minimize_scalar(self.fun, method='golden',\n args=(1.5,)).x\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.minimize_scalar(self.fun, bracket=(-15, -1, 15),\n args=(1.5, ), method='golden').x\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.minimize_scalar(self.fun, bounds=(0, 1), args=(1.5,),\n method='Bounded').x\n assert_allclose(x, 1, atol=1e-4)\n\n x = optimize.minimize_scalar(self.fun, bounds=(1, 5), args=(1.5, ),\n method='bounded').x\n assert_allclose(x, self.solution, atol=1e-6)\n\n x = optimize.minimize_scalar(self.fun, bounds=(np.array([1]),\n np.array([5])),\n args=(np.array([1.5]), ),\n method='bounded').x\n assert_allclose(x, self.solution, atol=1e-6)\n\n assert_raises(ValueError, optimize.minimize_scalar, self.fun,\n bounds=(5, 1), method='bounded', args=(1.5, ))\n\n assert_raises(ValueError, optimize.minimize_scalar, self.fun,\n bounds=(np.zeros(2), 1), method='bounded', args=(1.5, ))\n\n x = optimize.minimize_scalar(self.fun, bounds=(1, np.array(5)),\n method='bounded').x\n assert_allclose(x, self.solution, atol=1e-6)\n\n def test_minimize_scalar_custom(self):\n # This function comes from the documentation example.\n def custmin(fun, bracket, args=(), maxfev=None, stepsize=0.1,\n maxiter=100, callback=None, **options):\n bestx = (bracket[1] + bracket[0]) / 2.0\n besty = fun(bestx)\n funcalls = 1\n niter = 0\n improved = True\n stop = False\n\n while improved and not stop and niter < maxiter:\n improved = False\n niter += 1\n for testx in [bestx - stepsize, bestx + stepsize]:\n testy = fun(testx, *args)\n funcalls += 1\n if testy < besty:\n besty = testy\n bestx = testx\n improved = True\n if callback is not None:\n callback(bestx)\n if maxfev is not None and funcalls >= maxfev:\n stop = True\n break\n\n return optimize.OptimizeResult(fun=besty, x=bestx, nit=niter,\n nfev=funcalls, success=(niter > 1))\n\n res = optimize.minimize_scalar(self.fun, bracket=(0, 4),\n method=custmin,\n options=dict(stepsize=0.05))\n assert_allclose(res.x, self.solution, atol=1e-6)\n\n def test_minimize_scalar_coerce_args_param(self):\n # Regression test for gh-3503\n optimize.minimize_scalar(self.fun, args=1.5)\n\n @pytest.mark.parametrize('method', ['brent', 'bounded', 'golden'])\n def test_nan_values(self, method):\n # Check nan values result to failed exit status\n np.random.seed(1234)\n\n count = [0]\n\n def func(x):\n count[0] += 1\n if count[0] > 4:\n return np.nan\n else:\n return x**2 + 0.1 * np.sin(x)\n\n bracket = (-1, 0, 1)\n bounds = (-1, 1)\n\n with np.errstate(invalid='ignore'), suppress_warnings() as sup:\n sup.filter(UserWarning, \"delta_grad == 0.*\")\n sup.filter(RuntimeWarning, \".*does not use Hessian.*\")\n sup.filter(RuntimeWarning, \".*does not use gradient.*\")\n\n count = [0]\n sol = optimize.minimize_scalar(func, bracket=bracket,\n bounds=bounds, method=method,\n options=dict(maxiter=20))\n assert_equal(sol.success, False)\n\n\ndef test_brent_negative_tolerance():\n assert_raises(ValueError, optimize.brent, np.cos, tol=-.01)\n\n\nclass TestNewtonCg:\n def test_rosenbrock(self):\n x0 = np.array([-1.2, 1.0])\n sol = optimize.minimize(optimize.rosen, x0,\n jac=optimize.rosen_der,\n hess=optimize.rosen_hess,\n tol=1e-5,\n method='Newton-CG')\n assert_(sol.success, sol.message)\n 
assert_allclose(sol.x, np.array([1, 1]), rtol=1e-4)\n\n def test_himmelblau(self):\n x0 = np.array(himmelblau_x0)\n sol = optimize.minimize(himmelblau,\n x0,\n jac=himmelblau_grad,\n hess=himmelblau_hess,\n method='Newton-CG',\n tol=1e-6)\n assert_(sol.success, sol.message)\n assert_allclose(sol.x, himmelblau_xopt, rtol=1e-4)\n assert_allclose(sol.fun, himmelblau_min, atol=1e-4)\n\n\ndef test_line_for_search():\n # _line_for_search is only used in _linesearch_powell, which is also\n # tested below. Thus there are more tests of _line_for_search in the\n # test_linesearch_powell_bounded function.\n\n line_for_search = optimize.optimize._line_for_search\n # args are x0, alpha, lower_bound, upper_bound\n # returns lmin, lmax\n\n lower_bound = np.array([-5.3, -1, -1.5, -3])\n upper_bound = np.array([1.9, 1, 2.8, 3])\n\n # test when starting in the bounds\n x0 = np.array([0., 0, 0, 0])\n # and when starting outside of the bounds\n x1 = np.array([0., 2, -3, 0])\n\n all_tests = (\n (x0, np.array([1., 0, 0, 0]), -5.3, 1.9),\n (x0, np.array([0., 1, 0, 0]), -1, 1),\n (x0, np.array([0., 0, 1, 0]), -1.5, 2.8),\n (x0, np.array([0., 0, 0, 1]), -3, 3),\n (x0, np.array([1., 1, 0, 0]), -1, 1),\n (x0, np.array([1., 0, -1, 2]), -1.5, 1.5),\n (x0, np.array([2., 0, -1, 2]), -1.5, 0.95),\n (x1, np.array([1., 0, 0, 0]), -5.3, 1.9),\n (x1, np.array([0., 1, 0, 0]), -3, -1),\n (x1, np.array([0., 0, 1, 0]), 1.5, 5.8),\n (x1, np.array([0., 0, 0, 1]), -3, 3),\n (x1, np.array([1., 1, 0, 0]), -3, -1),\n (x1, np.array([1., 0, -1, 0]), -5.3, -1.5),\n )\n\n for x, alpha, lmin, lmax in all_tests:\n mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)\n assert_allclose(mi, lmin, atol=1e-6)\n assert_allclose(ma, lmax, atol=1e-6)\n\n # now with infinite bounds\n lower_bound = np.array([-np.inf, -1, -np.inf, -3])\n upper_bound = np.array([np.inf, 1, 2.8, np.inf])\n\n all_tests = (\n (x0, np.array([1., 0, 0, 0]), -np.inf, np.inf),\n (x0, np.array([0., 1, 0, 0]), -1, 1),\n (x0, np.array([0., 0, 1, 0]), -np.inf, 2.8),\n (x0, np.array([0., 0, 0, 1]), -3, np.inf),\n (x0, np.array([1., 1, 0, 0]), -1, 1),\n (x0, np.array([1., 0, -1, 2]), -1.5, np.inf),\n (x1, np.array([1., 0, 0, 0]), -np.inf, np.inf),\n (x1, np.array([0., 1, 0, 0]), -3, -1),\n (x1, np.array([0., 0, 1, 0]), -np.inf, 5.8),\n (x1, np.array([0., 0, 0, 1]), -3, np.inf),\n (x1, np.array([1., 1, 0, 0]), -3, -1),\n (x1, np.array([1., 0, -1, 0]), -5.8, np.inf),\n )\n\n for x, alpha, lmin, lmax in all_tests:\n mi, ma = line_for_search(x, alpha, lower_bound, upper_bound)\n assert_allclose(mi, lmin, atol=1e-6)\n assert_allclose(ma, lmax, atol=1e-6)\n\n\ndef test_linesearch_powell():\n # helper function in optimize.py, not a public function.\n linesearch_powell = optimize.optimize._linesearch_powell\n # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3\n # returns new_fval, p + direction, direction\n func = lambda x: np.sum((x - np.array([-1., 2., 1.5, -.4]))**2)\n p0 = np.array([0., 0, 0, 0])\n fval = func(p0)\n lower_bound = np.array([-np.inf] * 4)\n upper_bound = np.array([np.inf] * 4)\n\n all_tests = (\n (np.array([1., 0, 0, 0]), -1),\n (np.array([0., 1, 0, 0]), 2),\n (np.array([0., 0, 1, 0]), 1.5),\n (np.array([0., 0, 0, 1]), -.4),\n (np.array([-1., 0, 1, 0]), 1.25),\n (np.array([0., 0, 1, 1]), .55),\n (np.array([2., 0, -1, 1]), -.65),\n )\n\n for xi, l in all_tests:\n f, p, direction = linesearch_powell(func, p0, xi,\n fval=fval, tol=1e-5)\n assert_allclose(f, func(l * xi), atol=1e-6)\n assert_allclose(p, l * xi, atol=1e-6)\n 
assert_allclose(direction, l * xi, atol=1e-6)\n\n f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n fval=fval)\n assert_allclose(f, func(l * xi), atol=1e-6)\n assert_allclose(p, l * xi, atol=1e-6)\n assert_allclose(direction, l * xi, atol=1e-6)\n\n\ndef test_linesearch_powell_bounded():\n # helper function in optimize.py, not a public function.\n linesearch_powell = optimize.optimize._linesearch_powell\n # args are func, p, xi, fval, lower_bound=None, upper_bound=None, tol=1e-3\n # returns new_fval, p+direction, direction\n func = lambda x: np.sum((x-np.array([-1., 2., 1.5, -.4]))**2)\n p0 = np.array([0., 0, 0, 0])\n fval = func(p0)\n\n # first choose bounds such that the same tests from\n # test_linesearch_powell should pass.\n lower_bound = np.array([-2.]*4)\n upper_bound = np.array([2.]*4)\n\n all_tests = (\n (np.array([1., 0, 0, 0]), -1),\n (np.array([0., 1, 0, 0]), 2),\n (np.array([0., 0, 1, 0]), 1.5),\n (np.array([0., 0, 0, 1]), -.4),\n (np.array([-1., 0, 1, 0]), 1.25),\n (np.array([0., 0, 1, 1]), .55),\n (np.array([2., 0, -1, 1]), -.65),\n )\n\n for xi, l in all_tests:\n f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n fval=fval)\n assert_allclose(f, func(l * xi), atol=1e-6)\n assert_allclose(p, l * xi, atol=1e-6)\n assert_allclose(direction, l * xi, atol=1e-6)\n\n # now choose bounds such that unbounded vs bounded gives different results\n lower_bound = np.array([-.3]*3 + [-1])\n upper_bound = np.array([.45]*3 + [.9])\n\n all_tests = (\n (np.array([1., 0, 0, 0]), -.3),\n (np.array([0., 1, 0, 0]), .45),\n (np.array([0., 0, 1, 0]), .45),\n (np.array([0., 0, 0, 1]), -.4),\n (np.array([-1., 0, 1, 0]), .3),\n (np.array([0., 0, 1, 1]), .45),\n (np.array([2., 0, -1, 1]), -.15),\n )\n\n for xi, l in all_tests:\n f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n fval=fval)\n assert_allclose(f, func(l * xi), atol=1e-6)\n assert_allclose(p, l * xi, atol=1e-6)\n assert_allclose(direction, l * xi, atol=1e-6)\n\n # now choose as above but start outside the bounds\n p0 = np.array([-1., 0, 0, 2])\n fval = func(p0)\n\n all_tests = (\n (np.array([1., 0, 0, 0]), .7),\n (np.array([0., 1, 0, 0]), .45),\n (np.array([0., 0, 1, 0]), .45),\n (np.array([0., 0, 0, 1]), -2.4),\n )\n\n for xi, l in all_tests:\n f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n fval=fval)\n assert_allclose(f, func(p0 + l * xi), atol=1e-6)\n assert_allclose(p, p0 + l * xi, atol=1e-6)\n assert_allclose(direction, l * xi, atol=1e-6)\n\n # now mix in inf\n p0 = np.array([0., 0, 0, 0])\n fval = func(p0)\n\n # now choose bounds that mix inf\n lower_bound = np.array([-.3, -np.inf, -np.inf, -1])\n upper_bound = np.array([np.inf, .45, np.inf, .9])\n\n all_tests = (\n (np.array([1., 0, 0, 0]), -.3),\n (np.array([0., 1, 0, 0]), .45),\n (np.array([0., 0, 1, 0]), 1.5),\n (np.array([0., 0, 0, 1]), -.4),\n (np.array([-1., 0, 1, 0]), .3),\n (np.array([0., 0, 1, 1]), .55),\n (np.array([2., 0, -1, 1]), -.15),\n )\n\n for xi, l in all_tests:\n f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n fval=fval)\n assert_allclose(f, func(l * xi), atol=1e-6)\n assert_allclose(p, l * xi, atol=1e-6)\n assert_allclose(direction, l * xi, atol=1e-6)\n\n # now choose as above but start outside the bounds\n p0 = np.array([-1., 0, 0, 2])\n 
fval = func(p0)\n\n all_tests = (\n (np.array([1., 0, 0, 0]), .7),\n (np.array([0., 1, 0, 0]), .45),\n (np.array([0., 0, 1, 0]), 1.5),\n (np.array([0., 0, 0, 1]), -2.4),\n )\n\n for xi, l in all_tests:\n f, p, direction = linesearch_powell(func, p0, xi, tol=1e-5,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n fval=fval)\n assert_allclose(f, func(p0 + l * xi), atol=1e-6)\n assert_allclose(p, p0 + l * xi, atol=1e-6)\n assert_allclose(direction, l * xi, atol=1e-6)\n\n\nclass TestRosen:\n\n def test_hess(self):\n # Compare rosen_hess(x) times p with rosen_hess_prod(x,p). See gh-1775.\n x = np.array([3, 4, 5])\n p = np.array([2, 2, 2])\n hp = optimize.rosen_hess_prod(x, p)\n dothp = np.dot(optimize.rosen_hess(x), p)\n assert_equal(hp, dothp)\n\n\ndef himmelblau(p):\n \"\"\"\n R^2 -> R^1 test function for optimization. The function has four local\n minima where himmelblau(xopt) == 0.\n \"\"\"\n x, y = p\n a = x*x + y - 11\n b = x + y*y - 7\n return a*a + b*b\n\n\ndef himmelblau_grad(p):\n x, y = p\n return np.array([4*x**3 + 4*x*y - 42*x + 2*y**2 - 14,\n 2*x**2 + 4*x*y + 4*y**3 - 26*y - 22])\n\n\ndef himmelblau_hess(p):\n x, y = p\n return np.array([[12*x**2 + 4*y - 42, 4*x + 4*y],\n [4*x + 4*y, 4*x + 12*y**2 - 26]])\n\n\nhimmelblau_x0 = [-0.27, -0.9]\nhimmelblau_xopt = [3, 2]\nhimmelblau_min = 0.0\n\n\ndef test_minimize_multiple_constraints():\n # Regression test for gh-4240.\n def func(x):\n return np.array([25 - 0.2 * x[0] - 0.4 * x[1] - 0.33 * x[2]])\n\n def func1(x):\n return np.array([x[1]])\n\n def func2(x):\n return np.array([x[2]])\n\n cons = ({'type': 'ineq', 'fun': func},\n {'type': 'ineq', 'fun': func1},\n {'type': 'ineq', 'fun': func2})\n\n f = lambda x: -1 * (x[0] + x[1] + x[2])\n\n res = optimize.minimize(f, [0, 0, 0], method='SLSQP', constraints=cons)\n assert_allclose(res.x, [125, 0, 0], atol=1e-10)\n\n\nclass TestOptimizeResultAttributes:\n # Test that all minimizers return an OptimizeResult containing\n # all the OptimizeResult attributes\n def setup_method(self):\n self.x0 = [5, 5]\n self.func = optimize.rosen\n self.jac = optimize.rosen_der\n self.hess = optimize.rosen_hess\n self.hessp = optimize.rosen_hess_prod\n self.bounds = [(0., 10.), (0., 10.)]\n\n def test_attributes_present(self):\n attributes = ['nit', 'nfev', 'x', 'success', 'status', 'fun',\n 'message']\n skip = {'cobyla': ['nit']}\n for method in MINIMIZE_METHODS:\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning,\n (\"Method .+ does not use (gradient|Hessian.*)\"\n \" information\"))\n res = optimize.minimize(self.func, self.x0, method=method,\n jac=self.jac, hess=self.hess,\n hessp=self.hessp)\n for attribute in attributes:\n if method in skip and attribute in skip[method]:\n continue\n\n assert hasattr(res, attribute)\n assert_(attribute in dir(res))\n\n # gh13001, OptimizeResult.message should be a str\n assert isinstance(res.message, str)\n\n\ndef f1(z, *params):\n x, y = z\n a, b, c, d, e, f, g, h, i, j, k, l, scale = params\n return (a * x**2 + b * x * y + c * y**2 + d*x + e*y + f)\n\n\ndef f2(z, *params):\n x, y = z\n a, b, c, d, e, f, g, h, i, j, k, l, scale = params\n return (-g*np.exp(-((x-h)**2 + (y-i)**2) / scale))\n\n\ndef f3(z, *params):\n x, y = z\n a, b, c, d, e, f, g, h, i, j, k, l, scale = params\n return (-j*np.exp(-((x-k)**2 + (y-l)**2) / scale))\n\n\ndef brute_func(z, *params):\n return f1(z, *params) + f2(z, *params) + f3(z, *params)\n\n\nclass TestBrute:\n # Test the \"brute force\" method\n def setup_method(self):\n self.params = (2, 3, 7, 8, 9, 10, 44, -1, 2, 26, 
1, -2, 0.5)\n self.rranges = (slice(-4, 4, 0.25), slice(-4, 4, 0.25))\n self.solution = np.array([-1.05665192, 1.80834843])\n\n def brute_func(self, z, *params):\n # an instance method optimizing\n return brute_func(z, *params)\n\n def test_brute(self):\n # test fmin\n resbrute = optimize.brute(brute_func, self.rranges, args=self.params,\n full_output=True, finish=optimize.fmin)\n assert_allclose(resbrute[0], self.solution, atol=1e-3)\n assert_allclose(resbrute[1], brute_func(self.solution, *self.params),\n atol=1e-3)\n\n # test minimize\n resbrute = optimize.brute(brute_func, self.rranges, args=self.params,\n full_output=True,\n finish=optimize.minimize)\n assert_allclose(resbrute[0], self.solution, atol=1e-3)\n assert_allclose(resbrute[1], brute_func(self.solution, *self.params),\n atol=1e-3)\n\n # test that brute can optimize an instance method (the other tests use\n # a non-class based function\n resbrute = optimize.brute(self.brute_func, self.rranges,\n args=self.params, full_output=True,\n finish=optimize.minimize)\n assert_allclose(resbrute[0], self.solution, atol=1e-3)\n\n def test_1D(self):\n # test that for a 1-D problem the test function is passed an array,\n # not a scalar.\n def f(x):\n assert_(len(x.shape) == 1)\n assert_(x.shape[0] == 1)\n return x ** 2\n\n optimize.brute(f, [(-1, 1)], Ns=3, finish=None)\n\n def test_workers(self):\n # check that parallel evaluation works\n resbrute = optimize.brute(brute_func, self.rranges, args=self.params,\n full_output=True, finish=None)\n\n resbrute1 = optimize.brute(brute_func, self.rranges, args=self.params,\n full_output=True, finish=None, workers=2)\n\n assert_allclose(resbrute1[-1], resbrute[-1])\n assert_allclose(resbrute1[0], resbrute[0])\n\n\ndef test_cobyla_threadsafe():\n\n # Verify that cobyla is threadsafe. Will segfault if it is not.\n\n import concurrent.futures\n import time\n\n def objective1(x):\n time.sleep(0.1)\n return x[0]**2\n\n def objective2(x):\n time.sleep(0.1)\n return (x[0]-1)**2\n\n min_method = \"COBYLA\"\n\n def minimizer1():\n return optimize.minimize(objective1,\n [0.0],\n method=min_method)\n\n def minimizer2():\n return optimize.minimize(objective2,\n [0.0],\n method=min_method)\n\n with concurrent.futures.ThreadPoolExecutor() as pool:\n tasks = []\n tasks.append(pool.submit(minimizer1))\n tasks.append(pool.submit(minimizer2))\n for t in tasks:\n res = t.result()\n\n\nclass TestIterationLimits:\n # Tests that optimisation does not give up before trying requested\n # number of iterations or evaluations. 
And that it does not succeed\n # by exceeding the limits.\n def setup_method(self):\n self.funcalls = 0\n\n def slow_func(self, v):\n self.funcalls += 1\n r, t = np.sqrt(v[0]**2+v[1]**2), np.arctan2(v[0], v[1])\n return np.sin(r*20 + t)+r*0.5\n\n def test_neldermead_limit(self):\n self.check_limits(\"Nelder-Mead\", 200)\n\n def test_powell_limit(self):\n self.check_limits(\"powell\", 1000)\n\n def check_limits(self, method, default_iters):\n for start_v in [[0.1, 0.1], [1, 1], [2, 2]]:\n for mfev in [50, 500, 5000]:\n self.funcalls = 0\n res = optimize.minimize(self.slow_func, start_v,\n method=method,\n options={\"maxfev\": mfev})\n assert_(self.funcalls == res[\"nfev\"])\n if res[\"success\"]:\n assert_(res[\"nfev\"] < mfev)\n else:\n assert_(res[\"nfev\"] >= mfev)\n for mit in [50, 500, 5000]:\n res = optimize.minimize(self.slow_func, start_v,\n method=method,\n options={\"maxiter\": mit})\n if res[\"success\"]:\n assert_(res[\"nit\"] <= mit)\n else:\n assert_(res[\"nit\"] >= mit)\n for mfev, mit in [[50, 50], [5000, 5000], [5000, np.inf]]:\n self.funcalls = 0\n res = optimize.minimize(self.slow_func, start_v,\n method=method,\n options={\"maxiter\": mit,\n \"maxfev\": mfev})\n assert_(self.funcalls == res[\"nfev\"])\n if res[\"success\"]:\n assert_(res[\"nfev\"] < mfev and res[\"nit\"] <= mit)\n else:\n assert_(res[\"nfev\"] >= mfev or res[\"nit\"] >= mit)\n for mfev, mit in [[np.inf, None], [None, np.inf]]:\n self.funcalls = 0\n res = optimize.minimize(self.slow_func, start_v,\n method=method,\n options={\"maxiter\": mit,\n \"maxfev\": mfev})\n assert_(self.funcalls == res[\"nfev\"])\n if res[\"success\"]:\n if mfev is None:\n assert_(res[\"nfev\"] < default_iters*2)\n else:\n assert_(res[\"nit\"] <= default_iters*2)\n else:\n assert_(res[\"nfev\"] >= default_iters*2 or\n res[\"nit\"] >= default_iters*2)\n\n\ndef test_result_x_shape_when_len_x_is_one():\n def fun(x):\n return x * x\n\n def jac(x):\n return 2. 
* x\n\n def hess(x):\n return np.array([[2.]])\n\n methods = ['Nelder-Mead', 'Powell', 'CG', 'BFGS', 'L-BFGS-B', 'TNC',\n 'COBYLA', 'SLSQP']\n for method in methods:\n res = optimize.minimize(fun, np.array([0.1]), method=method)\n assert res.x.shape == (1,)\n\n # use jac + hess\n methods = ['trust-constr', 'dogleg', 'trust-ncg', 'trust-exact',\n 'trust-krylov', 'Newton-CG']\n for method in methods:\n res = optimize.minimize(fun, np.array([0.1]), method=method, jac=jac,\n hess=hess)\n assert res.x.shape == (1,)\n\n\nclass FunctionWithGradient:\n def __init__(self):\n self.number_of_calls = 0\n\n def __call__(self, x):\n self.number_of_calls += 1\n return np.sum(x**2), 2 * x\n\n\[email protected]\ndef function_with_gradient():\n return FunctionWithGradient()\n\n\ndef test_memoize_jac_function_before_gradient(function_with_gradient):\n memoized_function = MemoizeJac(function_with_gradient)\n\n x0 = np.array([1.0, 2.0])\n assert_allclose(memoized_function(x0), 5.0)\n assert function_with_gradient.number_of_calls == 1\n\n assert_allclose(memoized_function.derivative(x0), 2 * x0)\n assert function_with_gradient.number_of_calls == 1, \\\n \"function is not recomputed \" \\\n \"if gradient is requested after function value\"\n\n assert_allclose(\n memoized_function(2 * x0), 20.0,\n err_msg=\"different input triggers new computation\")\n assert function_with_gradient.number_of_calls == 2, \\\n \"different input triggers new computation\"\n\n\ndef test_memoize_jac_gradient_before_function(function_with_gradient):\n memoized_function = MemoizeJac(function_with_gradient)\n\n x0 = np.array([1.0, 2.0])\n assert_allclose(memoized_function.derivative(x0), 2 * x0)\n assert function_with_gradient.number_of_calls == 1\n\n assert_allclose(memoized_function(x0), 5.0)\n assert function_with_gradient.number_of_calls == 1, \\\n \"function is not recomputed \" \\\n \"if function value is requested after gradient\"\n\n assert_allclose(\n memoized_function.derivative(2 * x0), 4 * x0,\n err_msg=\"different input triggers new computation\")\n assert function_with_gradient.number_of_calls == 2, \\\n \"different input triggers new computation\"\n\n\ndef test_memoize_jac_with_bfgs(function_with_gradient):\n \"\"\" Tests that using MemoizedJac in combination with ScalarFunction\n and BFGS does not lead to repeated function evaluations.\n Tests changes made in response to GH11868.\n \"\"\"\n memoized_function = MemoizeJac(function_with_gradient)\n jac = memoized_function.derivative\n hess = optimize.BFGS()\n\n x0 = np.array([1.0, 0.5])\n scalar_function = ScalarFunction(\n memoized_function, x0, (), jac, hess, None, None)\n assert function_with_gradient.number_of_calls == 1\n\n scalar_function.fun(x0 + 0.1)\n assert function_with_gradient.number_of_calls == 2\n\n scalar_function.fun(x0 + 0.2)\n assert function_with_gradient.number_of_calls == 3\n\n\ndef test_gh12696():\n # Test that optimize doesn't throw warning gh-12696\n with assert_no_warnings():\n optimize.fminbound(\n lambda x: np.array([x**2]), -np.pi, np.pi, disp=False)\n\n\ndef test_show_options():\n solver_methods = {\n 'minimize': MINIMIZE_METHODS,\n 'minimize_scalar': MINIMIZE_SCALAR_METHODS,\n 'root': ROOT_METHODS,\n 'root_scalar': ROOT_SCALAR_METHODS,\n 'linprog': LINPROG_METHODS,\n 'quadratic_assignment': QUADRATIC_ASSIGNMENT_METHODS,\n }\n for solver, methods in solver_methods.items():\n for method in methods:\n # testing that `show_options` works without error\n show_options(solver, method)\n\n unknown_solver_method = {\n 'minimize': \"ekki\", # unknown 
method\n 'maximize': \"cg\", # unknown solver\n 'maximize_scalar': \"ekki\", # unknown solver and method\n }\n for solver, method in unknown_solver_method.items():\n # testing that `show_options` raises ValueError\n assert_raises(ValueError, show_options, solver, method)\n\n\ndef test_bounds_with_list():\n # gh13501. Bounds created with lists weren't working for Powell.\n bounds = optimize.Bounds(lb=[5., 5.], ub=[10., 10.])\n optimize.minimize(\n optimize.rosen, x0=np.array([9, 9]), method='Powell', bounds=bounds\n )\n\n\ndef test_x_overwritten_user_function():\n # if the user overwrites the x-array in the user function it's likely\n # that the minimizer stops working properly.\n # gh13740\n def fquad(x):\n a = np.arange(np.size(x))\n x -= a\n x *= x\n return np.sum(x)\n\n def fquad_jac(x):\n a = np.arange(np.size(x))\n x *= 2\n x -= 2 * a\n return x\n\n fquad_hess = lambda x: np.eye(np.size(x)) * 2.0\n\n meth_jac = [\n 'newton-cg', 'dogleg', 'trust-ncg', 'trust-exact',\n 'trust-krylov', 'trust-constr'\n ]\n meth_hess = [\n 'dogleg', 'trust-ncg', 'trust-exact', 'trust-krylov', 'trust-constr'\n ]\n\n x0 = np.ones(5) * 1.5\n\n for meth in MINIMIZE_METHODS:\n jac = None\n hess = None\n if meth in meth_jac:\n jac = fquad_jac\n if meth in meth_hess:\n hess = fquad_hess\n res = optimize.minimize(fquad, x0, method=meth, jac=jac, hess=hess)\n assert_allclose(res.x, np.arange(np.size(x0)), atol=2e-4)\n", "\"\"\"\n==============================================\nDiscrete Fourier transforms (:mod:`scipy.fft`)\n==============================================\n\n.. currentmodule:: scipy.fft\n\nFast Fourier Transforms (FFTs)\n==============================\n\n.. autosummary::\n :toctree: generated/\n\n fft - Fast (discrete) Fourier Transform (FFT)\n ifft - Inverse FFT\n fft2 - 2-D FFT\n ifft2 - 2-D inverse FFT\n fftn - N-D FFT\n ifftn - N-D inverse FFT\n rfft - FFT of strictly real-valued sequence\n irfft - Inverse of rfft\n rfft2 - 2-D FFT of real sequence\n irfft2 - Inverse of rfft2\n rfftn - N-D FFT of real sequence\n irfftn - Inverse of rfftn\n hfft - FFT of a Hermitian sequence (real spectrum)\n ihfft - Inverse of hfft\n hfft2 - 2-D FFT of a Hermitian sequence\n ihfft2 - Inverse of hfft2\n hfftn - N-D FFT of a Hermitian sequence\n ihfftn - Inverse of hfftn\n\nDiscrete Sin and Cosine Transforms (DST and DCT)\n================================================\n.. autosummary::\n :toctree: generated/\n\n dct - Discrete cosine transform\n idct - Inverse discrete cosine transform\n dctn - N-D Discrete cosine transform\n idctn - N-D Inverse discrete cosine transform\n dst - Discrete sine transform\n idst - Inverse discrete sine transform\n dstn - N-D Discrete sine transform\n idstn - N-D Inverse discrete sine transform\n\nFast Hankel Transforms\n======================\n\n.. autosummary::\n :toctree: generated/\n\n fht - Fast Hankel transform\n ifht - Inverse of fht\n\nHelper functions\n================\n\n.. autosummary::\n :toctree: generated/\n\n fftshift - Shift the zero-frequency component to the center of the spectrum\n ifftshift - The inverse of `fftshift`\n fftfreq - Return the Discrete Fourier Transform sample frequencies\n rfftfreq - DFT sample frequencies (for usage with rfft, irfft)\n fhtoffset - Compute an optimal offset for the Fast Hankel Transform\n next_fast_len - Find the optimal length to zero-pad an FFT for speed\n set_workers - Context manager to set default number of workers\n get_workers - Get the current default number of workers\n\nBackend control\n===============\n\n.. 
autosummary::\n :toctree: generated/\n\n set_backend - Context manager to set the backend within a fixed scope\n skip_backend - Context manager to skip a backend within a fixed scope\n set_global_backend - Sets the global fft backend\n register_backend - Register a backend for permanent use\n\n\"\"\"\n\nfrom ._basic import (\n fft, ifft, fft2, ifft2, fftn, ifftn,\n rfft, irfft, rfft2, irfft2, rfftn, irfftn,\n hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn)\nfrom ._realtransforms import dct, idct, dst, idst, dctn, idctn, dstn, idstn\nfrom ._fftlog import fht, ifht, fhtoffset\nfrom ._helper import next_fast_len\nfrom ._backend import (set_backend, skip_backend, set_global_backend,\n register_backend)\nfrom numpy.fft import fftfreq, rfftfreq, fftshift, ifftshift\nfrom ._pocketfft.helper import set_workers, get_workers\n\n__all__ = [\n 'fft', 'ifft', 'fft2','ifft2', 'fftn', 'ifftn',\n 'rfft', 'irfft', 'rfft2', 'irfft2', 'rfftn', 'irfftn',\n 'hfft', 'ihfft', 'hfft2', 'ihfft2', 'hfftn', 'ihfftn',\n 'fftfreq', 'rfftfreq', 'fftshift', 'ifftshift',\n 'next_fast_len',\n 'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn',\n 'fht', 'ifht',\n 'fhtoffset',\n 'set_backend', 'skip_backend', 'set_global_backend', 'register_backend',\n 'get_workers', 'set_workers']\n\n\nfrom scipy._lib._testutils import PytestTester\ntest = PytestTester(__name__)\ndel PytestTester\n", "from scipy.linalg import norm\n\n\ndef test_norm():\n assert norm([]) == 0.0\n" ]
[ [ "numpy.linalg.eigvals", "scipy.linalg.svd", "scipy.linalg.invhilbert", "scipy.linalg.hilbert", "numpy.random.randn", "scipy.linalg.solve_triangular", "numpy.random.default_rng", "numpy.linalg.svd", "scipy.linalg.hadamard", "scipy.linalg.dft", "numpy.arange", "numpy.eye", "scipy.linalg.blas.get_blas_funcs", "scipy.linalg.lstsq", "numpy.linalg.det", "numpy.finfo", "scipy.linalg.tri", "scipy.linalg.norm", "scipy.linalg.inv", "scipy.linalg.hankel", "scipy.linalg.solve", "scipy.linalg.leslie", "scipy.linalg.companion", "scipy.linalg.det", "numpy.linalg.inv", "scipy.linalg.eigvals", "numpy.random.rand", "numpy.testing.assert_", "numpy.linalg.solve", "scipy.linalg.toeplitz", "scipy.linalg.block_diag", "numpy.linalg.norm", "numpy.ones", "scipy.linalg.helmert", "scipy.linalg.cholesky", "scipy.linalg.circulant", "scipy.linalg.invpascal", "scipy.linalg.pascal" ], [ "numpy.ma.testutils.assert_", "numpy.sqrt", "scipy.stats.mstats.scoreatpercentile", "scipy.stats.zscore", "scipy.stats.mstats.tsem", "scipy.stats.mstats.ttest_1samp", "numpy.ma.masked_where", "scipy.stats.mstats.obrientransform", "scipy.stats.ttest_rel", "numpy.ma.testutils.assert_array_equal", "numpy.full", "scipy.stats.kurtosistest", "scipy.stats.mstats.siegelslopes", "numpy.zeros", "scipy.stats.obrientransform", "numpy.power", "scipy.stats.tvar", "scipy.stats.pearsonr", "scipy.stats.tmax", "scipy.stats.mstats.gmean", "scipy.stats.mstats.tmin", "scipy.stats.mstats.trimmed_stde", "scipy.stats.kurtosis", "numpy.array", "scipy.stats.mstats.zscore", "scipy.stats.mstats.find_repeats", "scipy.stats.normaltest", "scipy.stats.mstats.trimmed_mean", "scipy.stats.mstats.pearsonr", "scipy.stats.kendalltau", "numpy.ma.masked_array", "scipy.stats.mstats.theilslopes", "scipy.stats.mstats.skewtest", "scipy.stats.ttest_ind", "scipy.stats.mstats.f_oneway", "scipy.stats.mstats.pointbiserialr", "numpy.ma.testutils.assert_equal", "numpy.vstack", "numpy.asarray", "scipy.stats.mstats.spearmanr", "numpy.concatenate", "numpy.ma.testutils.assert_allclose", "scipy.stats.tmin", "scipy.stats.mstats.mode", "scipy.stats.mstats.trimr", "scipy.stats.mstats.winsorize", "scipy.stats.mstats.variation", "numpy.reshape", "numpy.testing.suppress_warnings", "scipy.stats.trimboth", "numpy.ma.masked_greater", "numpy.ma.arange", "scipy.stats.mstats.kurtosistest", "scipy.stats.mstats.ks_2samp", "scipy.stats.mstats_basic._kendall_p_exact", "scipy.stats.mstats.kstest", "scipy.stats.skew", "scipy.stats.stats.Ks_2sampResult", "scipy.stats.mstats.kendalltau_seasonal", "scipy.stats.linregress", "scipy.stats.norm.rvs", "numpy.random.rand", "numpy.errstate", "scipy.stats.kstest", "numpy.ma.fix_invalid", "scipy.stats.hmean", "scipy.stats.mstats.trim", "scipy.stats.sem", "scipy.stats.mstats.ks_1samp", "scipy.stats.ks_1samp", "numpy.linspace", "scipy.stats.zmap", "scipy.stats.mstats.kendalltau", "scipy.stats.tmean", "numpy.ma.testutils.assert_almost_equal", "scipy.stats.mstats.describe", "scipy.stats.describe", "scipy.stats.mstats.kurtosis", "scipy.stats.moment", "scipy.stats.tsem", "numpy.isnan", "scipy.stats.skewtest", "numpy.ma.testutils.assert_array_almost_equal", "scipy.stats.mstats.tvar", "scipy.stats.mstats.zmap", "scipy.stats.mstats.sem", "scipy.stats.mstats.friedmanchisquare", "scipy.stats.mstats.moment", "scipy.stats.ttest_1samp", "scipy.stats.mstats.trimtail", "scipy.stats.mstats.kruskal", "scipy.stats.mstats.brunnermunzel", "numpy.ma.masked_invalid", "scipy.stats.mstats.ttest_ind", "numpy.broadcast_to", "scipy.stats.mstats.tmax", "scipy.stats.mstats.mquantiles", 
"scipy.stats.ks_2samp", "scipy.stats.mstats.tmean", "scipy.stats.mstats.linregress", "scipy.stats.mstats.ttest_rel", "numpy.random.randn", "numpy.ma.array", "scipy.stats.spearmanr", "scipy.stats.find_repeats", "scipy.stats.mstats.mannwhitneyu", "numpy.arange", "scipy.stats.mannwhitneyu", "scipy.stats.mstats.rankdata", "scipy.stats.mstats.skew", "scipy.stats.mstats.normaltest", "scipy.stats.mstats.trimboth", "numpy.random.random", "numpy.abs", "numpy.random.seed", "scipy.stats.rankdata", "scipy.stats.variation", "numpy.sort" ], [ "numpy.diag", "numpy.dot", "numpy.sqrt", "numpy.linspace", "scipy.optimize.fmin_l_bfgs_b", "numpy.testing.assert_no_warnings", "scipy.optimize.brute", "numpy.all", "numpy.arctan2", "scipy.optimize.fmin", "scipy.optimize.golden", "scipy.optimize.fmin_powell", "numpy.exp", "numpy.testing.assert_equal", "scipy.optimize.OptimizeResult", "numpy.ones_like", "scipy.optimize.optimize.MemoizeJac", "scipy.optimize.rosen", "numpy.testing.suppress_warnings", "scipy.optimize.rosen_der", "numpy.may_share_memory", "scipy.optimize.minimize_scalar", "scipy.optimize.check_grad", "numpy.full", "numpy.sin", "numpy.testing.assert_almost_equal", "numpy.copy", "scipy.optimize.brent", "numpy.size", "numpy.zeros", "scipy.optimize.fmin_cg", "numpy.spacing", "scipy.optimize.fmin_bfgs", "numpy.isnan", "scipy.optimize.fmin_ncg", "scipy.optimize.rosen_hess", "scipy.optimize.minimize", "scipy.optimize.fminbound", "numpy.testing.assert_", "numpy.testing.assert_allclose", "numpy.errstate", "numpy.random.rand", "numpy.array", "numpy.testing.assert_warns", "numpy.sum", "scipy.optimize._minimize._minimize_neldermead", "scipy.optimize.BFGS", "numpy.random.seed", "numpy.array_equal", "numpy.cos", "numpy.ones", "numpy.testing.assert_array_less", "scipy.optimize.Bounds", "scipy.optimize.optimize.show_options", "scipy.optimize.rosen_hess_prod", "scipy.optimize._differentiable_functions.ScalarFunction" ], [ "scipy._lib._testutils.PytestTester" ], [ "scipy.linalg.norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.12", "0.14", "0.15" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.9", "1.5", "1.7", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "0.14", "0.15", "0.12", "0.10" ], "tensorflow": [] } ]
jimgoo/metaflow-tabular
[ "66b02c1d53b1a64d19b4fc2f7f4a5f9a5ab1b422" ]
[ "metaflow_tabular/forecasting_flow.py" ]
[ "\"\"\"\nTo run this flow:\n```python forecasting_flow.py --environment=conda run```\n\"\"\"\n\nfrom functools import partial\n\nfrom metaflow import (\n Flow,\n FlowSpec,\n IncludeFile,\n Parameter,\n batch,\n conda,\n conda_base,\n get_metadata,\n parallel_map,\n step,\n)\n\nfrom pip_decorator import pip\nfrom forecasting_models import GluonTSModel, KatsModel, NeuralProphetModel, MerlionModel\n\n# this version is used in pre and post processing steps\nPANDAS_VERSION = \"1.3.3\"\n\n# this version is used when conda packages aren't available\nPIP_VERSION = \"21.3.1\"\n\n\ndef run_model(\n model_config, wrapper_class, target_index, forecast_steps, train_df, data_freq\n):\n try:\n model = wrapper_class(\n model_config, target_index, forecast_steps, data_freq=data_freq\n )\n model.fit(train_df)\n forecast = model.predict(train_df)\n forecast[\"id\"] = model_config[\"id\"]\n return forecast\n except:\n print(f\"Error with {model_config}\")\n raise\n\n\n@conda_base(python=\"3.8.12\")\nclass ForecastingFlow(FlowSpec):\n \"\"\"\n A flow for benchmarking forecasting libraries.\n \"\"\"\n\n train_path = Parameter(\n \"train_path\",\n help=\"The path to a DataFrame file for training\",\n default=\"https://jgoode.s3.amazonaws.com/ts-datasets/seattle-trail.csv\",\n )\n\n test_path = Parameter(\n \"test_path\",\n help=\"The path to a DataFrame file for testing\",\n default=None,\n )\n\n date_col = Parameter(\n \"date_col\",\n help=\"Column of the date in the input DataFrame\",\n default=\"Date\",\n )\n\n target_col = Parameter(\n \"target_col\",\n help=\"Column of the target in the input DataFrame\",\n default=\"BGT North of NE 70th Total\",\n )\n\n # data_config_path = Parameter(\n # \"data_config_path\",\n # help=\n\n model_config_path = Parameter(\n \"model_config_path\",\n help=\"The path to a model config file\",\n default=\"../configs/forecasting/models/default.yaml\",\n )\n\n forecast_steps = Parameter(\n \"forecast_steps\",\n help=\"The number of steps ahead to forecast\",\n default=10,\n )\n\n @conda(libraries={\"pandas\": PANDAS_VERSION, \"pyyaml\": \"6.0\"})\n @step\n def start(self):\n \"\"\"\n Start the flow by preprocessing the data.\n \"\"\"\n import pandas as pd\n from pprint import pprint\n import yaml\n\n # Print the Metaflow metadata provider\n print(f\"Using metadata provider: {get_metadata()}\")\n\n def load_df(path):\n df = pd.read_csv(path)\n\n assert self.date_col in df.columns, '\"%s\" not in columns' % self.date_col\n assert self.target_col in df.columns, (\n '\"%s\" not in columns' % self.target_col\n )\n\n # parse date column and set it as the index\n df[self.date_col] = pd.to_datetime(df[self.date_col])\n df.set_index(self.date_col, inplace=True)\n return df\n\n self.train_df = load_df(self.train_path)\n if self.test_path is not None:\n self.test_df = load_df(self.test_path)\n assert (\n self.train_df.columns == self.test_df.columns\n ).all(), \"Columns do not match\"\n else:\n self.test_df = None\n\n if self.test_df is None:\n n_train = 500\n self.test_df = self.train_df.iloc[n_train : n_train + self.forecast_steps]\n self.train_df = self.train_df.iloc[:n_train]\n\n # get index of the target column\n self.target_index = self.train_df.columns.tolist().index(self.target_col)\n\n # get the frequency of the data\n self.freq = pd.infer_freq(self.train_df.index)\n\n # load the model config file\n with open(self.model_config_path, \"r\") as f:\n self.model_config = yaml.safe_load(f)\n\n print(\"train df\")\n print(self.train_df)\n print(\"test df\")\n print(self.test_df)\n 
print(\"model_config\")\n pprint(self.model_config)\n\n # these branches will run in parallel\n # TODO: skip those with no entries in the model config\n self.next(\n self.run_merlion,\n self.run_gluonts,\n self.run_kats,\n self.run_neuralprophet,\n )\n\n @conda(libraries={\"salesforce-merlion\": \"1.0.2\"})\n @step\n def run_merlion(self):\n \"\"\"\n Run Merlion models.\n https://github.com/salesforce/Merlion\n \"\"\"\n self.forecasts = parallel_map(\n partial(\n run_model,\n wrapper_class=MerlionModel,\n target_index=self.target_index,\n forecast_steps=self.forecast_steps,\n train_df=self.train_df,\n data_freq=self.freq,\n ),\n self.model_config[\"libs\"].get(\"merlion\", []),\n )\n self.next(self.join)\n\n # We use pip because mxnet 1.5.0 is broken and there's no newer conda version.\n @pip(libraries={\"mxnet\": \"1.8.0.post0\", \"gluonts\": \"0.8.1\"})\n @conda(libraries={\"pip\": PIP_VERSION})\n @step\n def run_gluonts(self):\n \"\"\"\n Run gluon-ts models.\n https://github.com/awslabs/gluon-ts\n \"\"\"\n self.forecasts = parallel_map(\n partial(\n run_model,\n wrapper_class=GluonTSModel,\n target_index=self.target_index,\n forecast_steps=self.forecast_steps,\n train_df=self.train_df,\n data_freq=self.freq,\n ),\n self.model_config[\"libs\"].get(\"gluonts\", []),\n )\n self.next(self.join)\n\n @conda(libraries={\"kats\": \"0.1.0\"})\n @step\n def run_kats(self):\n \"\"\"\n Run Kats models.\n https://github.com/facebookresearch/Kats\n \"\"\"\n self.forecasts = parallel_map(\n partial(\n run_model,\n wrapper_class=KatsModel,\n target_index=self.target_index,\n forecast_steps=self.forecast_steps,\n train_df=self.train_df,\n data_freq=self.freq,\n ),\n self.model_config[\"libs\"].get(\"kats\", []),\n )\n self.next(self.join)\n\n # We use pip because there isn't a conda package for NeuralProphet.\n @pip(libraries={\"neuralprophet\": \"0.3.0\"})\n @conda(libraries={\"pip\": PIP_VERSION})\n @step\n def run_neuralprophet(self):\n \"\"\"\n Run NeuralProphet models.\n https://github.com/ourownstory/neural_prophet\n \"\"\"\n self.forecasts = parallel_map(\n partial(\n run_model,\n wrapper_class=NeuralProphetModel,\n target_index=self.target_index,\n forecast_steps=self.forecast_steps,\n train_df=self.train_df,\n data_freq=self.freq,\n ),\n self.model_config[\"libs\"].get(\"neuralprophet\", []),\n )\n self.next(self.join)\n\n @conda(libraries={\"pandas\": PANDAS_VERSION})\n @step\n def join(self, inputs):\n \"\"\"\n Compute performance metrics for each library.\n \"\"\"\n from collections import OrderedDict\n import numpy as np\n import pandas as pd\n\n forecasts = OrderedDict()\n\n # get forecasts for each library\n for lib in inputs:\n # carry these forward\n self.train_df = lib.train_df\n self.test_df = lib.test_df\n self.target_index = lib.target_index\n\n for forecast in lib.forecasts:\n assert (\n forecast[\"id\"] not in forecasts\n ), f\"Duplicate forecast id: {forecast['id']}\"\n forecasts[forecast[\"id\"]] = forecast[\"y_hat\"].reshape(-1)\n\n # get timestamps for the forecasts\n freq = self.train_df.index[1] - self.train_df.index[0]\n future_dates = pd.DatetimeIndex(\n [\n self.train_df.index[-1] + (i + 1) * freq\n for i in range(self.forecast_steps)\n ]\n )\n\n self.forecasts = pd.DataFrame(forecasts, index=future_dates)\n\n print(\"forecasts:\")\n print(self.forecasts)\n\n if self.test_df is not None:\n # duplicate univariate target across columns for each model\n true = self.test_df.iloc[\n : self.forecast_steps, [self.target_index] * self.forecasts.shape[1]\n ]\n pred = 
self.forecasts\n\n print(\"--> true\")\n print(true)\n print(\"--> pred\")\n print(pred)\n\n self.rmse = pd.Series(\n np.sqrt(np.mean((pred.values - true.values) ** 2, axis=0)),\n index=self.forecasts.columns,\n ).sort_values()\n\n print(f\"RMSE:\")\n print(self.rmse)\n\n self.next(self.end)\n\n @step\n def end(self):\n \"\"\"\n End of the flow\n \"\"\"\n pass\n\n\nif __name__ == \"__main__\":\n ForecastingFlow()\n" ]
[ [ "pandas.read_csv", "pandas.to_datetime", "pandas.DataFrame", "numpy.mean", "pandas.infer_freq" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
axdahl/SC-MMGP
[ "c6cd9d9de66bb7074925a4b6485f10a74bdd9f68" ]
[ "examples/flight_delays/logcox_implicit_scmmgp.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nScript to execute example implicit sparse covarying MMGP with Poisson likelihood.\r\nThe model invokes the 'implicit' sparse model class and accepts a degenerate kernel\r\nthat is optionally corrected within the model class with diagonal correction.\r\n\r\nInputs: Data training and test sets (dictionary pickle)\r\nData for example:\r\n - count data for 50 airports\r\n - N_train = 20,000, N_test = 10,798, P = 50, D = 105\r\n - Xtr[:, :4] ['time_index', 'dayofweek', 'dayofmonth', 'month']\r\n - Xtr[:, 4:105] total scheduled arrivals and departures per airport\r\n - Xtr[:, 105] total activity (arrivals and departures) for all airports\r\n - link inputs is a 50x3 array (link inputs repeated for every group)\r\n with normalised lat,long and airport size (total scheduled flights over sample period)\r\n\r\nModel Options:\r\n - Sparse or full x-function covariance prior Krhh (set bool SPARSE_PRIOR)\r\n - Diagonal or Kronecker-structured variational posterior covariance Sr (set bool DIAG_POST)\r\n - Sparse or full covariance (when Kronecker posterior; set bool SPARSE_POST)\r\n - Diagonal correction required within class for degenerate kernels (set bool EXACT_SPARSE)\r\n\r\nCurrent Settings (degenerate scmmgp model with sparse Kronecker posterior):\r\n DIAG_POST = False\r\n SPARSE_PRIOR = True # set False for equivalent non-sparse scmmgp model\r\n SPARSE_POST = True\r\n EXACT_SPARSE = True # option for sparse prior low-rank adjustment\r\n\r\nNote on specifying group structure for F:\r\n Grouping occurs via block_struct, a nested list of grouping order\r\n Where functions [i] are independent i.e. in own block, set link_kernel[i] = link_inputs[i] = 1.0\r\n See model class preamble and example below for further details.\r\n\r\n\"\"\"\r\nimport os\r\nimport numpy as np\r\nimport pickle\r\nimport pandas as pd\r\nimport traceback\r\nimport time\r\nimport sklearn.cluster\r\nimport csv\r\nimport sys\r\nimport mmgp\r\nfrom mmgp import likelihoods\r\nfrom mmgp import kernels\r\nimport tensorflow as tf\r\nfrom mmgp import datasets\r\nfrom mmgp import losses\r\nfrom mmgp import util\r\n\r\ndpath = '/experiments/datasets/'\r\ndfile = 'logcox_nozeroy_aggx_inputsdict.pickle'\r\ndlinkfile = 'logcox_nozeroy_aggx_linkinputsarray.pickle'\r\noutdir = '/experiments/results/logcox_implicit_scmmgp'\r\nsiteinclude = os.path.join(dpath, \"airports_top50.csv\") # contains order of output variables\r\n\r\ntry:\r\n os.makedirs(outdir)\r\nexcept FileExistsError:\r\n pass\r\n\r\ndef get_inputs():\r\n \"\"\"\r\n inputsdict contains {'Yte': Yte, 'Ytr': Ytr, 'Xtr': Xtr, 'Xte': Xte} where values are np.arrays\r\n np. 
arrays are truncated to evenly split into batches of size = batchsize\r\n\r\n returns inputsdict, Xtr_link (ndarray, shape = [P, D_link_features])\r\n \"\"\"\r\n with open(os.path.join(dpath, dfile), 'rb') as f:\r\n d_all = pickle.load(f)\r\n\r\n with open(os.path.join(dpath, dlinkfile), 'rb') as f:\r\n d_link = pickle.load(f)\r\n\r\n return d_all, d_link\r\n\r\n\r\ndef init_z(train_inputs, num_inducing):\r\n # Initialize inducing points using clustering.\r\n mini_batch = sklearn.cluster.MiniBatchKMeans(num_inducing)\r\n cluster_indices = mini_batch.fit_predict(train_inputs)\r\n inducing_locations = mini_batch.cluster_centers_\r\n return inducing_locations\r\n\r\n\r\nFLAGS = util.util.get_flags()\r\nBATCH_SIZE = 100\r\nLEARNING_RATE = FLAGS.learning_rate\r\nDISPLAY_STEP = FLAGS.display_step\r\nEPOCHS = 150\r\nNUM_SAMPLES = 200\r\nPRED_SAMPLES = 500\r\nNUM_INDUCING = 250\r\nNUM_COMPONENTS = FLAGS.num_components\r\nIS_ARD = FLAGS.is_ard\r\nTOL = 0.0001\r\nVAR_STEPS = FLAGS.var_steps\r\nDIAG_POST = False\r\nSPARSE_PRIOR = True\r\nSPARSE_POST = True # option for non-diag post\r\nEXACT_SPARSE = True # option for sparse prior low-rank adjustment\r\nMAXTIME = 1200\r\nsave_nlpds = True # If True saves samples of nlpds (mean and variance)\r\n\r\nprint(\"settings done\")\r\n\r\n\r\n# define GPRN P and Q\r\noutput_dim = 50 #P\r\nlocfeat_dim = 2 # [scheduled arrivals, scheduled departures] for time increment for airport\r\ncommonfeats = list(range(4)) # [t_ix, dayofweek, dayofmonth, month]\r\nnum_hubs = 5 # becomes nodedim\r\n# top 10 airports (select increasing subsets for varying nodedim)\r\ntoplist = ['ATL',\r\n 'ORD',\r\n 'DFW',\r\n 'DEN',\r\n 'LAX',\r\n 'PHX',\r\n 'IAH',\r\n 'LAS',\r\n 'DTW',\r\n 'EWR']\r\n\r\nuse_sites = pd.read_csv(siteinclude,header=None).iloc[:,0].tolist() # order of output variables\r\ntoplist = toplist[:num_hubs]\r\nhublocs = [use_sites.index(x) for x in toplist]\r\nnonhubs = [use_sites.index(x) for x in use_sites if x not in toplist] #non hub dims\r\nnode_dim = len(hublocs) #Q\r\n\r\n# extract dataset\r\nd, d_link = get_inputs()\r\nYtr, Yte, Xtr, Xte = d['Ytr'], d['Yte'], d['Xtr'], d['Xte']\r\n\r\ndata = datasets.DataSet(Xtr.astype(np.float32), Ytr.astype(np.float32), shuffle=False)\r\ntest = datasets.DataSet(Xte.astype(np.float32), Yte.astype(np.float32), shuffle=False)\r\nprint(\"dataset created\")\r\n\r\n# lists required: block_struct, link_inputs, kern_link, kern\r\n# model config: block columns, leave f independent\r\n# order of block_struct is columns, node functions\r\n\r\n#block_struct nested list of grouping order\r\nweight_struct = [[] for _ in range(node_dim)]\r\n\r\nfor i in range(node_dim):\r\n col = list(range(i*output_dim, i*output_dim + output_dim))\r\n col_0 = col.pop(hublocs[i]) # bring hub to pivot position\r\n weight_struct[i] = [col_0] + col\r\n\r\nnodes = [[x] for x in list(range(output_dim * node_dim, output_dim * node_dim + node_dim))]\r\nblock_struct = weight_struct + nodes\r\n\r\n# create link inputs (link inputs used repeatedly but can have link input per group)\r\n# permute to bring hub to first position\r\nlink_inputs = [[] for _ in range(node_dim)]\r\nfor i in range(node_dim):\r\n idx = list(range(d_link.shape[0]))\r\n link_inputs[i] = d_link[[idx.pop(hublocs[i])] + idx, :] # match inputs order to block_struct\r\n\r\nlink_inputs = link_inputs + [1.0 for i in range(node_dim)]\r\n\r\n# link kernel\r\nklink_w = [kernels.WaveletSlice(3, active_dims=[0,1,2], input_scaling=IS_ARD) for i in range(len(weight_struct)) ]\r\nklink_f = [1.0 for i in 
range(node_dim)]\r\n\r\nkernlink = klink_w + klink_f\r\n\r\n# create 'within' kernel\r\n# kern\r\nk_w = [kernels.CompositeKernel('add',[kernels.RadialBasisSlice(Xtr.shape[1],\r\n active_dims= list(range(Xtr.shape[1])),\r\n std_dev = 1.0, white = 0.01, input_scaling = IS_ARD),\r\n kernels.PeriodicSlice(1, active_dims=[0],\r\n lengthscale=0.5, std_dev=1.0, period = 2.0) ])\r\n for i in range(len(weight_struct))]\r\nk_f = [kernels.RadialBasisSlice(1, active_dims=[0], std_dev = 1.0, lengthscale=0.5, white = 0.01, input_scaling = IS_ARD)\r\n for i in range(node_dim)]\r\n\r\nkern = k_w + k_f\r\n\r\nprint('len link_inputs ',len(link_inputs))\r\nprint('len kernlink ',len(kernlink))\r\nprint('len kern ', len(kern))\r\nprint('no. groups = ', len(block_struct), 'no. latent functions =', len([i for b in block_struct for i in b]))\r\nprint('number latent functions', node_dim*(output_dim+1))\r\n\r\nlikelihood = likelihoods.SCMMGPLogCox(output_dim, node_dim, offset = 0.05) # output_dim, node_dim, offset\r\nprint(\"likelihood and kernels set\")\r\nZ = init_z(data.X, NUM_INDUCING)\r\nprint('inducing points set')\r\nm = mmgp.ImplicitSCMMGP(output_dim, likelihood, kern, kernlink, block_struct, Z, link_inputs,\r\n num_components=NUM_COMPONENTS, diag_post=DIAG_POST, sparse_prior=SPARSE_PRIOR,\r\n sparse_post=SPARSE_POST, exact_sparse=EXACT_SPARSE, num_samples=NUM_SAMPLES, predict_samples=PRED_SAMPLES)\r\nprint(\"model set\")\r\n\r\n\r\n# initialise losses and logging\r\nerror_rate = losses.RootMeanSqError(data.Dout)\r\nos.chdir(outdir)\r\nwith open(\"log_results.csv\", 'w', newline='') as f:\r\n csv.writer(f).writerow(['epoch', 'fit_runtime', 'nelbo', error_rate.get_name(),'generalised_nlpd'])\r\nwith open(\"log_params.csv\", 'w', newline='') as f:\r\n csv.writer(f).writerow(['epoch', 'raw_kernel_params', 'raw_kernlink_params', 'raw_likelihood_params', 'raw_weights'])\r\nwith open(\"log_comp_time.csv\", 'w', newline='') as f:\r\n csv.writer(f).writerow(['epoch', 'batch_time', 'nelbo_time', 'pred_time', 'gen_nlpd_time', error_rate.get_name()+'_time'])\r\n\r\n# optimise\r\no = tf.train.AdamOptimizer(LEARNING_RATE, beta1=0.9,beta2=0.99)\r\nprint(\"start time = \", time.strftime('%X %x %Z'))\r\nm.fit(data, o, var_steps = VAR_STEPS, epochs = EPOCHS, batch_size = BATCH_SIZE, display_step=DISPLAY_STEP,\r\n test = test, loss = error_rate, tolerance = TOL, max_time=MAXTIME )\r\nprint(\"optimisation complete\")\r\n\r\n\r\n# export final predicted values and loss metrics\r\nypred = m.predict(test.X, batch_size = BATCH_SIZE) #same batchsize used for convenience\r\nnp.savetxt(\"predictions.csv\", np.concatenate(ypred, axis=1), delimiter=\",\")\r\nif save_nlpds == True:\r\n nlpd_samples, nlpd_meanvar = m.nlpd_samples(test.X, test.Y, batch_size = BATCH_SIZE)\r\n try:\r\n np.savetxt(\"nlpd_meanvar.csv\", nlpd_meanvar, delimiter=\",\") # N x 2P as for predictions\r\n except:\r\n print('nlpd_meanvar export fail')\r\n #try:\r\n # np.savetxt(\"nlpd_samples.csv\", nlpd_samples, delimiter=\",\") # NP x S (NxS concat for P tasks)\r\n #except:\r\n # print('nlpd_samples export fail')\r\n\r\nprint(\"Final \" + error_rate.get_name() + \"=\" + \"%.4f\" % error_rate.eval(test.Y, ypred[0]))\r\nprint(\"Final \" + \"generalised_nlpd\" + \"=\" + \"%.4f\" % m.nlpd_general(test.X, test.Y, batch_size = BATCH_SIZE))\r\n\r\n# any extra accuracy measures at end of routine\r\nerror_rate_end = [losses.MeanAbsError(data.Dout)]\r\nprint(\"Final \", [e.get_name() for e in error_rate_end])\r\nprint([e.eval(test.Y, ypred[0]) for e in 
error_rate_end])\r\npredvar = [np.mean(np.mean(ypred[1]))]\r\nprint(\"Final predvar \", predvar)\r\n\r\nwith open(\"final_losses.csv\", 'w', newline='') as f:\r\n csv.writer(f).writerows([[e.get_name() for e in error_rate_end] + ['pred_var'],\r\n [e.eval(test.Y, ypred[0]) for e in error_rate_end] + predvar])\r\n\r\nprint(\"finish time = \" + time.strftime('%X %x %Z'))\r\n" ]
[ [ "pandas.read_csv", "numpy.concatenate", "numpy.mean", "tensorflow.train.AdamOptimizer", "numpy.savetxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [ "1.10" ] } ]
opplatek/fivepseq
[ "9a531520e29aa9b3edbf1823c6cfd249f97bba9b" ]
[ "fivepseq/logic/structures/fivepseq_counts.py" ]
[ "import collections\nimport logging\nimport os\nfrom math import floor\n\nimport numpy as np\nimport pandas as pd\nimport plastid\nfrom preconditions import preconditions\nfrom scipy import stats\n\nfrom fivepseq import config\nfrom fivepseq.logic.structures import codons\nfrom fivepseq.logic.structures.codons import Codons\nfrom fivepseq.util.writers import FivePSeqOut\n\n\nclass FivePSeqCounts:\n \"\"\"\n This class wraps annotation, alignment and genome objects in one place.\n Algorithms extracting count information from these objects are implemented in this class as functions.\n Algorithms able to work with count arrays and dataframes alone are in the algorithms package.\n \"\"\"\n\n START = \"START\"\n TERM = \"STOP\"\n FULL_LENGTH = \"full_length\"\n ALL = \"all\"\n\n START_CODON = \"start\"\n STOP_CODON = \"stop\"\n TRANSCRIPT_LENGTH = \"len\"\n TRANSCRIPT_3NT = \"3nt\"\n NUMBER_READS = \"NumOfReads\"\n NUMBER_READS_DOWNSAMPLED = \"NumOfReadsDownsampled\"\n NUMBER_POSITIONS = \"NumOfMapPositions\"\n\n COUNT_THRESHOLD = 100\n logger = logging.getLogger(config.FIVEPSEQ_LOGGER)\n\n count_distribution_dict = None\n outlier_lower = None\n downsample_constant = None\n outlier_probability = None\n\n config = None\n alignment = None\n annotation = None\n genome = None\n\n count_vector_list_start = None\n count_vector_list_term = None\n count_vector_list_full_length = None\n meta_count_series_start = None\n meta_count_series_term = None\n frame_counts_df_start = None\n frame_counts_df_term = None\n codon_genome_usage_df = None\n codon_count_df = None\n amino_acid_count_df = None\n dicodon_count_df = None\n dipeptide_count_df = None\n tricodon_count_df = None\n tripeptide_count_df = None\n codon_stats_df = None\n amino_acid_stats_df = None\n\n codon_genome_usage_df = None\n amino_acid_genome_usage_df = None\n\n start_codon_dict = None\n stop_codon_dict = None\n canonical_transcript_index = None\n transcript_descriptors = None\n outliers = None\n\n is_geneset = False\n\n loci_overlaps = None\n READ_LOCATIONS_ALL = \"_ALL\"\n READ_LOCATIONS_3UTR = \"_3UTR\"\n READ_LOCATIONS_5UTR = \"_5UTR\"\n READ_LOCATIONS_CDS = \"_CDS\"\n\n MASK_DIST = 20\n TRIPEPTIDE_POS = -11\n DIPEPTIDE_POS = -14\n\n missing_chroms = []\n\n def __init__(self, alignment, annotation, genome, config, downsample_constant, is_geneset=False,\n transcript_filter=None):\n \"\"\"\n Initializes a FivePSeqCounts object with Alignment and Annotation instances.\n\n :param alignment: fivepseq.logic.structures.Alignment type object\n :param annotation: fivepseq.logic.structures.Annotation type object\n :param genome: fivepseq.logic.structures.Genome: Genome type object\n :param outlier_probability: a float setting the probability threshold for Poisson distribution that will be used to downsample outliers\n :param downsample_constant: a float specifying a constant threshold: higher values will be down-sampled to this constant (without Poisson check)\n \"\"\"\n\n self.alignment = alignment\n self.annotation = annotation\n self.genome = genome\n self.transcript_filter = transcript_filter\n self.config = config\n self.outlier_probability = config.args.op\n self.outlier_lower = downsample_constant\n self.outliers = []\n self.start_codon_dict = {}\n self.stop_codon_dict = {}\n self.canonical_transcript_index = []\n self.is_geneset = is_geneset\n\n self.logger.info(\"Initiated a FivePSeqCounts object with\"\n \"\\n\\talignment from file %s\"\n \"\\n\\tannotation from file %s \"\n \"\\n\\tgenome from file %s\"\n % 
(alignment.alignment_file.filename, annotation.file_path, genome.fasta_file))\n\n def get_transcript_descriptors(self):\n if self.transcript_descriptors is None:\n self.generate_transcript_descriptors()\n\n return self.transcript_descriptors\n\n def get_start_codon_dict(self):\n if self.start_codon_dict is None:\n self.generate_transcript_descriptors()\n\n return self.start_codon_dict\n\n def get_stop_codon_dict(self):\n if self.stop_codon_dict is None:\n self.generate_transcript_descriptors()\n\n return self.stop_codon_dict\n\n def generate_transcript_descriptors(self):\n \"\"\"\n Generates and stores the basic statistics on transcript sequences and counts.\n The following objects are generated and kept in self:\n\n transcript_descriptors:: pandas DataFrame\n - columns: START, TERM codons, transcript length,\n transcript length divisible by three, number of reads mapping within coding region\n - rows: transcripts\n\n\n :return:\n \"\"\"\n\n # info\n self.logger.info(\"Generating transcript descriptors\")\n\n transcript_assembly = self.annotation.get_transcript_assembly(span_size=0)\n transcript_count = len(transcript_assembly)\n self.transcript_descriptors = pd.DataFrame(data=None,\n index=range(transcript_count),\n columns=[self.START_CODON,\n self.STOP_CODON,\n self.TRANSCRIPT_LENGTH,\n self.TRANSCRIPT_3NT,\n self.NUMBER_READS,\n self.NUMBER_READS_DOWNSAMPLED,\n self.NUMBER_POSITIONS])\n\n count_distribution_dict = {}\n\n for transcript_ind in range(transcript_count):\n transcript = transcript_assembly[transcript_ind]\n\n cds_sequence = self.get_cds_sequence_safe(transcript, 0)\n count_vector = self.get_count_vector_safe(transcript, 0)\n\n # NOTE the count distribution does not include values 0 to avoid skewness for outlier detection\n for c in count_vector:\n if c > 0:\n if c in count_distribution_dict:\n count_distribution_dict[c] += 1\n else:\n count_distribution_dict[c] = 1\n\n start_codon = cds_sequence[0:3]\n stop_codon = cds_sequence[len(cds_sequence) - 3:len(cds_sequence)]\n\n if (start_codon == codons.Codons.START_CODON) & (stop_codon in codons.Codons.stop_codons):\n self.canonical_transcript_index.append(transcript_ind)\n\n self.transcript_descriptors.at[transcript_ind, self.START_CODON] = start_codon\n self.transcript_descriptors.at[transcript_ind, self.STOP_CODON] = stop_codon\n self.transcript_descriptors.at[transcript_ind, self.TRANSCRIPT_3NT] = str(len(cds_sequence) % 3 == 0)\n self.transcript_descriptors.at[transcript_ind, self.TRANSCRIPT_LENGTH] = len(cds_sequence)\n self.transcript_descriptors.at[transcript_ind, self.NUMBER_READS] = int(np.sum(count_vector))\n self.transcript_descriptors.at[transcript_ind, self.NUMBER_POSITIONS] = np.count_nonzero(count_vector)\n\n if start_codon in self.start_codon_dict.keys():\n self.start_codon_dict[start_codon] += 1\n else:\n self.start_codon_dict.update({start_codon: 1})\n\n if stop_codon in self.stop_codon_dict.keys():\n self.stop_codon_dict[stop_codon] += 1\n else:\n self.stop_codon_dict.update({stop_codon: 1})\n\n self.count_distribution_dict = collections.OrderedDict(sorted(count_distribution_dict.items()))\n self.outlier_lower = self.get_outlier_lower()\n\n self.logger.info(\"The lower bound for outliers set as %f \" % self.outlier_lower)\n\n # also store downsampled transcript counts\n for transcript_ind in range(transcript_count):\n transcript = transcript_assembly[transcript_ind]\n\n count_vector_downsampled = self.get_count_vector(transcript, span_size=0,\n region=self.FULL_LENGTH, downsample=True)\n 
self.transcript_descriptors.at[transcript_ind, self.NUMBER_READS_DOWNSAMPLED] = int(\n np.sum(count_vector_downsampled))\n\n self.logger.info(\"Done generating transcript descriptors\")\n\n def get_count_distribution_dict(self):\n return self.count_distribution_dict\n\n def get_count_distribution(self):\n\n if self.count_distribution_dict is None:\n self.generate_transcript_descriptors()\n\n count_distribution = []\n for c, f in self.count_distribution_dict.items():\n for i in range(f):\n count_distribution.append(c)\n\n return count_distribution\n\n def set_count_distribution_dict(self, count_distribution_dict):\n \"\"\"\n Sets the count distribution according to the specified count vector.\n\n :param count_distribution_dict: an ordered dictionary of count frequencies\n :return:\n \"\"\"\n if len(count_distribution_dict) == 0:\n self.count_distribution_dict = None\n else:\n self.count_distribution_dict = count_distribution_dict\n\n def get_outlier_lower(self):\n \"\"\"\n Returns the lower bound for outliers detected as points lying self.downsample_by number times higher than the\n 25-75% interquartile range.\n\n :return:\n \"\"\"\n if self.outlier_lower is not None:\n return self.outlier_lower\n\n count_distribution = self.get_count_distribution()\n\n if len(count_distribution) == 0:\n self.outlier_lower = 0\n return 0\n\n scd = sorted(count_distribution)\n lam = np.mean(scd)\n ps = [1 - stats.poisson.cdf(x, lam) for x in scd]\n ind = np.where(np.asarray(ps) <= self.outlier_probability)[0].tolist()\n\n if len(ind) > 0:\n # outliers = [scd[i] for i in ind]\n outlier_lower = scd[min(ind) - 1]\n else:\n outlier_lower = max(scd) + 1\n\n self.outlier_lower = outlier_lower\n return outlier_lower\n\n def set_outlier_lower(self, outlier_lower):\n # TODO add checks, set preconditions\n self.outlier_lower = outlier_lower\n\n def generate_count_vector_lists(self):\n \"\"\"\n Generates read count vectors for full length transcripts, terminus- and start- aligned sections,\n spanning respective regions of each transcript in the transcript assembly.\n The region is spanned according to the span_size set in annotation.\n\n :return: [[int]]: array of counts arrays of 5' mapping counts per position of the specified region of each transcript\n \"\"\"\n\n # if counts are already computed, return the existing ones\n\n logging.getLogger(config.FIVEPSEQ_LOGGER).info(\"Generating count vectors\")\n\n if self.count_vector_list_full_length is not None:\n if self.count_vector_list_term is not None:\n if self.count_vector_list_start is not None:\n logging.getLogger(config.FIVEPSEQ_LOGGER).warning(\"All count vectors are already generated\")\n\n # otherwise, retrieve the counts from the alignment file, referencing the transcript assembly\n self.logger.info(\"Retrieving counts (span size :%d)...\"\n % self.annotation.span_size)\n\n # initialize empty vectors\n transcript_count = len(self.annotation.get_transcript_assembly())\n self.count_vector_list_full_length = [None] * transcript_count\n self.count_vector_list_term = [None] * transcript_count\n self.count_vector_list_start = [None] * transcript_count\n\n # setup the the counter\n counter = 1\n ta = self.annotation.get_transcript_assembly()\n for i in range(transcript_count):\n transcript = ta[i]\n\n # update to console\n if counter % 10000 == 0:\n self.logger.info(\"\\r>>Transcript count: %d (%d%s)\\t\" % (\n counter, floor(100 * (counter - 1) / self.annotation.transcript_count),\n '%'), )\n\n # retrieve actual counts for current transcript\n try:\n count_vector = 
self.get_count_vector(transcript, self.annotation.span_size, self.FULL_LENGTH)\n self.count_vector_list_full_length[counter - 1] = count_vector\n self.count_vector_list_start[counter - 1] = count_vector[:2 * self.annotation.span_size]\n self.count_vector_list_term[counter - 1] = count_vector[-(2 * self.annotation.span_size):]\n\n except Exception as e:\n error_message = \"Problem retrieving counts for transcript %s. Reason: %s\" \\\n % (transcript.get_name(), e.message)\n self.logger.error(error_message)\n raise Exception(error_message)\n\n counter += 1\n self.check_for_codons = False\n\n # report successful retrieval\n self.logger.info(\"Finished retrieving count vectors\")\n\n @preconditions(lambda region: isinstance(region, str))\n def get_count_vector_list(self, region):\n \"\"\"\n Returns arrays of read count vectors spanning the given region of each transcript in the transcript assembly.\n The region is spanned according to the span_size set in annotation.\n\n :param region: str: Specifies the region of the transcript to span around\n :return: [[int]]: array of counts arrays of 5' mapping counts per position of the specified region of each transcript\n \"\"\"\n\n # if counts are already computed, return the existing ones else generate count vector lists first\n if self.count_vector_list_full_length is None:\n self.generate_count_vector_lists()\n\n if region == self.FULL_LENGTH:\n return self.count_vector_list_full_length\n elif region == self.START:\n return self.count_vector_list_start\n elif region == self.TERM:\n return self.count_vector_list_term\n\n else:\n error_message = \"Cannot retrieve the counts. \" \\\n \"Invalid region \\\"%s\\\" specified: should be one of (%s, %s, %s).\" \\\n % (region, self.FULL_LENGTH, self.START, self.TERM)\n self.logger.error(error_message)\n raise ValueError(error_message)\n\n @preconditions(lambda span_size: isinstance(span_size, int),\n lambda region: isinstance(region, str))\n def get_count_vector(self, transcript, span_size, region, downsample=True):\n \"\"\"\n Returns the vector of counts for the given transcript within the given spanning region.\n\n :param region: str: Specifies the region of the transcript to span for count vector generation\n :param transcript: plastid.Transcript: The transcript to return the counts for: is is already spanned with the specified span_size\n :param span_size: int: Specifies how many nucleotides to span around the specified region\n :param transcript_ind: int: the index of transcript in the transcript assembly\n :return: [int]: array of 5' mapping counts per position of the specified transcript region\n \"\"\"\n\n try:\n # retrieve the count vector using plastid function \"get_counts\" called from the given Transcript object\n count_vector = self.get_count_vector_safe(transcript, span_size)\n if downsample and any(x > self.outlier_lower for x in count_vector):\n count_vector_ds = [0] * len(count_vector)\n for i in range(len(count_vector_ds)):\n if count_vector[i] > self.outlier_lower:\n count_vector_ds[i] = self.outlier_lower\n outlier_params = [FivePSeqOut.get_transcript_attr(transcript, \"ID\"),\n FivePSeqOut.get_transcript_attr(transcript, \"Name\"),\n i - span_size, len(count_vector) - i - span_size, count_vector[i],\n count_vector_ds[i]]\n if outlier_params not in self.outliers:\n self.outliers.append(outlier_params)\n else:\n count_vector_ds[i] = count_vector[i]\n count_vector = count_vector_ds\n\n count_vector = count_vector[transcript.cds_start: transcript.cds_end + 2 * span_size]\n\n # return only 
the region of the vector that is specified by region and span_size parameters\n if region == self.FULL_LENGTH:\n # the full vector will be returned\n pass\n elif region == self.START:\n count_vector = count_vector[:2 * span_size]\n elif region == self.TERM:\n count_vector = count_vector[-(2 * span_size):]\n else:\n error_message = \"Cannot retrieve a count vector for the transcript %s. \" \\\n \"Invalid region \\\"%s\\\" specified: should be one of (%s, %s, %s).\" \\\n % (transcript.get_name(), region, self.FULL_LENGTH, self.START, self.TERM)\n self.logger.error(error_message)\n raise ValueError(error_message)\n\n except Exception as e:\n error_message = \"Problem retrieving the count vector for the transcript %s. Reason:%s\" % (\n transcript.get_name(), e.message)\n self.logger.error(error_message)\n raise Exception(error_message)\n\n # convert the count array to an int vector\n if not isinstance(count_vector, list):\n count_vector = count_vector.tolist()\n # if not isinstance(count_vector[0], int):\n count_vector = list(map(int, count_vector))\n\n return count_vector\n\n def get_count_vector_safe(self, transcript, span_size):\n \"\"\"\n A safe method to return count vector accounting for transcripts that span before or after genome start and end.\n\n :param transcript:\n :param span_size:\n :return:\n \"\"\"\n\n try:\n count_vector = transcript.get_counts(self.alignment.bam_array)\n except Exception as e:\n\n if transcript.spanning_segment.start < 0:\n diff = -1 * transcript.spanning_segment.start\n t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False)\n subchain_counts = list(t_subchain.get_counts(self.alignment.bam_array))\n count_vector = [0] * diff + subchain_counts\n\n logging.getLogger(config.FIVEPSEQ_LOGGER). \\\n debug(\"Transcript %s at the beginning of the genome padded with %d zeros\"\n % (FivePSeqOut.get_transcript_attr(transcript, \"Name\"), diff))\n\n else:\n t_len = transcript.spanning_segment.end - transcript.spanning_segment.start\n diff = transcript.spanning_segment.end - len(self.genome.genome_dict[transcript.chrom].seq)\n\n if diff > span_size:\n # NOTE wrongly annotated transcripts go outside genome boundaries,\n # NOTE return an empty vector spanned by span size as a safe way of discarding such transcripts\n count_vector = [0] * t_len\n logging.getLogger(config.FIVEPSEQ_LOGGER). \\\n debug(\"Transcript %s exceeds genome dimensions by %d bases\"\n % (FivePSeqOut.get_transcript_attr(transcript, \"Name\"), diff))\n\n else:\n t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False)\n\n subchain_counts = list(t_subchain.get_counts(self.alignment.bam_array))\n count_vector = subchain_counts + [0] * diff\n\n logging.getLogger(config.FIVEPSEQ_LOGGER). 
\\\n debug(\"Transcript %s at the end of the genome padded with %d zeros\"\n % (FivePSeqOut.get_transcript_attr(transcript, \"Name\"), diff))\n\n return count_vector\n\n def get_sequence(self, transcript, transcript_span_size, desired_span_size):\n if desired_span_size > transcript_span_size:\n raise ValueError(\"Desired span size %d bigger than the transcript span size %d\"\n % (desired_span_size, transcript_span_size))\n\n try:\n sequence = transcript.get_sequence(self.genome.genome_dict)\n desired_seq = sequence[transcript.cds_start + transcript_span_size - desired_span_size:\n transcript.cds_end + transcript_span_size + desired_span_size]\n except:\n t_len = transcript.cds_end - transcript.cds_start\n desired_seq = ''.join(['N'] * t_len + 2 * desired_span_size)\n\n return desired_seq\n\n def get_cds_sequence_safe(self, transcript, span_size):\n # NOTE a dangerous code here. Works correctly only if the input span size is the same as in the transcript.\n # TOCHANGE\n try:\n sequence = transcript.get_sequence(self.genome.genome_dict)\n cds_sequence = sequence[transcript.cds_start + span_size: transcript.cds_end + span_size]\n except:\n if transcript.chrom not in self.genome.genome_dict.keys():\n if transcript.chrom not in self.missing_chroms:\n self.missing_chroms.append(transcript.chrom)\n logging.getLogger(config.FIVEPSEQ_LOGGER).warn(\n \"No chromosome named %s found in the genome sequence\" % transcript.chrom)\n t_len = transcript.spanning_segment.end - transcript.spanning_segment.start\n cds_sequence = ''.join(['N'] * t_len)\n\n elif transcript.spanning_segment.start < 0:\n diff = -1 * transcript.spanning_segment.start\n t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False)\n sequence = t_subchain.get_sequence(self.genome.genome_dict)\n\n if span_size < diff:\n cds_sequence = sequence[transcript.cds_start + span_size - diff: transcript.cds_end + span_size]\n else: # TODO I don't know how to get sequence in this case: need debugging\n cds_sequence = sequence[transcript.cds_start + span_size - diff: transcript.cds_end + span_size]\n\n logging.getLogger(config.FIVEPSEQ_LOGGER). 
\\\n debug(\"Transcript %s at the beginning of the genome padded with %d N's\"\n % (FivePSeqOut.get_transcript_attr(transcript, \"Name\"), diff))\n\n else:\n t_len = transcript.spanning_segment.end - transcript.spanning_segment.start\n diff = transcript.spanning_segment.end - len(self.genome.genome_dict[transcript.chrom].seq)\n\n if diff > span_size:\n # NOTE wrongly annotated transcripts go outside genome boundaries,\n # NOTE return an empty sequence spanned by span size as a safe way of discarding such transcripts\n cds_sequence = ''.join(['N'] * t_len)\n\n else:\n t_subchain = transcript.get_subchain(diff, transcript.spanning_segment.end, stranded=False)\n\n sequence = t_subchain.get_sequence(self.genome.genome_dict)\n cds_sequence = sequence[transcript.cds_start + span_size:\n transcript.cds_end + span_size - diff]\n\n return cds_sequence\n\n def get_outliers_df(self):\n \"\"\"\n Returns the outliers in the form of a data-frame with column names.\n\n :return:\n \"\"\"\n\n colnames = [\"ID\", \"Name\", \"position_from_start\", \"position_from_term\", \"actual_count\", \"downsampled_count\"]\n outliers_df = pd.DataFrame(self.outliers, index=None, columns=colnames)\n return outliers_df\n\n @preconditions(lambda region: isinstance(region, str))\n def get_frame_counts_df(self, region):\n if region == self.START:\n if self.frame_counts_df_start is None:\n self.frame_counts_df_start = CountManager.extract_count_sums_per_frame_per_transcript(\n self.get_count_vector_list(FivePSeqCounts.FULL_LENGTH), self.annotation.span_size,\n FivePSeqCounts.START)\n return self.frame_counts_df_start\n\n elif region == self.TERM:\n if self.frame_counts_df_term is None:\n self.frame_counts_df_term = CountManager.extract_count_sums_per_frame_per_transcript(\n self.get_count_vector_list(FivePSeqCounts.FULL_LENGTH), self.annotation.span_size,\n FivePSeqCounts.TERM)\n return self.frame_counts_df_term\n\n else:\n err_msg = (\"Wrong region %s provided: should be either %s or %s\"\n % (region, self.START, self.TERM))\n self.logger.error(err_msg)\n raise Exception(err_msg)\n\n @preconditions(lambda region: isinstance(region, str))\n def get_meta_count_series(self, region):\n \"\"\"\n Computes counts of 5' mapping positions at all the transcripts on the specified region, within the specified span size,\n and returns the position-wise sum of counts as a single [int] array.\n\n :param region: str: the region of transcript (start (START) or terminus (TERM)) to span around\n\n :return: pd.Series{int: int}: series of position-wise sum of transcript-specific counts indexed according to the\n distance of genomic coordinates from the first nucleotides of the codon corresponding to the specified region (START or TERM)\n \"\"\"\n\n if region == self.FULL_LENGTH:\n error_message = \"Cannot compute meta counts for full length transcript counts: the counts should be of \" \\\n \"the same length. \" \\\n \"Regions can be specified from choices (%s, %s)\" % (self.START, self.TERM)\n self.logger.error(error_message)\n raise ValueError(error_message)\n elif region == self.START:\n if self.meta_count_series_start is not None:\n return self.meta_count_series_start\n elif region == self.TERM:\n if self.meta_count_series_term is not None:\n return self.meta_count_series_term\n else:\n error_message = \"Problem retrieving meta_counts. 
\" \\\n \"Invalid region \\\"%s\\\" specified: should be one of (%s, %s).\" \\\n % (region, self.START, self.TERM)\n self.logger.error(error_message)\n raise ValueError(error_message)\n try:\n count_vector_list = self.get_count_vector_list(region)\n except Exception as e:\n raise e\n meta_count_series = CountManager.count_vector_to_series(\n CountManager.compute_meta_counts(count_vector_list), region, tail=self.annotation.span_size)\n\n self.set_meta_count_series(meta_count_series, region)\n\n return meta_count_series\n\n @preconditions(lambda count_vector_list: isinstance(count_vector_list, list),\n lambda count_vector_list: isinstance(count_vector_list[0], list),\n lambda count_vector_list: isinstance(count_vector_list[0][0], int),\n lambda region: isinstance(region, str))\n def set_count_vector_list(self, count_vector_list, region):\n \"\"\"\n Sets the retrieved counts as a class property for later use. The property is chosen is based on the region supplied.\n\n :param count_vector_list: [[int]]: the vector of count vectors per transcript\n :param region: str: the region for which the counts were computed\n\n :return: nothing to return\n \"\"\"\n if region == self.START:\n self.count_vector_list_start = count_vector_list\n elif region == self.TERM:\n self.count_vector_list_term = count_vector_list\n elif region == self.FULL_LENGTH:\n self.count_vector_list_full_length = count_vector_list\n else:\n error_message = \"Cannot set counts: wrong region %s supplied: should be either of (%s, %s, %s)\" \\\n % (region, self.START, self.TERM, self.FULL_LENGTH)\n self.logger.error(error_message)\n raise ValueError(error_message)\n\n @preconditions(lambda meta_count_series: isinstance(meta_count_series, pd.Series),\n lambda region: isinstance(region, str))\n def set_meta_count_series(self, meta_count_series, region):\n \"\"\"\n Sets the retrieved meta-counts as a class property for later use. 
The property is chosen is based on the region supplied.\n\n :param meta_count_series: Series{int:int}: the panda Series of per-position mapped read sums across transcripts\n indexed by position from first nucleotide of START of STOP codon\n :param region: str: the region for which the counts were computed\n\n :return: nothing to return\n \"\"\"\n if region == self.START:\n self.meta_count_series_start = meta_count_series\n elif region == self.TERM:\n self.meta_count_series_term = meta_count_series\n\n @preconditions(lambda region: isinstance(region, str),\n lambda span_before: isinstance(span_before, int),\n lambda span_before: span_before >= 0,\n lambda span_after: isinstance(span_after, int),\n lambda span_after: span_after >= 0)\n def get_unique_sequences(self, region, span_before, span_after):\n \"\"\"\n Retrieves the unique set of sequences spanning the given region of all transcripts, with the specified parameters.\n\n :param region: str: the START or TERM parts of the transcript\n :param span_before: int: the number of nucleotides to span before the first codon of the specified region\n :param span_after: int: the number of nucleotides to span after the last codon of the specified region\n\n :return: dict{str:int}: a dictionary keyed by the unique sequences identified within the spanning regions\n and valued by the number of occurrences of that sequence in transcripts\n \"\"\"\n sequences = {}\n i = 0\n for transcript in self.annotation.get_transcript_assembly(max(span_before, span_after)):\n sequence = transcript.get_sequence(self.genome.genome_dict)\n if region == self.TERM:\n endpoint = len(transcript.spanning_segment) - max(span_before, span_after)\n span_sequence = sequence[endpoint - span_before: endpoint + span_after]\n elif region == self.START:\n startpoint = max(span_before, span_after)\n span_sequence = sequence[startpoint - span_before: startpoint + span_after]\n else:\n raise Exception\n if span_sequence in sequences.keys():\n sequences[span_sequence] += 1\n else:\n sequences[span_sequence] = 1\n i += 1\n return sequences\n\n def get_amino_acid_pauses(self):\n if self.amino_acid_count_df is None:\n self.compute_codon_pauses()\n\n return self.amino_acid_count_df\n\n def get_codon_pauses(self):\n if self.codon_count_df is None:\n self.compute_codon_pauses()\n\n return self.codon_count_df\n\n def get_tricodon_pauses(self):\n if self.tricodon_count_df is None:\n self.compute_codon_pauses()\n\n return self.tricodon_count_df\n\n def get_dicodon_pauses(self):\n if self.dicodon_count_df is None:\n self.compute_codon_pauses()\n\n return self.dicodon_count_df\n\n def get_dipeptide_pauses(self):\n if self.dipeptide_count_df is None:\n self.compute_codon_pauses()\n\n return self.dipeptide_count_df\n\n def get_tripeptide_pauses(self):\n if self.tripeptide_count_df is None:\n self.compute_codon_pauses()\n\n return self.tripeptide_count_df\n\n # TODO a modification for di-codon counts to me incorporated inseat of get_codon_pauses in the future\n @preconditions(lambda dist_from: isinstance(dist_from, int),\n lambda dist_from: dist_from < 0,\n lambda dist_to: isinstance(dist_to, int),\n lambda dist_to: dist_to >= 0)\n def compute_codon_pauses(self, dist_from=-30, dist_to=3, downsample=True):\n \"\"\"\n Counts the meta-number of 5' mapping positions at the given distance from a codon or codon-pair\n Only transcripts with cds of length multiple of 3 are accounted for.\n The only frame in these transcripts is considered.\n\n :param codon:\n :param dist_from: negative distance from each 
codon or codon-pair\n :param dist_to: positive distance after each codon or codon-pair\n :param mask_dist: the number of positions to mask in the beginning and end of the gene body\n :return:\n \"\"\"\n\n self.logger.info(\n \"Counting codon specific pauses within %d to %d nt distance from the first nucleotide of each codon\" %\n (dist_from, dist_to))\n\n if self.config.args.no_mask:\n mask_dist = 0\n self.logger.info(\"Transcript boundaries will not be masked\")\n else:\n if hasattr(config.args, \"codon_mask_size\"):\n mask_dist = config.args.codon_mask_size\n else:\n mask_dist = self.MASK_DIST\n self.logger.info(\"Transcript boundaries will be masked by %d nucleotides\" % mask_dist)\n\n codon_count_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(),\n columns=range(dist_from, dist_to))\n\n dicodon_count_df = pd.DataFrame(data=0, index=Codons.get_dicodon_table().keys(),\n columns=range(dist_from + 3, dist_to + 3))\n\n dipeptide_count_df = pd.DataFrame(data=0, index=Codons.get_dipeptide_list(),\n columns=range(dist_from + 3, dist_to + 3))\n\n tricodon_count_df = pd.DataFrame(data=0, index=Codons.get_tricodon_table().keys(),\n columns=range(dist_from + 6, dist_to + 6))\n\n tripeptide_count_df = pd.DataFrame(data=0, index=Codons.get_tripeptide_list(),\n columns=range(dist_from + 6, dist_to + 6))\n\n self.codon_genome_usage_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(),\n columns=['abs', 'fraction'])\n self.amino_acid_genome_usage_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(),\n columns=['abs', 'fraction'])\n\n counter = 1\n\n transcript_assembly = self.annotation.get_transcript_assembly(\n span_size=0) # don't take more than the gene body (different from previous versions)\n transcript_count = len(transcript_assembly)\n for t in range(transcript_count):\n transcript = transcript_assembly[t]\n if np.floor(transcript_count / 1000) > 0 and counter % 1000 == 0:\n self.logger.info(\"\\r>>Transcript count: %d (%d%s)\\t\" % (\n counter, floor(100 * (counter - 1) / transcript_count), '%',), )\n counter += 1\n\n count_vector = self.get_count_vector(transcript, span_size=0,\n region=FivePSeqCounts.FULL_LENGTH,\n downsample=downsample)\n\n cds_sequence = self.get_cds_sequence_safe(transcript, 0)\n\n if sum(count_vector) == 0:\n continue\n\n if len(cds_sequence) != len(count_vector):\n self.logger.warning(\"Transcript num %d: cds sequence length %d not equal to count vector length %d\"\n % (counter, len(cds_sequence), len(count_vector)))\n continue\n\n if (mask_dist >= 3):\n # v1.0b3 mask the first and last 20 counts to avoid initiation affecting codon-specific counts\n count_vector[0:mask_dist] = [0] * mask_dist\n # v1.0b3 mask the last 20 nucleotides to avoid termination affecting codon-specific counts, but keep STOP codon counts\n cds_sequence = cds_sequence[0:len(cds_sequence) - mask_dist] + ''.join(\n 'N' * (mask_dist - 3)) + cds_sequence[len(cds_sequence) - 3:len(cds_sequence)]\n\n # v1.0b3 add stretches of 0's to count_vector and N's to cds_sequence to avoid checking vector boundaries\n count_vector = [0] * (-1 * dist_from) + count_vector + [0] * dist_to\n cds_sequence = ''.join('N' * (-1 * dist_from)) + cds_sequence + ''.join('N' * dist_to)\n\n # store genome usage stats\n for i in range(0, len(cds_sequence), 3):\n codon = cds_sequence[i: i + 3].upper()\n if codon in self.codon_genome_usage_df.index:\n self.codon_genome_usage_df.at[codon, \"abs\"] += 1\n amino_acid = Codons.CODON_TABLE.get(codon)\n self.amino_acid_genome_usage_df.at[amino_acid, \"abs\"] += 
1\n\n # identify 3nt bins with non-zero counts\n ind = np.array(range(0, len(count_vector), 3))\n hits = [sum(count_vector[i:i + 3]) > 0 for i in ind]\n non_empty_ind = ind[hits]\n\n # loop through non-empty triplets only\n for i in non_empty_ind:\n # loop through all codons dist_from nucleotides downstream and dist_to nucleotides upstream\n j_range = list(np.arange(i, i - dist_to, -3))[::-1] + list(np.arange(i + 3, i + 3 - dist_from, 3))\n for j in j_range:\n if j < 0:\n continue\n if j + 3 > len(cds_sequence):\n break\n codonA = cds_sequence[j: j + 3].upper()\n\n if j - 3 >= 0:\n codonP = cds_sequence[j - 3: j].upper()\n else:\n codonP = 'NNN'\n if j - 6 >= 0:\n codonE = cds_sequence[j - 6: j - 3].upper()\n else:\n codonE = 'NNN'\n\n if (len(codonA) == 3) & (codonA in Codons.CODON_TABLE.keys()):\n for p in range(0, 3):\n d = i - j + p\n try:\n codon_count_df.at[codonA, d] += count_vector[i + p]\n if len(codonP) == 3 and codonP in Codons.CODON_TABLE.keys():\n dicodon_count_df.at[codonP + codonA, d + 3] += count_vector[i + p]\n dipeptide = Codons.get_peptide_from_codon_list([codonP, codonA])\n dipeptide_count_df.at[dipeptide, d + 3] += count_vector[i + p]\n\n if len(codonE) == 3 and codonE in Codons.CODON_TABLE:\n tricodon_count_df.at[codonE + codonP + codonA, d + 6] += count_vector[i + p]\n tripeptide = Codons.get_peptide_from_codon_list([codonE, codonP, codonA])\n tripeptide_count_df.at[tripeptide, d + 6] += count_vector[i + p]\n\n except Exception as e:\n self.logger.warn(\"Index out of range: i: %d, j: %d, p: %d, d: %d. %s\"\n % (i, j, p, d, str(e)))\n\n self.codon_genome_usage_df.loc[:, \"fraction\"] = self.codon_genome_usage_df.loc[:, \"abs\"] / sum(\n self.codon_genome_usage_df.loc[:, \"abs\"])\n self.amino_acid_genome_usage_df.loc[:, \"fraction\"] = self.amino_acid_genome_usage_df.loc[:, \"abs\"] / sum(\n self.amino_acid_genome_usage_df.loc[:, \"abs\"])\n self.amino_acid_count_df = self.codon_to_amino_acid_count_df(codon_count_df)\n self.tripeptide_count_df = self.filter_codon_counts(tripeptide_count_df, self.get_tripeptide_pos())\n self.dipeptide_count_df = self.filter_codon_counts(dipeptide_count_df, self.get_dipeptide_pos())\n\n # rename codon_count_df indices by adding amino acid names\n new_index = [Codons.CODON_TABLE.get(codon) + '_' + codon for codon in codon_count_df.index]\n codon_count_df.index = new_index\n self.codon_count_df = codon_count_df\n\n # rename codon_count_df indices by adding amino acid names\n self.logger.info(\"Mapping tricodons to amino acid names\")\n tricodon_count_df.index = Codons.get_tricodon_full_index()\n self.tricodon_count_df = self.filter_codon_counts(tricodon_count_df, self.get_tripeptide_pos())\n\n # rename codon_count_df indices by adding amino acid names\n self.logger.info(\"Mapping dicodons to amino acid names\")\n dicodon_count_df.index = Codons.get_dicodon_full_index()\n self.dicodon_count_df = self.filter_codon_counts(dicodon_count_df, self.get_dipeptide_pos())\n\n return\n\n def codon_to_amino_acid_count_df(self, codon_count_df):\n\n amino_acid_count_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(),\n columns=codon_count_df.columns)\n\n for codon in codon_count_df.index:\n aa = Codons.CODON_TABLE.get(codon)\n amino_acid_count_df.loc[aa, :] += codon_count_df.loc[codon, :]\n\n return amino_acid_count_df\n\n def get_tripeptide_pos(self):\n\n if hasattr(config.args, \"tripeptide_pos\"):\n pos = config.args.tripeptide_pos\n else:\n pos = self.TRIPEPTIDE_POS\n\n return pos\n\n def get_dipeptide_pos(self):\n\n if 
hasattr(config.args, \"dipeptide_pos\"):\n pos = config.args.dipeptide_pos\n else:\n pos = self.DIPEPTIDE_POS\n\n return pos\n\n def filter_codon_counts(self, codon_count_df, pos, top=50):\n \"\"\"\n Filter the di/tricodon (or di/tripeptide) counts to exclude low counts (rowSums less than the specified threshold) and\n to include only the top di/tricodons with highest relative counts at the given position\n\n :param codon_count_df: the codon_df to filter\n :param top: the number of highest relative count tricodons to keep\n :param pos: the position to filter the top counts\n :return codon_filtered_df: the filtered count dataframe\n \"\"\"\n\n self.logger.info(\"Sorting and selecting top %d peptides/codons at position %d from the A site\" %\n (top, pos))\n\n codon_filtered_df = codon_count_df[codon_count_df.sum(1) >= self.COUNT_THRESHOLD]\n pos_rel_counts = codon_filtered_df[pos] / codon_filtered_df.sum(1)\n codon_filtered_df = codon_filtered_df.iloc[\n sorted(range(len(pos_rel_counts)), reverse=True, key=lambda k: pos_rel_counts[k])[0:top]]\n return codon_filtered_df\n\n def get_amino_acid_stats(self):\n if self.amino_acid_stats_df is None:\n self.amino_acid_stats_df = self.compute_codon_stats_amino_acid()\n\n return self.amino_acid_stats_df\n\n def get_codon_stats(self):\n if self.codon_stats_df is None:\n self.codon_stats_df = self.compute_codon_stats_codon()\n\n return self.codon_stats_df\n\n def compute_codon_genome_usage(self):\n self.codon_genome_usage_df = pd.DataFrame(data=0, index=Codons.CODON_TABLE.keys(),\n columns=['abs', 'fraction'])\n self.amino_acid_genome_usage_df = pd.DataFrame(data=0, index=Codons.AMINO_ACID_TABLE.keys(),\n columns=['abs', 'fraction'])\n\n def compute_codon_stats_amino_acid(self):\n return self.compute_codon_stats(self.get_amino_acid_pauses(), self.amino_acid_genome_usage_df)\n\n def compute_codon_stats_codon(self):\n return self.compute_codon_stats(self.get_codon_pauses(), self.codon_genome_usage_df)\n\n def compute_codon_stats(self, codon_counts, codon_genome_usage, until=-3):\n \"\"\"\n Counts usage and frame protection stats for each codon/amino-acid.\n\n The following dataframe will be generated based on codon counts table:\n\n codon/aminoacid FPI Frame peak(pos) peak(scale) usage(sum of counts) genome_presence\n\n :return: dataframe\n \"\"\"\n\n self.logger.info(\"Counting codon usage statistics\")\n\n try:\n stop_ind = codon_counts.keys().to_list().index(until)\n codon_counts = codon_counts.iloc[:, 0:stop_ind]\n f2 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 1, -1, -3))])\n f1 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 2, -1, -3))])\n f0 = sum([codon_counts.iloc[:, i] for i in reversed(range(stop_ind - 3, -1, -3))])\n\n codon_stats = pd.DataFrame(list(zip(f0, f1, f2)), columns=['F0', 'F1', 'F2'])\n\n codon_stats['FPI'] = np.zeros(len(codon_stats))\n codon_stats['F'] = np.zeros(len(codon_stats))\n codon_stats['F_perc'] = np.zeros(len(codon_stats))\n\n for i in range(len(codon_stats)):\n fpi, fmax, fperc = CountManager.fpi_stats_from_frame_counts(codon_stats.iloc[i, :])\n codon_stats.loc[i, 'FPI'] = fpi\n codon_stats.loc[i, 'F'] = fmax\n codon_stats.loc[i, 'F_perc'] = fperc\n\n codon_stats['peak_pos'] = [np.argmax(codon_counts.iloc[i, :]) for i in range(len(codon_stats))]\n codon_stats['peak_scale'] = np.zeros(len(codon_stats))\n\n\n for i in range(len(codon_stats)):\n for i in range(len(codon_stats)):\n counts = list(codon_counts.iloc[i, :])\n if sum(counts) > 0:\n frame = int(codon_stats.loc[i, 'F'])\n 
frame_inds = [j for j in reversed(range(len(counts) - 3 + frame, -1, -3))]\n frame_counts = [counts[j] for j in frame_inds]\n codon_stats.loc[i, 'peak_scale'] = len(frame_counts) * max(frame_counts) / sum(frame_counts)\n codon_stats.loc[i, 'peak_pos'] = codon_counts.columns[frame_inds[np.argmax(frame_counts)]]\n\n codon_stats['usage'] = list(sum([codon_counts.iloc[:, i] for i in range(0, stop_ind)]))\n codon_stats['genome_usage_abs'] = list(codon_genome_usage.loc[:, 'abs'])\n codon_stats['genome_usage_fraction'] = list(codon_genome_usage.loc[:, 'fraction'])\n usage_norm = codon_stats['usage'] / codon_stats['genome_usage_fraction']\n usage_norm /= sum(usage_norm)\n codon_stats['usage_normalized'] = usage_norm\n\n codon_stats.index = codon_counts.index\n\n return codon_stats\n\n except:\n self.logger.warning(\"Could not compute codon stats. Codon counts dataframe did not have column %d.\" % until)\n return None\n # exclude the counts downstream from -3\n\n @preconditions(lambda loci_file: str)\n def get_pauses_from_loci(self, loci_file, read_locations=READ_LOCATIONS_ALL):\n \"\"\"\n Counts the meta-number of 5' mapping positions at the given distance from the specified loci\n\n The loci file should contain one locus per row.\n Two tab separated columns should indicate chromosome number and position.\n\n The distance of 5' mapping positions from each loci is counted within each cds.\n The padding sizes are subtracted from the start and end of each transcript.\n\n :param padding: int: padding, bp (not to count the first and last regions in the transcripts)\n :param loci_file: str: full path to the file specifying the loci.\n :return:\n \"\"\"\n\n self.logger.info(\n \"Counting pauses in %s region from loci given in file %s\" % (read_locations, loci_file))\n\n loci = pd.read_csv(loci_file, sep=\"\\t\", index_col=None)\n self.loci_overlaps = []\n # the results will be kept in a dictionary:\n # key - distance from any locus\n # value - number of mapping positions at key distance from any locus\n loci_pauses_dict = {}\n\n span_size = self.annotation.span_size\n\n counter = 0\n loci_row = 0\n\n done = False\n move_transcript = True\n move_locus = False\n tg = self.annotation.get_transcript_assembly(span_size)\n transcript = None\n\n while True:\n if counter % 1000 == 0:\n self.logger.info(\"\\r>>Transcript count: %d (%d%s)\\t\" % (\n counter, floor(100 * (counter - 1) / self.annotation.transcript_count), '%',), )\n\n if move_locus:\n if loci.shape[0] == loci_row:\n self.logger.debug(\"Reached the end of loci file (row %d)\" % loci_row)\n break\n loci_row += 1\n move_locus = False\n continue\n\n if move_transcript:\n try:\n transcript = tg[counter]\n except:\n self.logger.debug(\"Reached the end of transcript assembly (counter: %d)\" % counter)\n break\n counter += 1\n move_transcript = False\n continue\n\n # check if the locus at the cursor is within the current transcript\n if loci_row < loci.shape[0]:\n if str(transcript.chrom) == str(loci.loc[loci_row, \"chr\"]):\n\n if loci.loc[loci_row, \"str\"] == \"+\":\n locus_pos = loci.loc[loci_row, \"start\"]\n else:\n locus_pos = loci.loc[loci_row, \"end\"]\n\n # locus is upstream of transcript -> move locus\n if transcript.cds_genome_start - span_size > locus_pos:\n move_locus = True\n continue\n # transcript is upstream of locus -> move transcript\n elif transcript.cds_genome_end + span_size < locus_pos:\n move_transcript = True\n continue\n\n elif str(transcript.strand) != str(loci.loc[loci_row, \"str\"]):\n move_locus = True\n continue\n\n else:\n 
count_vector = self.get_count_vector(transcript, span_size, FivePSeqCounts.FULL_LENGTH,\n downsample=True)\n\n transcript_genome_start = transcript.cds_genome_start - span_size\n transcript_genome_end = transcript.cds_genome_end + span_size\n\n if len(count_vector) != transcript_genome_end - transcript_genome_start:\n move_transcript = True\n continue\n\n if transcript.strand == \"+\":\n locus_ind = locus_pos - transcript_genome_start\n else:\n locus_ind = transcript_genome_end - locus_pos\n\n if read_locations == self.READ_LOCATIONS_ALL:\n ind = np.array(range(len(count_vector) - 2 * span_size, len(count_vector)))\n elif read_locations == self.READ_LOCATIONS_5UTR:\n ind = np.array(range(0, span_size))\n elif read_locations == self.READ_LOCATIONS_3UTR:\n ind = np.array(range(len(count_vector) - span_size, len(count_vector)))\n elif read_locations == self.READ_LOCATIONS_CDS:\n ind = np.array(range(len(count_vector) - 2 * span_size, len(count_vector) - span_size))\n else:\n ind = np.array(range(0, len(count_vector)))\n\n hits = [count_vector[i] > 0 for i in ind]\n non_empty_ind = ind[hits]\n\n for i in non_empty_ind:\n distance = i - locus_ind\n\n if distance < 2 * span_size and distance >= -2 * span_size:\n if distance in loci_pauses_dict.keys():\n loci_pauses_dict[distance] += count_vector[i]\n else:\n loci_pauses_dict.update({distance: count_vector[i]})\n\n overlap = [FivePSeqOut.get_transcript_attr(transcript, \"ID\"),\n FivePSeqOut.get_transcript_attr(transcript, \"Name\"),\n transcript.chrom, transcript.strand,\n transcript.cds_genome_start,\n transcript.cds_genome_end,\n loci.loc[loci_row, \"symbol\"],\n loci.loc[loci_row, \"chr\"],\n loci.loc[loci_row, \"str\"],\n loci.loc[loci_row, \"start\"],\n loci.loc[loci_row, \"end\"],\n i, distance, count_vector[i]]\n self.loci_overlaps.append(overlap)\n\n move_locus = True\n\n elif str(transcript.chrom) > str(loci.loc[loci_row, \"chr\"]):\n move_locus = True\n continue\n\n else:\n move_transcript = True\n continue\n else:\n break\n\n # turn the dictionary into a metacount vector, with indices from -1*maxdistance to maxdistance\n self.logger.debug(\"Merging the dictionary into metacounts\")\n maxdist = 2 * span_size\n metacount_vector = [0] * 2 * maxdist\n for i in range(-1 * maxdist, maxdist):\n if i in loci_pauses_dict.keys():\n metacount_vector[maxdist + i] = loci_pauses_dict[i]\n metacount_series = pd.Series(data=metacount_vector, index=np.arange(-1 * maxdist, maxdist))\n\n return metacount_series\n\n def get_loci_overlaps_df(self):\n \"\"\"\n Returns the overlaps of transcripts with given loci in the form of a data-frame with column names.\n\n :return:\n \"\"\"\n colnames = [\"ID\", \"Name\", \"chr\", \"str\", \"genome_start\", \"genome_end\", \"RBP\", \"loc_chr\", \"loc_str\", \"loc_start\",\n \"loc_end\",\n \"i\", \"dist\", \"count\"]\n\n outliers_df = pd.DataFrame(self.loci_overlaps, index=None, columns=colnames)\n return outliers_df\n\n @preconditions(lambda num: isinstance(num, int))\n def top_populated_transcript_indices(self, num=1000):\n \"\"\"\n Returns indices of top populated transcripts.\n A populated transcript is defined as the one with most length-relative number\n of positions with non-zero counts.\n\n :param num: int: number of transcript indices to return\n :return: [int]: a list of transcript indices in the transcript assembly\n \"\"\"\n populated = [0] * self.annotation.transcript_count\n for i in range(self.annotation.transcript_count):\n transcript = self.annotation.transcript_assembly[i]\n count_vector = 
self.get_count_vector(transcript, 0, FivePSeqCounts.FULL_LENGTH, downsample=False)\n populated[i] = sum(count_vector > 0) / len(count_vector)\n populated_indices = sorted(range(len(populated)), key=lambda k: populated[k])\n\n return populated_indices\n\n\nclass FivePSeqCountsContainer:\n \"\"\"\n A wraper for the following data structures:\n count_vector_list_start = None\n count_vector_list_term = None\n count_vector_list_full_length = None\n meta_count_series_start = None\n meta_count_series_term = None\n frame_counts_df_start = None\n frame_counts_df_term = None\n\n \"\"\"\n count_vector_list_start = None\n count_vector_list_term = None\n count_vector_list_full_length = None\n meta_count_series_start = None\n meta_count_series_term = None\n frame_counts_df_start = None\n frame_counts_df_term = None\n\n def __init__(self, count_vector_list_start,\n count_vector_list_term,\n count_vector_list_full_length,\n meta_count_series_start,\n meta_count_series_term,\n frame_counts_df_start,\n frame_counts_df_term):\n self.count_vector_list_start = count_vector_list_start\n self.count_vector_list_term = count_vector_list_term\n self.count_vector_list_full_length = count_vector_list_full_length\n self.meta_count_series_term = meta_count_series_term\n self.meta_count_series_start = meta_count_series_start\n self.frame_counts_df_start = frame_counts_df_start\n self.frame_counts_df_term = frame_counts_df_term\n\n\nclass CountManager:\n \"\"\"\n This module implements a set of static functions to handle count vectors retrieved from FivePSeqCounts class.\n \"\"\"\n\n def __init__(self):\n pass\n\n @staticmethod\n @preconditions(lambda count_vector_list: isinstance(count_vector_list, list),\n lambda count_vector_list: isinstance(count_vector_list[0], list),\n lambda count_vector_list: isinstance(count_vector_list[0][0], int))\n def compute_meta_counts(count_vector_list):\n \"\"\"\n Computes the sum of counts at each position across transcripts.\n\n :param count_vector_list: [[int]] a list of count vectors for all transcripts\n\n :return: [int]: a vector of position-wise count sums\n \"\"\"\n\n # TODO check that the count vectors have the same length\n max_len = 0\n for i in range(len(count_vector_list)):\n if len(count_vector_list) > max_len:\n max_len = len(count_vector_list[i])\n\n for i in range(len(count_vector_list)):\n if len(count_vector_list[i]) < max_len:\n short_vec = count_vector_list[i]\n long_vec = [0] * max_len\n long_vec[0:len(short_vec)] = short_vec\n count_vector_list[i] = long_vec\n\n # sum the position-wise counts\n meta_count_vector = np.vstack(count_vector_list).sum(axis=0).tolist()\n\n return meta_count_vector\n\n @staticmethod\n @preconditions(lambda count_vector: isinstance(count_vector, list),\n lambda count_vector: isinstance(count_vector[0], int),\n lambda span_size: isinstance(span_size, int),\n lambda region: isinstance(region, str),\n lambda include_span: isinstance(include_span, bool))\n def extract_frame_count_vectors(count_vector, span_size, region=FivePSeqCounts.START, include_span=False):\n \"\"\"\n Takes a vector of position-wise int counts across full length transcripts\n and returns counts for three different frames from 0 to 2,\n relative to either START (default) or TERM regions.\n\n :param count_vector: [int]: a transcript-specific vector of 5P read counts per transcript position\n :param span_size: int: the size of regions spanning around the transcript cds\n :param include_span: if true returns frame counts including the spanning regions, and only the cds 
region\n otherwise\n :param region: region (START or TERM) relative to which to count the frames\n\n :return: a tuple of frame count arrays (frame0:[int], frame1:[int], frame2:[int])\n \"\"\"\n\n # determine the tail size to be subtracted from the count_vector\n if include_span:\n tail = 0\n else:\n tail = span_size\n\n # for START, start the Frame0 from tail to the length of the vector minus the tail\n if region == FivePSeqCounts.START:\n frame0_array = count_vector[0 + tail: len(count_vector) - tail: 3]\n frame1_array = count_vector[1 + tail: len(count_vector) - tail: 3]\n frame2_array = count_vector[2 + tail: len(count_vector) - tail: 3]\n\n elif region == FivePSeqCounts.TERM:\n # NOTE the frames relative to START and TERM should be aligned in the future\n # NOTE (if cds length is not a multiple of 3)\n frame0_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 3 - tail, -1 + tail, -3)))]\n frame1_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 2 - tail, -1 + tail, -3)))]\n frame2_array = [count_vector[i] for i in list(reversed(range(len(count_vector) - 1 - tail, -1 + tail, -3)))]\n\n else:\n error_message = \"Invalid region %s specified: should be either %s or %s\" \\\n % (region, FivePSeqCounts.START, FivePSeqCounts.TERM)\n logger = logging.getLogger(config.FIVEPSEQ_LOGGER)\n logger.error(error_message)\n raise Exception(error_message)\n\n return frame0_array, frame1_array, frame2_array\n\n @staticmethod\n @preconditions(lambda count_vector: isinstance(count_vector, list),\n lambda count_vector: isinstance(count_vector[0], int),\n lambda region: isinstance(region, str),\n lambda tail: isinstance(tail, int),\n lambda tail: tail >= 0)\n def count_vector_to_series(count_vector, region, tail=0):\n \"\"\"\n Takes a vector of counts, indexes them with distances from the specified region.\n Returns a series with indexes as genomic coordinates from start/stop codons, and values as counts at each coordinates.\n\n For the following REAL coordinates (R), D0, D1 and D2 will be converted to:\n\n Relative to START:\n\n R: 0, 1, ... S0, Sm, S1, ... L - 1\n D: -T, -T+1, ... 0, 1, 2, ... L - T - 1\n\n Relative to TERM:\n\n R: 0, 1, ... S0, Sm, S1, ... L - 1\n D: -(L-T-3), -(L-T-2), ... 0, 1, 2, ... 
T + 2\n\n\n Legend:\n R Real coordinates\n S0 First nucleotides of START or TERM codon\n S1 Last nucleotides of START or TERM codon\n T Tail\n L Vector length\n\n :param count_vector: [int]: a vector of summed position-wise counts for a (meta)-transcript\n :param region: str: the region respective to which the distance is calculated\n :param tail: int:\n\n :return: pandas.Series: a series with indices as genomic coordinates* and values as meta counts.\n *-corresponding to positions' distance from nucleotide 0 of START/TERM codons\n\n \"\"\"\n\n if region == FivePSeqCounts.START:\n d = np.arange(-tail, len(count_vector) - tail)\n\n elif region == FivePSeqCounts.TERM:\n d = np.arange(-(len(count_vector) - tail - 3), tail + 3)\n\n else:\n error_message = \"Invalid region %s specified: should be either %s or %s\" \\\n % (region, FivePSeqCounts.START, FivePSeqCounts.TERM)\n logger = logging.getLogger(config.FIVEPSEQ_LOGGER)\n logger.error(error_message)\n raise Exception(error_message)\n\n counts_series = pd.Series(data=count_vector, index=d)\n\n return counts_series\n\n @staticmethod\n @preconditions(lambda count_vector: isinstance(count_vector, list),\n lambda count_vector: isinstance(count_vector[0], int),\n lambda region: isinstance(region, str),\n lambda tail: isinstance(tail, int),\n lambda tail: tail >= 0)\n def count_vector_to_df(count_vector, region, tail=0):\n \"\"\"\n Takes a vector of counts, indexes them with distances from the specified region.\n Returns a dataframe with indexes as genomic coordinates from start/stop codons, and values as counts at each coordinates.\n\n For the following REAL coordinates (R), D0, D1 and D2 will be converted to:\n\n Relative to START:\n\n R: 0, 1, ... S0, Sm, S1, ... L - 1\n D: -T, -T+1, ... 0, 1, 2, ... L - T - 1\n\n Relative to TERM:\n\n R: 0, 1, ... S0, Sm, S1, ... L - 1\n D: -(L-T-3), -(L-T-2), ... 0, 1, 2, ... T + 2\n\n\n Legend:\n R Real coordinates\n S0 First nucleotides of START or TERM codon\n S1 Last nucleotides of START or TERM codon\n T Tail\n L Vector length\n\n :param count_vector: [int]: a vector of summed position-wise counts for a (meta)-transcript\n :param region: str: the region respective to which the distance is calculated\n :param tail: int:\n\n :return: pandas.Series: a series with indices as genomic coordinates* and values as meta counts.\n *-corresponding to positions' distance from nucleotide 0 of START/TERM codons\n\n \"\"\"\n\n if region == FivePSeqCounts.START:\n d = np.arange(-tail, len(count_vector) - tail)\n\n elif region == FivePSeqCounts.TERM:\n d = np.arange(-(len(count_vector) - tail - 3), tail + 3)\n\n else:\n error_message = \"Invalid region %s specified: should be either %s or %s\" \\\n % (region, FivePSeqCounts.START, FivePSeqCounts.TERM)\n logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message)\n raise Exception(error_message)\n\n counts_df = pd.DataFrame({'D': d, 'C': count_vector})\n\n return counts_df\n\n @staticmethod\n @preconditions(lambda region: isinstance(region, str),\n lambda span_size: isinstance(span_size, int))\n def extract_count_sums_per_frame_per_transcript(count_vector_list, span_size, region):\n \"\"\"\n Returns a data frame with rows representing transcripts and columns (F0, F1, F2) representing the sum of 5P\n read mapping counts at each frame. 
The transcripts are aligned at the start or the end, depending on the\n region specified.\n\n :param span_size:\n :param count_vector_list: the list of per-transcript count vectors\n :param region: str: the region to align the transcripts to\n\n :return: a dataframe with frame-based count-sums for each transcript\n \"\"\"\n\n logging.getLogger(config.FIVEPSEQ_LOGGER).debug(\n \"Retrieving count-sums per frame relative to %s ...\" % region)\n\n # Create an empty dataframe\n n = len(count_vector_list)\n frame_counts_df = pd.DataFrame({'F0': [0] * n,\n 'F1': [0] * n,\n 'F2': [0] * n})\n for t_ind in range(0, n):\n # Print status update to console\n if t_ind % 10000 == 0:\n logging.getLogger(config.FIVEPSEQ_LOGGER).info(\"\\r>>Transcript count: %d (%d%s)\\t\" % (\n t_ind, np.floor(100 * (t_ind - 1) / n), '%'))\n\n # extract frame count vectors from count vectors\n count_vector = count_vector_list[t_ind]\n if sum(count_vector) == 0:\n for f in range(0, 3):\n frame_counts_df.iloc[t_ind, f] = 0\n else:\n frame_counts = CountManager.extract_frame_count_vectors(count_vector, span_size, region)\n\n # sum-up counts in each frame and add to the dataframe\n for f in range(0, 3):\n frame_counts_df.iloc[t_ind, f] = sum(frame_counts[f])\n\n return frame_counts_df\n\n @staticmethod\n @preconditions(lambda file_path: isinstance(file_path, str))\n def read_index_as_list(file_path):\n \"\"\"\n Reads a new line separated list of integers from a file to a list of indices.\n\n :param file_path: str: full path to the file\n :return: [int]: list of indices\n :exception: raise IOError if file does not exist\n \"\"\"\n if not os.path.exists(file_path):\n error_message = \"Problem reading counts: the file %s does not exist\" % file_path\n logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message)\n raise IOError(error_message)\n logging.getLogger(config.FIVEPSEQ_LOGGER).debug(\"Reading count file %s\" % file_path)\n\n indices = list(pd.read_csv(file_path, header=None).iloc[:, 0])\n return indices\n\n @staticmethod\n @preconditions(lambda file_path: isinstance(file_path, str))\n def read_counts_as_list(file_path):\n \"\"\"\n Reads and returns a list of count vectors, each corresponding to a transcript.\n\n :param file_path: str: full path to the file\n :return: [[int]]: list of count vectors (a count vector is a list of int counts)\n :exception: raises IOError if file does not exist\n \"\"\"\n\n if not os.path.exists(file_path):\n error_message = \"Problem reading counts: the file %s does not exist\" % file_path\n logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message)\n raise IOError(error_message)\n logging.getLogger(config.FIVEPSEQ_LOGGER).debug(\"Reading count file %s\" % file_path)\n df = pd.read_csv(file_path, header=None, sep=\"|\")\n count_vector_list = [[]] * len(df)\n for i in range(0, len(df)):\n count_vector_list[i] = list(map(int, df.iloc[i, 0].split(\"\\t\")))\n return count_vector_list\n\n @staticmethod\n @preconditions(lambda file: isinstance(file, str))\n def read_meta_counts(file):\n \"\"\"\n Reads the meta count file as pandas DataFrame.\n These files are saved with tab separator.\n They have two columns, but no column names.\n This function assigns names to read DataFrame:\n D (distance from START/TERM)\n C (meta counts)\n The number of rows corresponds to 2*span_size\n\n :param file: str: full path to the file\n :return: pandas DataFrame: a dataframe with D and C columns\n :exception: raises IOError if file does not exist\n \"\"\"\n if not os.path.exists(file):\n error_message = 
\"Problem reading meta counts: the file %s does not exist\" % file\n logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message)\n raise IOError(error_message)\n logging.getLogger(config.FIVEPSEQ_LOGGER).debug(\"Reading meta count file %s\" % file)\n meta_count = pd.read_csv(file, sep=\"\\t\", header=None, names=[\"D\", \"C\"])\n return meta_count\n\n @staticmethod\n @preconditions(lambda file: isinstance(file, str))\n def read_frame_counts(file):\n \"\"\"\n Reads the frame count file as pandas DataFrame.\n The file has a header with four columns:\n (no name: transcript number), F0, F1, F2\n A four-column dataFrame is created and returned accordingly.\n The number of rows corresponds to the number of transcripts.\n\n :param file: str: full path to the file\n :return: pandas DataFrame: a dataframe with transcript number and F0, F1, F2 frame counts\n :exception: raises IOError if file doesn't exist\n \"\"\"\n if not os.path.exists(file):\n error_message = \"Problem reading frame counts: the file %s does not exist\" % file\n logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message)\n raise IOError(error_message)\n logging.getLogger(config.FIVEPSEQ_LOGGER).debug(\"Reading frame counts file %s\" % file)\n\n frame_counts = pd.read_csv(file, sep=\"\\t\")\n return frame_counts\n\n @staticmethod\n @preconditions(lambda file: isinstance(file, str))\n def read_amino_acid_df(file):\n \"\"\"\n Reads a pandas DataFrame from amino acid pauses file.\n The file is stored with a header indicating distance from amino acids.\n The file has row names indicating names of amino acids.\n The dataFrame is read with indicated columns and row names.\n\n :param file: str: full path to file\n :return: pandas DataFrame: index is amino acids, columns - distance\n :exception: raises IOError if file does not exist\n \"\"\"\n if not os.path.exists(file):\n error_message = \"Problem reading amino acid pauses: the file %s does not exist\" % file\n logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message)\n raise IOError(error_message)\n logging.getLogger(config.FIVEPSEQ_LOGGER).debug(\"Reading amino acids pauses file %s\" % file)\n\n amino_acid_df = pd.read_csv(file, sep=\"\\t\", header=0, index_col=0)\n\n return amino_acid_df\n\n @staticmethod\n def top_populated_count_vector_indices(count_vector_list, num=1000):\n \"\"\"\n Returns indices of top populated count_vectors (transcripts).\n A populated count_vector (transcript) is defined as the one with most length-relative number\n of positions with non-zero counts.\n\n :param count_vector_list: [[int]]: a list of count vectors\n :param num: int: number of indices to return\n :return: [int]: a list of count_vector indices in the count_vector_list\n \"\"\"\n\n populated = [0] * len(count_vector_list)\n for i in range(len(count_vector_list)):\n count_vector = count_vector_list[i]\n populated[i] = float(np.count_nonzero(count_vector)) / len(count_vector)\n populated_indices = sorted(range(len(populated)), key=lambda k: populated[k], reverse=True)\n\n return populated_indices[0:num]\n\n @staticmethod\n def canonical_transcript_indices(count_dir):\n \"\"\"\n Reads and returns canonical transcript indices from the canonical transcript indices file,\n if such a file exists.\n\n :return: [int] indices of transcript with canonical start and stop codons or None if no such file exists.\n\n \"\"\"\n\n canonical_index_file = os.path.join(count_dir, FivePSeqOut.CANONICAL_TRANSCRIPT_INDEX_FILE)\n if os.path.exists(canonical_index_file):\n transcript_index = 
list(pd.read_csv(canonical_index_file, header=None).iloc[:, 0])\n return transcript_index\n else:\n logging.getLogger(config.FIVEPSEQ_LOGGER).debug(\n \"Problem retrieving canonical transcript indices. No file %s exists. \"\n \"The filter will return None.\" % canonical_index_file)\n return None\n\n @staticmethod\n @preconditions(lambda file_path: isinstance(file_path, str))\n def read_count_vector(file_path):\n \"\"\"\n Reads and returns a list of counts from a new-line separated file of counts.\n\n :param file_path: str: full path to the file\n :return: [int]: a list of counts\n :exception: raises IOError if file does not exist\n \"\"\"\n\n if not os.path.exists(file_path):\n error_message = \"Problem reading counts: the file %s does not exist\" % file_path\n logging.getLogger(config.FIVEPSEQ_LOGGER).error(error_message)\n raise IOError(error_message)\n logging.getLogger(config.FIVEPSEQ_LOGGER).debug(\"Reading count file %s\" % file_path)\n if os.stat(file_path).st_size == 0:\n counts = []\n else:\n logging.getLogger(\"Reading in count distribution (this may last a few minutes for large libraries)\")\n counts = pd.read_csv(file_path, header=None)\n counts = list(map(int, counts.iloc[:, 0]))\n\n return counts\n\n @staticmethod\n @preconditions(lambda file_path: isinstance(file_path, str))\n def read_outlier_lower(file_path):\n \"\"\"\n Reads and returns the lower value of read numbers to be considered as outliers for downsampling.\n\n :param file_path: a file containing a single float number\n :return: float\n \"\"\"\n\n outlier_lower = float(pd.read_csv(file_path, header=None))\n return outlier_lower\n\n @staticmethod\n @preconditions(lambda file_path: isinstance(file_path, str))\n def read_count_dict(file_path):\n \"\"\"\n Reads a tab-delimited file and returns a dictionary of count frequencies.\n\n :param file_path: a file containing a dictionary of count frequencies\n :return: float\n \"\"\"\n count_freq_dict = {}\n dict_mat = pd.read_csv(file_path, header=None, delimiter=\"\\t\", index_col=0)\n for i in range(len(dict_mat)):\n count_freq_dict[dict_mat.index[i]] = dict_mat.iloc[i, 0]\n\n return collections.OrderedDict(sorted(count_freq_dict.items()))\n\n @staticmethod\n def filter_fivepseqCountsContainer(fivepseqcountsContainer, transcript_indices, span_size=100):\n \"\"\"\n Gets a fivepseq_counts instance with non-empty count items and filters each by provided transcript indices\n count_vector_list_start = None\n count_vector_list_term = None\n count_vector_list_full_length = None\n meta_count_series_start = None\n meta_count_series_term = None\n frame_counts_df_start = None\n frame_counts_df_term = None\n\n :param fivepseqcountsContainer:\n :param transcript_ind:\n :return:\n \"\"\"\n if fivepseqcountsContainer.count_vector_list_full_length is not None:\n count_vector_list_full_length = [fivepseqcountsContainer.count_vector_list_full_length[i] for i\n in transcript_indices]\n else:\n count_vector_list_full_length = None\n\n count_vector_list_term = [fivepseqcountsContainer.count_vector_list_term[i] for i in\n transcript_indices]\n\n count_vector_list_start = [fivepseqcountsContainer.count_vector_list_start[i] for i in\n transcript_indices]\n\n meta_count_series_term = CountManager.count_vector_to_df(\n CountManager.compute_meta_counts(count_vector_list_term),\n FivePSeqCounts.TERM, tail=span_size)\n\n meta_count_series_start = CountManager.count_vector_to_df(\n CountManager.compute_meta_counts(count_vector_list_start),\n FivePSeqCounts.START, tail=span_size)\n\n frame_counts_df_term 
= fivepseqcountsContainer.frame_counts_df_term.iloc[transcript_indices,]\n\n frame_counts_df_start = fivepseqcountsContainer.frame_counts_df_start.iloc[\n transcript_indices,]\n\n fivepseq_counts_filtered = FivePSeqCountsContainer(count_vector_list_start, count_vector_list_term,\n count_vector_list_full_length,\n meta_count_series_start, meta_count_series_term,\n frame_counts_df_start, frame_counts_df_term)\n\n return fivepseq_counts_filtered\n\n @staticmethod\n def combine_count_series(count_series_dict, lib_size_dict=None, scale=False):\n \"\"\"\n Combines counts in the series dictionary and returns a single count series.\n If lib_size_dict is not None, than the counts are first weighted based on the library size and then combined.\n Weighting is done in a way to give higher weight to samples with larger library sizes.\n\n :param count_series_dict:\n :param lib_size_dict:\n :return:\n \"\"\"\n count_series_combined = None\n start = True\n for key in count_series_dict.keys():\n count_series = count_series_dict[key].copy()\n if lib_size_dict is not None:\n if scale:\n count_series.C /= (float(lib_size_dict[key]) / (10 ** 6)) * len(lib_size_dict)\n else:\n count_series.C *= float(lib_size_dict[key]) / sum(lib_size_dict.values())\n\n if start:\n count_series_combined = count_series.copy()\n start = False\n else:\n count_series_combined.C += count_series.C\n\n return count_series_combined\n\n @staticmethod\n def combine_frame_counts(frame_count_dict, lib_size_dict=None):\n \"\"\"\n Combines counts in the dataframe dictionary and returns a single dataframe.\n If lib_size_dict is not None, than the counts are first weighted based on the library size and then combined.\n Weighting is done in a way to give higher weight to samples with larger library sizes.\n\n :param count_series_dict:\n :param lib_size_dict:\n :return:\n \"\"\"\n frame_count_combined = None\n start = True\n for key in frame_count_dict.keys():\n count_df = frame_count_dict[key]\n if lib_size_dict is not None:\n count_df.loc[:, ('F0', 'F1', 'F2')] *= float(lib_size_dict[key]) / sum(lib_size_dict.values())\n\n if start:\n frame_count_combined = count_df.copy()\n start = False\n else:\n frame_count_combined.loc[:, ('F0', 'F1', 'F2')] += count_df.loc[:, ('F0', 'F1', 'F2')]\n\n return frame_count_combined\n\n @staticmethod\n def combine_amino_acid_dfs(amino_acid_df_dict, lib_size_dict=None):\n \"\"\"\n Combines counts in the dataframe dictionary and returns a single dataframe.\n If lib_size_dict is not None, than the counts are first weighted based on the library size and then combined.\n Weighting is done in a way to give higher weight to samples with larger library sizes.\n\n :param count_series_dict:\n :param lib_size_dict:\n :return:\n \"\"\"\n amino_acid_df_combined = None\n start = True\n for key in amino_acid_df_dict.keys():\n count_df = amino_acid_df_dict[key].copy(deep=True)\n if lib_size_dict is not None:\n count_df *= float(lib_size_dict[key]) / sum(lib_size_dict.values())\n\n if start:\n amino_acid_df_combined = count_df.copy()\n start = False\n else:\n amino_acid_df_combined += count_df\n\n return amino_acid_df_combined\n\n @staticmethod\n def fpi_stats_from_frame_counts(frame_counts):\n \"\"\"\n Takes as input a vector named [F0, F1, F2] and returns:\n (fpi, fmax, f_perc)\n fpi = frame protection index of the maximum frame\n fmax = the maximum frame\n f_perc = the fraction of counts in the maximum frame\n\n :param frame_counts:\n :return:\n \"\"\"\n f_counts = (frame_counts['F0'], frame_counts['F1'], 
frame_counts['F2'])\n fmax = np.argmax(f_counts)\n nom = f_counts[fmax]\n if nom == 0:\n fpi = None\n f_perc = None\n else:\n denom = (sum(f_counts) - nom) / 2.\n if denom == 0:\n fpi = np.log2(float(nom) / 0.5)\n else:\n fpi = np.log2(float(nom / denom))\n f_perc = 100 * (float(f_counts[fmax]) / sum(f_counts))\n\n return fpi, fmax, f_perc\n" ]
[ [ "pandas.read_csv", "pandas.Series", "numpy.asarray", "numpy.arange", "pandas.DataFrame", "numpy.argmax", "numpy.mean", "numpy.count_nonzero", "numpy.floor", "numpy.sum", "numpy.vstack", "scipy.stats.poisson.cdf" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
junkilee/simple_baselines
[ "cc5cc4b8d83119bf144abb08900762b76b1a33ac" ]
[ "baselines/deepq/build_graph.py" ]
[ "\"\"\"Deep Q learning graph\n\nThe functions in this file can are used to create the following functions:\n\n======= act ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon a new value, if negative not update happens\n (default: no update)\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= act (in case of parameter noise) ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon a new value, if negative not update happens\n (default: no update)\n reset_ph: bool\n reset the perturbed policy by sampling a new perturbation\n update_param_noise_threshold_ph: float\n the desired threshold for the difference between non-perturbed and perturbed policy\n update_param_noise_scale_ph: bool\n whether or not to update the scale of the noise for the next time it is re-perturbed\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= train =======\n\n Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:\n\n td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))\n loss = huber_loss[td_error]\n\n Parameters\n ----------\n obs_t: object\n a batch of observations\n action: np.array\n actions that were selected upon seeing obs_t.\n dtype must be int32 and shape must be (batch_size,)\n reward: np.array\n immediate reward attained after executing those actions\n dtype must be float32 and shape must be (batch_size,)\n obs_tp1: object\n observations that followed obs_t\n done: np.array\n 1 if obs_t was the last observation in the episode and 0 otherwise\n obs_tp1 gets ignored, but must be of the valid shape.\n dtype must be float32 and shape must be (batch_size,)\n weight: np.array\n imporance weights for every element of the batch (gradient is multiplied\n by the importance weight) dtype must be float32 and shape must be (batch_size,)\n\n Returns\n -------\n td_error: np.array\n a list of differences between Q(s,a) and the target in Bellman's equation.\n dtype is float32 and shape is (batch_size,)\n\n======= update_target ========\n\n copy the parameters from optimized Q function to the target Q function.\n In Q learning we actually optimize the following error:\n\n Q(s,a) - (r + gamma * max_a' Q'(s', a'))\n\n Where Q' is lagging behind Q to stablize the learning. For example for Atari\n\n Q' is set to Q once every 10000 updates training steps.\n\n\"\"\"\nimport tensorflow as tf\nimport baselines.common.tf_util as U\n\n\ndef default_param_noise_filter(var):\n if var not in tf.trainable_variables():\n # We never perturb non-trainable vars.\n return False\n if \"fully_connected\" in var.name:\n # We perturb fully-connected layers.\n return True\n\n # The remaining layers are likely conv or layer norm layers, which we do not wish to\n # perturb (in the former case because they only extract features, in the latter case because\n # we use them for normalization purposes). 
If you change your network, you will likely want\n # to re-consider which layers to perturb and which to keep untouched.\n return False\n\n\ndef build_act(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None):\n \"\"\"Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n \"\"\"\n with tf.variable_scope(scope, reuse=reuse):\n observations_ph = U.ensure_tf_input(make_obs_ph(\"observation\"))\n stochastic_ph = tf.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.placeholder(tf.float32, (), name=\"update_eps\")\n\n eps = tf.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n\n q_values = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n deterministic_actions = tf.argmax(q_values, axis=1)\n\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],\n outputs=[output_actions, update_eps_expr, eps],\n givens={update_eps_ph: -1.0, stochastic_ph: True},\n updates=[update_eps_expr])\n return act\n\ndef build_test_act(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None, test_epsilon=0.0):\n \"\"\"Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. 
To be able to reuse the scope must be given.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n \"\"\"\n with tf.variable_scope(scope, reuse=reuse):\n observations_ph = U.ensure_tf_input(make_obs_ph(\"observation\"))\n stochastic_ph = tf.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.placeholder(tf.float32, (), name=\"update_eps\")\n\n eps = tf.get_variable(\"eps\", (), initializer=tf.constant_initializer(0.0))\n\n q_func_results = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n q_values = q_func_results['q']\n s_value = q_func_results['s']\n a_values = q_func_results['a']\n deterministic_actions = tf.argmax(q_values, axis=1)\n\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],\n outputs=[output_actions, q_values, s_value, a_values, update_eps_expr],\n givens={update_eps_ph: test_epsilon, stochastic_ph: False},\n updates=[update_eps_expr])\n return act\n\ndef build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None, param_noise_filter_func=None):\n \"\"\"Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. 
If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n \"\"\"\n if param_noise_filter_func is None:\n param_noise_filter_func = default_param_noise_filter\n\n with tf.variable_scope(scope, reuse=reuse):\n observations_ph = U.ensure_tf_input(make_obs_ph(\"observation\"))\n stochastic_ph = tf.placeholder(tf.bool, (), name=\"stochastic\")\n update_eps_ph = tf.placeholder(tf.float32, (), name=\"update_eps\")\n update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name=\"update_param_noise_threshold\")\n update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name=\"update_param_noise_scale\")\n reset_ph = tf.placeholder(tf.bool, (), name=\"reset\")\n\n eps = tf.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n param_noise_scale = tf.get_variable(\"param_noise_scale\", (), initializer=tf.constant_initializer(0.01), trainable=False)\n param_noise_threshold = tf.get_variable(\"param_noise_threshold\", (), initializer=tf.constant_initializer(0.05), trainable=False)\n\n # Unmodified Q.\n q_values = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n\n # Perturbable Q used for the actual rollout.\n q_values_perturbed = q_func(observations_ph.get(), num_actions, scope=\"perturbed_q_func\")\n # We have to wrap this code into a function due to the way tf.cond() works. See\n # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for\n # a more detailed discussion.\n def perturb_vars(original_scope, perturbed_scope):\n all_vars = U.scope_vars(U.absolute_scope_name(\"q_func\"))\n all_perturbed_vars = U.scope_vars(U.absolute_scope_name(\"perturbed_q_func\"))\n assert len(all_vars) == len(all_perturbed_vars)\n perturb_ops = []\n for var, perturbed_var in zip(all_vars, all_perturbed_vars):\n if param_noise_filter_func(perturbed_var):\n # Perturb this variable.\n op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))\n else:\n # Do not perturb, just assign.\n op = tf.assign(perturbed_var, var)\n perturb_ops.append(op)\n assert len(perturb_ops) == len(all_vars)\n return tf.group(*perturb_ops)\n\n # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy\n # of the network and measures the effect of that perturbation in action space. 
If the perturbation\n # is too big, reduce scale of perturbation, otherwise increase.\n q_values_adaptive = q_func(observations_ph.get(), num_actions, scope=\"adaptive_q_func\")\n perturb_for_adaption = perturb_vars(original_scope=\"q_func\", perturbed_scope=\"adaptive_q_func\")\n kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)\n mean_kl = tf.reduce_mean(kl)\n def update_scale():\n with tf.control_dependencies([perturb_for_adaption]):\n update_scale_expr = tf.cond(mean_kl < param_noise_threshold,\n lambda: param_noise_scale.assign(param_noise_scale * 1.01),\n lambda: param_noise_scale.assign(param_noise_scale / 1.01),\n )\n return update_scale_expr\n\n # Functionality to update the threshold for parameter space noise.\n update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,\n lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))\n\n # Put everything together.\n deterministic_actions = tf.argmax(q_values_perturbed, axis=1)\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n updates = [\n update_eps_expr,\n tf.cond(reset_ph, lambda: perturb_vars(original_scope=\"q_func\", perturbed_scope=\"perturbed_q_func\"), lambda: tf.group(*[])),\n tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),\n update_param_noise_threshold_expr,\n ]\n act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],\n outputs=output_actions,\n givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},\n updates=updates)\n return act\n\n\ndef build_train(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,\n double_q=True, scope=\"deepq\", reuse=None, param_noise=False, param_noise_filter_func=None):\n \"\"\"Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that takes a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer: tf.train.Optimizer\n optimizer to use for the Q-learning objective.\n grad_norm_clipping: float or None\n clip gradient norms to this value. 
If None no clipping is performed.\n gamma: float\n discount rate.\n double_q: bool\n if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n In general it is a good idea to keep it enabled.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. To be able to reuse the scope must be given.\n param_noise: bool\n whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n param_noise_filter_func: tf.Variable -> bool\n function that decides whether or not a variable should be perturbed. Only applicable\n if param_noise is True. If set to None, default_param_noise_filter is used by default.\n\n Returns\n -------\n act: (tf.Variable, bool, float) -> tf.Variable\n function to select and action given observation.\n` See the top of the file for details.\n train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n optimize the error in Bellman's equation.\n` See the top of the file for details.\n update_target: () -> ()\n copy the parameters from optimized Q function to the target Q function.\n` See the top of the file for details.\n debug: {str: function}\n a bunch of functions to print debug data like q_values.\n \"\"\"\n if param_noise:\n act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,\n param_noise_filter_func=param_noise_filter_func)\n else:\n act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)\n\n with tf.variable_scope(scope, reuse=reuse):\n # set up placeholders\n obs_t_input = U.ensure_tf_input(make_obs_ph(\"obs_t\"))\n act_t_ph = tf.placeholder(tf.int32, [None], name=\"action\")\n rew_t_ph = tf.placeholder(tf.float32, [None], name=\"reward\")\n obs_tp1_input = U.ensure_tf_input(make_obs_ph(\"obs_tp1\"))\n done_mask_ph = tf.placeholder(tf.float32, [None], name=\"done\")\n importance_weights_ph = tf.placeholder(tf.float32, [None], name=\"weight\")\n\n # q network evaluation\n q_t = q_func(obs_t_input.get(), num_actions, scope=\"q_func\", reuse=True) # reuse parameters from act\n q_func_vars = U.scope_vars(U.absolute_scope_name(\"q_func\"))\n\n # target q network evalution\n q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope=\"target_q_func\")\n target_q_func_vars = U.scope_vars(U.absolute_scope_name(\"target_q_func\"))\n\n # q scores for actions which we know were selected in the given state.\n q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)\n\n # compute estimate of best possible value starting from state at t + 1\n if double_q:\n q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope=\"q_func\", reuse=True)\n q_tp1_best_using_online_net = tf.arg_max(q_tp1_using_online_net, 1)\n q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)\n else:\n q_tp1_best = tf.reduce_max(q_tp1, 1)\n q_tp1_best_masked = (1.0 - done_mask_ph) * q_tp1_best\n\n # compute RHS of bellman equation\n q_t_selected_target = rew_t_ph + gamma * q_tp1_best_masked\n\n # compute the error (potentially clipped)\n td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)\n errors = U.huber_loss(td_error)\n weighted_error = tf.reduce_mean(importance_weights_ph * errors)\n\n # compute optimization op (potentially with gradient clipping)\n if grad_norm_clipping is not None:\n optimize_expr = U.minimize_and_clip(optimizer,\n weighted_error,\n var_list=q_func_vars,\n clip_val=grad_norm_clipping)\n else:\n optimize_expr = 
optimizer.minimize(weighted_error, var_list=q_func_vars)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_expr = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_expr.append(var_target.assign(var))\n update_target_expr = tf.group(*update_target_expr)\n\n # Create callable functions\n train = U.function(\n inputs=[\n obs_t_input,\n act_t_ph,\n rew_t_ph,\n obs_tp1_input,\n done_mask_ph,\n importance_weights_ph\n ],\n outputs=td_error,\n updates=[optimize_expr]\n )\n update_target = U.function([], [], updates=[update_target_expr])\n\n q_values = U.function([obs_t_input], q_t)\n\n return act_f, train, update_target, {'q_values': q_values}\n" ]
[ [ "tensorflow.cond", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.where", "tensorflow.group", "tensorflow.Variable", "tensorflow.stop_gradient", "tensorflow.trainable_variables", "tensorflow.argmax", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.one_hot", "tensorflow.reduce_max", "tensorflow.nn.softmax", "tensorflow.arg_max", "tensorflow.reduce_mean", "tensorflow.assign", "tensorflow.constant_initializer", "tensorflow.variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
tkuri/pytorch-CycleGAN-and-pix2pix
[ "b00b3f0bcebfb12d3f026c2a61c98ff63175a583", "b00b3f0bcebfb12d3f026c2a61c98ff63175a583" ]
[ "data/aligned3_tm_max_dataset.py", "models/pix2pix_in2_model.py" ]
[ "import os.path\nfrom data.base_dataset import BaseDataset, get_params, get_transform\nfrom data.image_folder import make_dataset\nfrom PIL import Image, ImageOps\nimport torch\n\nclass Aligned3TmMaxDataset(BaseDataset):\n \"\"\"A dataset class for paired image dataset.\n\n It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.\n During test time, you need to prepare a directory '/path/to/data/test'.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize this dataset class.\n\n Parameters:\n opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n BaseDataset.__init__(self, opt)\n self.dir_ABC = os.path.join(opt.dataroot, opt.phase) # get the image directory\n self.ABC_paths = sorted(make_dataset(self.dir_ABC, opt.max_dataset_size)) # get image paths\n assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image\n self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc\n self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc\n self.input2_nc = self.opt.input2_nc\n\n def __getitem__(self, index):\n \"\"\"Return a data point and its metadata information.\n\n Parameters:\n index - - a random integer for data indexing\n\n Returns a dictionary that contains A, B, A_paths and B_paths\n A (tensor) - - an image in the input domain\n B (tensor) - - its corresponding image in the target domain\n C (tensor) - - an alternative image in the input domain\n A_paths (str) - - image paths\n B_paths (str) - - image paths (same as A_paths)\n C_paths (str) - - image paths (same as A_paths)\n \"\"\"\n # read a image given a random integer index\n ABC_path = self.ABC_paths[index]\n ABC = Image.open(ABC_path).convert('RGB')\n # split AB image into A and B\n w, h = ABC.size\n h25 = int(h / 25)\n w3 = int(w / 3)\n A = []\n B = []\n C = []\n \n for i in range(25):\n A.append(ABC.crop((0, h25*i, w3, h25*(i+1))))\n B.append(ABC.crop((w3, h25*i, w3*2, h25*(i+1))))\n Ctmp = ImageOps.flip(ABC.crop((w3*2, h25*i, w, h25*(i+1))))\n Ctmp = Ctmp.convert(\"L\")\n _, vmax = Ctmp.getextrema()\n Ctmp = Ctmp.point(lambda x: 0 if x < vmax else 255) \n C.append(Ctmp)\n\n # apply the same transform to both A and B\n transform_params = get_params(self.opt, A[0].size)\n A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))\n B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))\n C_transform = get_transform(self.opt, transform_params, grayscale=(self.input2_nc == 1), convert=False)\n\n for i in range(25):\n A[i] = A_transform(A[i])\n B[i] = B_transform(B[i])\n C[i] = C_transform(C[i])\n \n Acat = torch.unsqueeze(A[0], 0)\n Bcat = torch.unsqueeze(B[0], 0)\n Ccat = torch.unsqueeze(C[0], 0)\n for i in range(1,25):\n Acat = torch.cat([Acat, torch.unsqueeze(A[i], 0)], dim=0)\n Bcat = torch.cat([Bcat, torch.unsqueeze(B[i], 0)], dim=0)\n Ccat = torch.cat([Ccat, torch.unsqueeze(C[i], 0)], dim=0)\n \n # print('Acat size:', Acat.size())\n # print('A_trans:', A.max(), A.min())\n # print('B_trans:', B.max(), B.min())\n # print('C_trans:', C.max(), C.min())\n\n return {'A': Acat, 'B': Bcat, 'C': Ccat, 'A_paths': ABC_path, 'B_paths': ABC_path, 'C_paths': ABC_path}\n\n def __len__(self):\n \"\"\"Return the total number of images in the dataset.\"\"\"\n return len(self.ABC_paths)\n", "import torch\nfrom .base_model import BaseModel\nfrom . 
import networks\n\n\nclass Pix2PixIn2Model(BaseModel):\n \"\"\" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.\n\n The model training requires '--dataset_mode aligned' dataset.\n By default, it uses a '--netG unet256' U-Net generator,\n a '--netD basic' discriminator (PatchGAN),\n and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).\n\n pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf\n \"\"\"\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n\n For pix2pix, we do not use image buffer\n The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1\n By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.\n \"\"\"\n # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)\n parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned3')\n if is_train:\n parser.set_defaults(pool_size=0, gan_mode='vanilla')\n parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')\n\n return parser\n\n def __init__(self, opt):\n \"\"\"Initialize the pix2pix class.\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n self.visual_names = ['real_A', 'fake_B', 'real_B', 'real_C']\n # specify the models you want to save to the disk. 
The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test time, only load G\n self.model_names = ['G']\n\n # opt.input_nc = opt.input_nc + opt.input2_nc\n print('input_nc:', opt.input_nc)\n # define networks (both generator and discriminator)\n # self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n # not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG = networks.define_G(opt.input_nc + opt.input2_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n # self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,\n # opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netD = networks.define_D(opt.input_nc + opt.input2_nc + opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n def set_input(self, input):\n \"\"\"Unpack input data from the dataloader and perform necessary pre-processing steps.\n\n Parameters:\n input (dict): include the data itself and its metadata information.\n\n The option 'direction' can be used to swap images in domain A and domain B.\n \"\"\"\n AtoB = self.opt.direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.real_C = input['C'].to(self.device)\n self.real_AC = torch.cat([self.real_A, self.real_C], dim=1)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n def forward(self):\n \"\"\"Run forward pass; called by both functions <optimize_parameters> and <test>.\"\"\"\n self.fake_B = self.netG(self.real_AC) # G(A)\n\n def backward_D(self):\n \"\"\"Calculate GAN loss for the discriminator\"\"\"\n # Fake; stop backprop to the generator by detaching fake_B\n # fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator\n # pred_fake = self.netD(fake_AB.detach())\n fake_ACB = torch.cat((self.real_AC, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator\n pred_fake = self.netD(fake_ACB.detach())\n self.loss_D_fake = self.criterionGAN(pred_fake, False)\n # Real\n # real_AB = torch.cat((self.real_A, self.real_B), 1)\n # pred_real = self.netD(real_AB)\n real_ACB = torch.cat((self.real_AC, self.real_B), 1)\n pred_real = self.netD(real_ACB)\n self.loss_D_real = self.criterionGAN(pred_real, True)\n # combine loss and calculate gradients\n self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5\n self.loss_D.backward()\n\n def backward_G(self):\n \"\"\"Calculate GAN and L1 loss for the generator\"\"\"\n # First, G(A) should fake the 
discriminator\n # fake_AB = torch.cat((self.real_A, self.fake_B), 1)\n # pred_fake = self.netD(fake_AB)\n fake_ACB = torch.cat((self.real_AC, self.fake_B), 1)\n pred_fake = self.netD(fake_ACB)\n self.loss_G_GAN = self.criterionGAN(pred_fake, True)\n # Second, G(A) = B\n self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1\n # combine loss and calculate gradients\n self.loss_G = self.loss_G_GAN + self.loss_G_L1\n self.loss_G.backward()\n\n def optimize_parameters(self):\n self.forward() # compute fake images: G(A)\n # update D\n self.set_requires_grad(self.netD, True) # enable backprop for D\n self.optimizer_D.zero_grad() # set D's gradients to zero\n self.backward_D() # calculate gradients for D\n self.optimizer_D.step() # update D's weights\n # update G\n self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G\n self.optimizer_G.zero_grad() # set G's gradients to zero\n self.backward_G() # calculate graidents for G\n self.optimizer_G.step() # udpate G's weights\n" ]
[ [ "torch.unsqueeze" ], [ "torch.nn.L1Loss", "torch.cat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
alat-rights/pytorch-lightning
[ "a4f1f3dc28982eb6578df62ca92b93f83a2defcc" ]
[ "pytorch_lightning/strategies/ddp.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\nimport os\nimport shutil\nimport signal\nimport subprocess\nimport sys\nimport tempfile\nimport time\nfrom pathlib import Path\nfrom time import sleep\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nimport __main__\nimport numpy as np\nimport torch\nimport torch.distributed\nfrom torch.distributed import GradBucket\nfrom torch.nn import Module\nfrom torch.nn.parallel.distributed import DistributedDataParallel\n\nimport pytorch_lightning as pl\nfrom pytorch_lightning.core.optimizer import LightningOptimizer\nfrom pytorch_lightning.overrides import LightningDistributedModule\nfrom pytorch_lightning.overrides.distributed import prepare_for_backward\nfrom pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment\nfrom pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO\nfrom pytorch_lightning.plugins.precision import PrecisionPlugin\nfrom pytorch_lightning.strategies.parallel import ParallelStrategy\nfrom pytorch_lightning.strategies.strategy import TBroadcast\nfrom pytorch_lightning.trainer.states import TrainerFn\nfrom pytorch_lightning.utilities import (\n _FAIRSCALE_AVAILABLE,\n _HYDRA_AVAILABLE,\n _IS_WINDOWS,\n _TORCH_GREATER_EQUAL_1_8,\n _TORCH_GREATER_EQUAL_1_9,\n _TORCH_GREATER_EQUAL_1_10,\n)\nfrom pytorch_lightning.utilities.distributed import _revert_sync_batchnorm, distributed_available\nfrom pytorch_lightning.utilities.distributed import group as _group\nfrom pytorch_lightning.utilities.distributed import init_dist_connection, ReduceOp, sync_ddp_if_available\nfrom pytorch_lightning.utilities.enums import _StrategyType\nfrom pytorch_lightning.utilities.exceptions import DeadlockDetectedException\nfrom pytorch_lightning.utilities.rank_zero import rank_zero_only, rank_zero_warn\nfrom pytorch_lightning.utilities.seed import reset_seed\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\n\nif _FAIRSCALE_AVAILABLE:\n from fairscale.optim import OSS\nif _HYDRA_AVAILABLE:\n from hydra.core.hydra_config import HydraConfig\n from hydra.utils import get_original_cwd, to_absolute_path\nif _TORCH_GREATER_EQUAL_1_8:\n from pytorch_lightning.utilities.distributed import register_ddp_comm_hook\n\n\nlog = logging.getLogger(__name__)\n\n\nclass DDPStrategy(ParallelStrategy):\n \"\"\"Plugin for multi-process single-device training on one or multiple nodes.\n\n The main process in each node spawns N-1 child processes via :func:`subprocess.Popen`, where N is the number of\n devices (e.g. GPU) per node. 
It is very similar to how :mod:`torch.distributed.launch` launches processes.\n \"\"\"\n\n distributed_backend = _StrategyType.DDP\n\n def __init__(\n self,\n accelerator: Optional[\"pl.accelerators.accelerator.Accelerator\"] = None,\n parallel_devices: Optional[List[torch.device]] = None,\n cluster_environment: Optional[ClusterEnvironment] = None,\n checkpoint_io: Optional[CheckpointIO] = None,\n precision_plugin: Optional[PrecisionPlugin] = None,\n ddp_comm_state: Optional[object] = None,\n ddp_comm_hook: Optional[Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]]] = None,\n ddp_comm_wrapper: Optional[\n Callable[\n [Callable[[Any, GradBucket], torch.futures.Future[torch.Tensor]], Any],\n torch.futures.Future[torch.Tensor],\n ]\n ] = None,\n model_averaging_period: Optional[int] = None,\n **kwargs: Union[Any, Dict[str, Any]],\n ) -> None:\n super().__init__(\n accelerator=accelerator,\n parallel_devices=parallel_devices,\n cluster_environment=cluster_environment,\n checkpoint_io=checkpoint_io,\n precision_plugin=precision_plugin,\n )\n log.detail(f\"{self.__class__.__name__}: initializing DDP plugin\")\n self.interactive_ddp_procs: List[subprocess.Popen] = []\n self._num_nodes = 1\n self.sync_batchnorm = False\n self._ddp_kwargs = kwargs\n self._ddp_comm_state = ddp_comm_state\n self._ddp_comm_hook = ddp_comm_hook\n self._ddp_comm_wrapper = ddp_comm_wrapper\n self._model_averaging_period = model_averaging_period\n self._pids: Optional[List[int]] = None\n self._sync_dir: Optional[str] = None\n self._rank_0_has_called_call_children_scripts: bool = False\n self.set_world_ranks()\n\n @property\n def is_distributed(self) -> bool:\n return True\n\n @property\n def root_device(self) -> torch.device:\n return self.parallel_devices[self.local_rank]\n\n @property\n def num_nodes(self) -> int:\n return self._num_nodes\n\n @num_nodes.setter\n def num_nodes(self, num_nodes: int) -> None:\n # note that world ranks is related to num_nodes, when resetting it, need to reset world ranks\n self._num_nodes = num_nodes\n self.set_world_ranks()\n\n @property\n def num_processes(self) -> int:\n return len(self.parallel_devices) if self.parallel_devices is not None else 0\n\n @property\n def distributed_sampler_kwargs(self) -> Dict[str, Any]:\n distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank)\n return distributed_sampler_kwargs\n\n @property\n def _is_single_process_single_device(self) -> bool:\n return True\n\n def setup_environment(self) -> None:\n # start the other scripts\n assert self.cluster_environment is not None\n if not self.cluster_environment.creates_processes_externally:\n self._call_children_scripts()\n\n self.setup_distributed()\n super().setup_environment()\n\n def setup(self, trainer: \"pl.Trainer\") -> None:\n super().setup(trainer)\n # share ddp pids to all processes\n self._rank_0_has_called_call_children_scripts = (\n self.broadcast(self._rank_0_has_called_call_children_scripts) is True\n )\n if self._should_run_deadlock_detection():\n self._share_information_to_prevent_deadlock()\n\n # move the model to the correct device\n self.model_to_device()\n\n assert self.model is not None\n if self.sync_batchnorm:\n self.model = self.configure_sync_batchnorm(self.model)\n\n # skip wrapping the model if we are not fitting as no gradients need to be exchanged\n assert self.lightining_module is not None\n trainer_fn = self.lightning_module.trainer.state.fn\n if trainer_fn == TrainerFn.FITTING:\n self.configure_ddp()\n\n def 
_setup_model(self, model: Module) -> DistributedDataParallel:\n \"\"\"Wraps the model into a :class:`~torch.nn.parallel.distributed.DistributedDataParallel` module.\"\"\"\n device_ids = self.determine_ddp_device_ids()\n log.detail(f\"setting up DDP model with device ids: {device_ids}, kwargs: {self._ddp_kwargs}\")\n return DistributedDataParallel(module=model, device_ids=device_ids, **self._ddp_kwargs)\n\n def _call_children_scripts(self) -> None:\n # bookkeeping of spawned processes\n self._check_can_spawn_children()\n\n assert self.cluster_environment is not None\n # DDP Environment variables\n os.environ[\"MASTER_ADDR\"] = self.cluster_environment.main_address\n os.environ[\"MASTER_PORT\"] = str(self.cluster_environment.main_port)\n\n # allow the user to pass the node rank\n os.environ[\"NODE_RANK\"] = str(self.cluster_environment.node_rank())\n os.environ[\"LOCAL_RANK\"] = str(self.cluster_environment.local_rank())\n\n # Check if the current calling command looked like `python a/b/c.py` or `python -m a.b.c`\n # See https://docs.python.org/3/reference/import.html#main-spec\n if __main__.__spec__ is None: # pragma: no-cover\n # Script called as `python a/b/c.py`\n # when user is using hydra find the absolute path\n path_lib = os.path.abspath if not _HYDRA_AVAILABLE else to_absolute_path\n\n # pull out the commands used to run the script and resolve the abs file path\n command = sys.argv\n try:\n full_path = path_lib(command[0])\n except Exception:\n full_path = os.path.abspath(command[0])\n\n command[0] = full_path\n # use the same python interpreter and actually running\n command = [sys.executable] + command\n else: # Script called as `python -m a.b.c`\n command = [sys.executable, \"-m\", __main__.__spec__.name] + sys.argv[1:]\n\n os.environ[\"WORLD_SIZE\"] = f\"{self.num_processes * self.num_nodes}\"\n\n self.interactive_ddp_procs = []\n\n for local_rank in range(1, self.num_processes):\n env_copy = os.environ.copy()\n env_copy[\"LOCAL_RANK\"] = f\"{local_rank}\"\n\n # remove env var if global seed not set\n if os.environ.get(\"PL_GLOBAL_SEED\") is None and \"PL_GLOBAL_SEED\" in env_copy:\n del env_copy[\"PL_GLOBAL_SEED\"]\n\n # start process\n # if hydra is available and initialized, make sure to set the cwd correctly\n cwd: Optional[str] = None\n if _HYDRA_AVAILABLE:\n if HydraConfig.initialized():\n cwd = get_original_cwd()\n os_cwd = f'\"{os.getcwd()}\"'\n command += [f\"hydra.run.dir={os_cwd}\", f\"hydra.job.name=train_ddp_process_{local_rank}\"]\n proc = subprocess.Popen(command, env=env_copy, cwd=cwd)\n self.interactive_ddp_procs.append(proc)\n\n # starting all processes at once can cause issues\n # with dataloaders delay between 1-10 seconds\n delay = np.random.uniform(1, 5, 1)[0]\n sleep(delay)\n\n self._rank_0_has_called_call_children_scripts = True\n\n def setup_distributed(self) -> None:\n log.detail(f\"{self.__class__.__name__}: setting up distributed...\")\n reset_seed()\n\n # determine which process we are and world size\n self.set_world_ranks()\n\n # set warning rank\n rank_zero_only.rank = self.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n assert self.cluster_environment is not None\n init_dist_connection(self.cluster_environment, self.torch_distributed_backend)\n\n def _check_can_spawn_children(self) -> None:\n if self.local_rank != 0:\n raise RuntimeError(\n \"Lightning attempted to launch new distributed processes with `local_rank > 0`. 
This should not happen.\"\n \" Possible reasons: 1) LOCAL_RANK environment variable was incorrectly modified by the user,\"\n \" 2) `ClusterEnvironment.creates_processes_externally` incorrectly implemented.\"\n )\n\n def set_world_ranks(self) -> None:\n if self.cluster_environment is None:\n return\n self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)\n self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)\n rank_zero_only.rank = self.cluster_environment.global_rank()\n\n def pre_configure_ddp(self) -> None:\n # if unset, default `find_unused_parameters` `True`\n # Many models require setting this parameter to True, as there are corner cases\n # when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.\n # This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.\n self._ddp_kwargs[\"find_unused_parameters\"] = self._ddp_kwargs.get(\"find_unused_parameters\", True)\n assert self.lightning_module is not None\n if not self.lightning_module.automatic_optimization and not self._ddp_kwargs.get(\n \"find_unused_parameters\", False\n ):\n # TODO: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization\n rank_zero_warn(\n \"From PyTorch 1.7.0, Lightning `manual_optimization` needs to set `find_unused_parameters=True` to\"\n \" properly work with DDP. Using `find_unused_parameters=True`.\"\n )\n self._ddp_kwargs[\"find_unused_parameters\"] = True\n\n def _register_ddp_hooks(self) -> None:\n log.detail(f\"{self.__class__.__name__}: registering ddp hooks\")\n # In 1.8, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode\n # Since 1.9, DDP communication hooks can work on all backends.\n if _TORCH_GREATER_EQUAL_1_9 or (\n _TORCH_GREATER_EQUAL_1_8 and self.root_device.type == \"cuda\" and self._is_single_process_single_device\n ):\n register_ddp_comm_hook(\n model=self.model,\n ddp_comm_state=self._ddp_comm_state,\n ddp_comm_hook=self._ddp_comm_hook,\n ddp_comm_wrapper=self._ddp_comm_wrapper,\n )\n\n if _TORCH_GREATER_EQUAL_1_10 and self.lightning_module.trainer.state.fn == TrainerFn.FITTING:\n import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD\n\n if isinstance(self._ddp_comm_state, post_localSGD.PostLocalSGDState):\n self._reinit_optimizers_with_post_localSGD(self._ddp_comm_state.start_localSGD_iter)\n\n def _reinit_optimizers_with_post_localSGD(self, warmup_steps: int) -> None:\n log.detail(f\"{self.__class__.__name__}: reinitializing optimizers with post localSGD\")\n optimizers = self.optimizers\n if self._model_averaging_period is None:\n raise ValueError(\n \"Post-localSGD algorithm is used, but model averaging period is not provided to DDP strategy.\"\n )\n if _TORCH_GREATER_EQUAL_1_10:\n if not _IS_WINDOWS:\n from torch.distributed.optim import DistributedOptimizer\n import torch.distributed.algorithms.model_averaging.averagers as averagers\n from torch.distributed.optim import PostLocalSGDOptimizer, ZeroRedundancyOptimizer\n\n averager = averagers.PeriodicModelAverager(period=self._model_averaging_period, warmup_steps=warmup_steps)\n for x, optimizer in enumerate(optimizers):\n if isinstance(optimizer, LightningOptimizer):\n optimizer = optimizer._optimizer\n\n is_distributed_optimizer = isinstance(optimizer, DistributedOptimizer) if not _IS_WINDOWS else False\n if (\n is_distributed_optimizer\n or 
isinstance(optimizer, ZeroRedundancyOptimizer)\n or (_FAIRSCALE_AVAILABLE and isinstance(optimizer, OSS))\n ):\n raise ValueError(\n f\"Cannot wrap a distributed optimizer of type {optimizer.__name__} by PostLocalSGDOptimizer.\"\n )\n\n if isinstance(optimizer, PostLocalSGDOptimizer):\n continue\n\n optim_class = type(optimizer)\n post_localSGD_optimizer = PostLocalSGDOptimizer(\n params=optimizer.param_groups,\n optimizer_class=optim_class,\n averager=averager,\n **optimizer.defaults,\n )\n optimizers[x] = post_localSGD_optimizer\n del optimizer\n self.optimizers = optimizers\n\n def configure_ddp(self) -> None:\n log.detail(f\"{self.__class__.__name__}: configuring DistributedDataParallel\")\n self.pre_configure_ddp()\n assert self.model is not None\n self.model = self._setup_model(LightningDistributedModule(self.model))\n self._register_ddp_hooks()\n\n def determine_ddp_device_ids(self) -> Optional[List[int]]:\n if self.root_device.type == \"cpu\":\n return None\n return [self.root_device.index]\n\n def barrier(self, *args, **kwargs) -> None:\n if not distributed_available():\n return\n if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == \"nccl\":\n torch.distributed.barrier(device_ids=self.determine_ddp_device_ids())\n else:\n torch.distributed.barrier()\n\n def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:\n obj = [obj]\n if self.global_rank != src:\n obj = [None]\n torch.distributed.broadcast_object_list(obj, src, group=_group.WORLD)\n return obj[0]\n\n def pre_backward(self, closure_loss: torch.Tensor) -> None:\n \"\"\"Run before precision plugin executes backward.\"\"\"\n assert self.model is not None\n assert self.lightning_module is not None\n if not self.lightning_module.automatic_optimization:\n prepare_for_backward(self.model, closure_loss)\n\n def model_to_device(self) -> None:\n log.detail(f\"{self.__class__.__name__}: moving model to device [{self.root_device}]...\")\n if self.model:\n self.model.to(self.root_device)\n\n def reduce(\n self, tensor: torch.Tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str, None] = \"mean\"\n ) -> torch.Tensor:\n \"\"\"Reduces a tensor from several distributed processes to one aggregated tensor.\n\n Args:\n tensor: the tensor to sync and reduce\n group: the process group to gather results from. Defaults to all processes (world)\n reduce_op: the reduction operation. 
Defaults to 'mean'/'avg'.\n Can also be a string 'sum' to calculate the sum during reduction.\n\n Return:\n reduced value, except when the input was not a tensor the output remains is unchanged\n \"\"\"\n if isinstance(tensor, torch.Tensor):\n tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op)\n return tensor\n\n def training_step(self, *args, **kwargs) -> STEP_OUTPUT:\n with self.precision_plugin.train_step_context():\n assert self.model is not None\n return self.model(*args, **kwargs)\n\n def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:\n with self.precision_plugin.val_step_context():\n if isinstance(self.model, DistributedDataParallel):\n # used when calling `trainer.fit`\n return self.model(*args, **kwargs)\n else:\n # used when calling `trainer.validate`\n assert self.lightning_module is not None\n return self.lightning_module.validation_step(*args, **kwargs)\n\n def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:\n with self.precision_plugin.test_step_context():\n assert self.lightning_module is not None\n return self.lightning_module.test_step(*args, **kwargs)\n\n def predict_step(self, *args, **kwargs) -> STEP_OUTPUT:\n with self.precision_plugin.predict_step_context():\n assert self.lightning_module is not None\n return self.lightning_module.predict_step(*args, **kwargs)\n\n def post_training_step(self) -> None:\n assert self.model is not None\n assert self.lightning_module is not None\n if not self.lightning_module.automatic_optimization:\n self.model.require_backward_grad_sync = True\n\n @classmethod\n def register_strategies(cls, strategy_registry: Dict) -> None:\n strategy_registry.register(\n \"ddp_find_unused_parameters_false\",\n cls,\n description=\"DDP Strategy with `find_unused_parameters` as False\",\n find_unused_parameters=False,\n )\n\n def _should_run_deadlock_detection(self) -> bool:\n \"\"\"Determines whether the plugin will perform process reconciliation in case of errors.\n\n If the environment variable `PL_RECONCILE_PROCESS` is set, run detection regardless of the cluster environment.\n By default this is disabled. Otherwise, if the cluster environment creates the processes, allow the scheduler /\n parent process to perform the process termination, external to Lightning.\n \"\"\"\n return os.getenv(\"PL_RECONCILE_PROCESS\", \"0\") == \"1\" or self._rank_0_has_called_call_children_scripts\n\n def _share_information_to_prevent_deadlock(self) -> None:\n self._share_pids()\n\n # there should be a unique sync_dir per nodes.\n if self.local_rank == 0:\n # create a temporary directory used to synchronize processes on deadlock.\n self._sync_dir = tempfile.mkdtemp()\n\n sync_dirs = []\n global_node_rank_zero = 0\n for _ in range(self.num_nodes):\n sync_dirs.append(self.broadcast(self._sync_dir, global_node_rank_zero))\n global_node_rank_zero += self.world_size // self.num_nodes\n\n self._sync_dir = sync_dirs[self.node_rank]\n\n def _share_pids(self) -> None:\n \"\"\"Make all DDP processes aware of all processes pids.\"\"\"\n self.barrier()\n pids = self.all_gather(torch.tensor(os.getpid(), device=self.root_device))\n pids = pids.cpu().numpy().tolist()\n self._pids = pids if isinstance(pids, list) else [pids]\n\n def reconciliate_processes(self, trace: str) -> None:\n if self.world_size < 2:\n return\n\n if not self._should_run_deadlock_detection():\n return\n\n sync_dir = self._sync_dir\n\n if not sync_dir:\n rank_zero_warn(\"Error handling mechanism for deadlock detection is uninitialized. 
Skipping check.\")\n return\n\n # The cluster may be configured to periodically purge the `/tmp`\n # directory, in which case `sync_dir` may not exist anymore at this\n # point. Idempotently create it to ensure its existence.\n Path(sync_dir).mkdir(parents=True, exist_ok=True)\n\n # save a file locally.\n torch.save(True, os.path.join(sync_dir, f\"{self.global_rank}.pl\"))\n\n # sleep for a short time\n time.sleep(3)\n\n # return if all processes wrote a file in the `sync_dir`.\n # todo (tchaton) Add support for non-shared file-system which will fail.\n if len(os.listdir(sync_dir)) == (self.world_size // self.num_nodes):\n return\n\n if not self._pids:\n return\n for pid in self._pids:\n if pid != os.getpid():\n os.kill(pid, signal.SIGKILL)\n shutil.rmtree(sync_dir)\n raise DeadlockDetectedException(f\"DeadLock detected from rank: {self.global_rank} \\n {trace}\")\n\n def teardown(self) -> None:\n log.detail(f\"{self.__class__.__name__}: tearing down DDP plugin\")\n super().teardown()\n if isinstance(self.model, DistributedDataParallel):\n self.model = self.lightning_module\n\n assert self.model is not None\n if self.sync_batchnorm:\n self.model = _revert_sync_batchnorm(self.model)\n\n if self.root_device.type == \"cuda\":\n # GPU teardown\n log.detail(f\"{self.__class__.__name__}: moving model to CPU\")\n assert self.lightning_module is not None\n self.lightning_module.cpu()\n # clean up memory\n torch.cuda.empty_cache()\n" ]
[ [ "torch.nn.parallel.distributed.DistributedDataParallel", "torch.distributed.get_backend", "torch.cuda.empty_cache", "torch.distributed.barrier", "torch.distributed.optim.PostLocalSGDOptimizer", "torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager", "numpy.random.uniform", "torch.distributed.broadcast_object_list" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vonguyenleduy/dnn_representation_selective_inference
[ "ea9924950441d30d2619a235551673f089f5a54f", "ea9924950441d30d2619a235551673f089f5a54f" ]
[ "ex3_len_interval_proposed_oc.py", "ex1_fpr_naive.py" ]
[ "import numpy as np\r\nfrom tensorflow.keras.models import load_model\r\nimport tensorflow as tf\r\nimport time\r\n\r\nimport gen_data\r\nimport util\r\n\r\n\r\ndef run():\r\n d = 8\r\n IMG_WIDTH = d\r\n IMG_HEIGHT = d\r\n IMG_CHANNELS = 1\r\n mu_1 = 0\r\n mu_2 = 2\r\n\r\n global_list_ineq = []\r\n\r\n X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)\r\n\r\n X_para, X_vec = util.create_X_para(X_test, d)\r\n\r\n X_para_pad = util.create_X_pad(X_para, d, IMG_CHANNELS)\r\n\r\n model = load_model('./model/test_' + str(d) + '.h5')\r\n # model.summary()\r\n\r\n weights = model.get_weights()\r\n\r\n kernel_1 = weights[0]\r\n bias_1 = weights[1]\r\n\r\n kernel_2 = weights[2]\r\n bias_2 = weights[3]\r\n\r\n out_conv_1, out_conv_1_para = util.conv(X_test, X_para_pad, kernel_1)\r\n\r\n _, d, _, no_channel = out_conv_1.shape\r\n\r\n out_conv_1 = out_conv_1 + bias_1\r\n\r\n for i in range(d):\r\n for j in range(d):\r\n for k in range(no_channel):\r\n out_conv_1_para[0][i][j][k][1] = out_conv_1_para[0][i][j][k][1] + bias_1[k]\r\n\r\n out_max_pooling, out_max_pooling_para, max_pooling_event = util.max_pooling(out_conv_1, out_conv_1_para)\r\n\r\n for element in max_pooling_event:\r\n global_list_ineq.append(element)\r\n\r\n out_up_sampling, out_up_sampling_para = util.up_sampling(out_max_pooling, out_max_pooling_para)\r\n\r\n _, d, _, no_channel = out_up_sampling.shape\r\n out_up_sampling_para_pad = util.create_X_pad(out_up_sampling_para, d, no_channel)\r\n out_conv_2, out_conv_2_para = util.conv(out_up_sampling, out_up_sampling_para_pad, kernel_2)\r\n\r\n _, d, _, no_channel = out_conv_2.shape\r\n\r\n out_conv_2 = out_conv_2 + bias_2\r\n\r\n for i in range(d):\r\n for j in range(d):\r\n for k in range(no_channel):\r\n out_conv_2_para[0][i][j][k][1] = out_conv_2_para[0][i][j][k][1] + bias_2[k]\r\n\r\n out_conv_2 = util.sigmoid(out_conv_2)\r\n output = out_conv_2\r\n\r\n for i in range(d):\r\n for j in range(d):\r\n for k in range(no_channel):\r\n pT = out_conv_2_para[0][i][j][k][0]\r\n q = out_conv_2_para[0][i][j][k][1]\r\n\r\n val = np.dot(pT, X_vec)[0][0] + q\r\n val = util.sigmoid(val)\r\n\r\n if val <= 0.5:\r\n global_list_ineq.append([pT, q])\r\n else:\r\n global_list_ineq.append([-pT, -q])\r\n\r\n output = output.flatten()\r\n binary_vec = []\r\n\r\n for each_e in output:\r\n if each_e <= 0.5:\r\n binary_vec.append(0)\r\n else:\r\n binary_vec.append(1)\r\n\r\n x = X_vec\r\n\r\n eta, etaTx = util.construct_test_statistic(x, binary_vec, d * d)\r\n u, v = util.compute_u_v(x, eta, d * d)\r\n\r\n Vminus = np.NINF\r\n Vplus = np.Inf\r\n\r\n for element in global_list_ineq:\r\n aT = element[0]\r\n b = element[1]\r\n\r\n a_scalar = np.dot(aT, v)[0][0]\r\n b_scalar = np.dot(aT, u)[0][0] + b\r\n\r\n if a_scalar == 0:\r\n if b > 0:\r\n print('Error B')\r\n\r\n elif a_scalar > 0:\r\n Vplus = min(Vplus, -b_scalar / a_scalar)\r\n else:\r\n Vminus = max(Vminus, -b_scalar / a_scalar)\r\n\r\n return Vplus - Vminus\r\n\r\n\r\nfrom mpi4py import MPI\r\nCOMM = MPI.COMM_WORLD\r\n\r\nstart_time = None\r\n\r\nif COMM.rank == 0:\r\n start_time = time.time()\r\n\r\n max_iteration = 120\r\n no_thread = COMM.size\r\n\r\n iter_each_thread = int(max_iteration / no_thread)\r\n\r\nelse:\r\n iter_each_thread = None\r\n\r\niter_each_thread = COMM.bcast(iter_each_thread, root=0)\r\n\r\nlocal_list_length = []\r\n\r\nfor i in range(iter_each_thread):\r\n\r\n length = run()\r\n\r\n if length is not None:\r\n local_list_length.append(length)\r\n\r\n\r\ntotal_list_length = COMM.gather(local_list_length, root=0)\r\n\r\nif 
COMM.rank == 0:\r\n total_list_length = [_i for temp in total_list_length for _i in temp]\r\n\r\n print(total_list_length)\r\n\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))", "import numpy as np\nfrom tensorflow.keras.models import load_model\nimport tensorflow as tf\nimport time\n\nimport gen_data\nimport util\nimport parametric_si\n\n\ndef run():\n n = 16\n\n d = int(np.sqrt(n))\n IMG_WIDTH = d\n mu_1 = 0\n mu_2 = 0\n\n X_test, Y_test = gen_data.generate(1, IMG_WIDTH, mu_1, mu_2)\n\n model = load_model('./model/test_' + str(d) + '.h5')\n\n output = model.predict(X_test, verbose=0)\n\n output = output.flatten()\n X_vec = X_test.flatten()\n\n m_a = 0\n m_b = 0\n n_a = 0\n n_b = 0\n\n for i in range(len(output)):\n if output[i] <= 0.5:\n n_a = n_a + 1\n m_a = m_a + X_vec[i]\n else:\n n_b = n_b + 1\n m_b = m_b + X_vec[i]\n\n if (n_a == 0) or (n_b == 0):\n return None\n\n m_a = m_a / n_a\n m_b = m_b / n_b\n\n test_statistic = m_a - m_b\n\n pivot = util.compute_naive_p(test_statistic, n_a, n_b, 1)\n\n return pivot\n\n\nfrom mpi4py import MPI\nCOMM = MPI.COMM_WORLD\n\nstart_time = None\n\nif COMM.rank == 0:\n start_time = time.time()\n\n max_iteration = 200\n no_thread = COMM.size\n\n iter_each_thread = int(max_iteration / no_thread)\n\nelse:\n iter_each_thread = None\n\niter_each_thread = COMM.bcast(iter_each_thread, root=0)\n\nlocal_list_pivot = []\n\nfor i in range(iter_each_thread):\n\n pivot = run()\n\n if pivot is not None:\n local_list_pivot.append(pivot)\n\n\ntotal_list_pivot = COMM.gather(local_list_pivot, root=0)\n\nif COMM.rank == 0:\n total_list_pivot = [_i for temp in total_list_pivot for _i in temp]\n\n detect = 0\n reject = 0\n\n for pivot in total_list_pivot:\n if pivot is not None:\n detect = detect + 1\n if pivot < 0.05:\n reject = reject + 1\n\n print(reject, detect, reject / detect)\n\n print(\"--- %s seconds ---\" % (time.time() - start_time))" ]
[ [ "numpy.dot" ], [ "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
tommccoy1/copynet
[ "6bc6f3a81a4922d06bdd9e86dcd7125251282076" ]
[ "transformer.py" ]
[ "\n# From https://github.com/ischlag/TP-Transformer\n\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef build_transformer(input_dim=None, hidden=None, dropout=None, max_length=None, n_layers=None, n_heads=None, myfilter=None, pad_idx=None):\n\n embedding = TokenEmbedding(d_vocab=input_dim,\n d_h=hidden,\n d_p=hidden,\n dropout=dropout,\n max_length=200)\n\n encoder = Encoder(hid_dim=hidden,\n n_layers=n_layers,\n n_heads=n_heads,\n pf_dim=myfilter,\n encoder_layer=EncoderLayer,\n self_attention=SelfAttention,\n positionwise_feedforward=PositionwiseFeedforward,\n dropout=dropout)\n\n decoder = Decoder(hid_dim=hidden,\n n_layers=n_layers,\n n_heads=n_heads,\n pf_dim=myfilter,\n decoder_layer=DecoderLayer,\n self_attention=SelfAttention,\n positionwise_feedforward=PositionwiseFeedforward,\n dropout=dropout)\n\n model = Seq2Seq(embedding=embedding,\n encoder=encoder,\n decoder=decoder,\n pad_idx=pad_idx)\n\n return model\n\n\nclass TokenEmbedding(nn.Module):\n def __init__(self, d_vocab, d_h, d_p, dropout, max_length):\n super(TokenEmbedding, self).__init__()\n self.dropout = nn.Dropout(dropout)\n\n # token encodings\n self.d_h = d_h\n self.tok_embedding = nn.Embedding(d_vocab, d_h)\n self.scale = torch.sqrt(torch.FloatTensor([d_h]))\n\n # Compute the positional encodings once in log space.\n pe = torch.zeros(max_length, d_p)\n position = torch.arange(0., max_length).unsqueeze(1)\n div_term = torch.exp(torch.arange(0., d_p, 2) *\n -(math.log(10000.0) / d_p))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0)\n self.register_buffer('pe', pe)\n # pe = [1, seq_len, d_p]\n\n self.reset_parameters() # init tok_embedding to N(0,1/sqrt(d_h))\n\n def forward(self, src):\n # src = [batch_size, src_seq_len]\n\n # scale up embedding to be N(0,1)\n tok_emb = self.tok_embedding(src) * self.scale.to(src.device)\n pos_emb = torch.autograd.Variable(self.pe[:, :src.size(1)],\n requires_grad=False)\n x = tok_emb + pos_emb\n x = self.dropout(x)\n\n # src = [batch_size, src_seq_len, d_h]\n return x\n\n def transpose_forward(self, trg):\n # trg = [batch_size, trg_seq_len, d_h]\n logits = torch.einsum('btd,vd->btv',trg,self.tok_embedding.weight)\n # logits = torch.matmul(trg, torch.transpose(self.tok_embedding.weight, 0, 1))\n # logits = [batch_size, trg_seq_len, d_vocab]\n return logits\n\n def reset_parameters(self):\n nn.init.normal_(self.tok_embedding.weight,\n mean=0,\n std=1./math.sqrt(self.d_h))\n\n\nclass Encoder(nn.Module):\n def __init__(self, hid_dim, n_layers, n_heads, pf_dim,\n encoder_layer, self_attention, positionwise_feedforward, dropout):\n super().__init__()\n\n self.layers = nn.ModuleList([encoder_layer(hid_dim, n_heads, pf_dim,\n self_attention,\n positionwise_feedforward,\n dropout)\n for _ in range(n_layers)])\n\n\n def forward(self, src, src_mask):\n # src = [batch_size, src_seq_len]\n # src_mask = [batch_size, src_seq_len]\n for layer in self.layers:\n src = layer(src, src_mask)\n\n return src\n\n\nclass EncoderLayer(nn.Module):\n def __init__(self, hid_dim, n_heads, pf_dim, self_attention,\n positionwise_feedforward, dropout):\n super().__init__()\n\n self.layernorm1 = nn.LayerNorm(hid_dim)\n self.layernorm2 = nn.LayerNorm(hid_dim)\n self.layernorm3 = nn.LayerNorm(hid_dim)\n self.MHA = self_attention(hid_dim, n_heads, dropout)\n self.densefilter = positionwise_feedforward(hid_dim, pf_dim, dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n\n\n 
def forward(self, src, src_mask):\n # src = [batch_size, src_seq_size, hid_dim]\n # src_mask = [batch_size, src_seq_size]\n\n # sublayer 1\n z = self.layernorm1(src)\n z, attn = self.MHA(z, z, z, src_mask)\n z = self.dropout1(z)\n src = src + z\n\n # sublayer 2\n z = self.layernorm2(src)\n z = self.densefilter(z)\n z = self.dropout2(z)\n src = src + z\n\n return self.layernorm3(src)\n\n\nclass SelfAttention(nn.Module):\n def __init__(self, hid_dim, n_heads, dropout):\n super().__init__()\n\n self.hid_dim = hid_dim\n self.n_heads = n_heads\n\n assert hid_dim % n_heads == 0\n\n self.w_q = nn.Linear(hid_dim, hid_dim)\n self.w_k = nn.Linear(hid_dim, hid_dim)\n self.w_v = nn.Linear(hid_dim, hid_dim)\n\n self.linear = nn.Linear(hid_dim, hid_dim)\n self.dropout = nn.Dropout(dropout)\n self.scale = torch.sqrt(torch.FloatTensor([hid_dim // n_heads]))\n\n self.reset_parameters()\n\n def forward(self, query, key, value, mask=None):\n # query = key = value = [batch_size, seq_len, hid_dim]\n # src_mask = [batch_size, 1, 1, pad_seq]\n # trg_mask = [batch_size, 1, pad_seq, past_seq]\n\n bsz = query.shape[0]\n\n Q = self.w_q(query)\n K = self.w_k(key)\n V = self.w_v(value)\n # Q, K, V = [batch_size, seq_len, hid_dim]\n\n Q = Q.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\\\n .permute(0,2,1,3)\n K = K.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\\\n .permute(0,2,1,3)\n V = V.view(bsz, -1, self.n_heads, self.hid_dim // self.n_heads)\\\n .permute(0,2,1,3)\n # Q, K, V = [batch_size, n_heads, seq_size, hid_dim // n heads]\n\n energy = torch.einsum('bhid,bhjd->bhij',Q,K) / self.scale.to(key.device)\n # energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale.to(key.device)\n\n # energy = [batch_size, n_heads, query_pos , key_pos]\n # src_mask = [batch_size, 1 , 1 , attn]\n # trg_mask = [batch_size, 1 , query_specific, attn]\n\n if mask is not None:\n energy = energy.masked_fill(mask == 0, -1e10)\n\n attention = self.dropout(F.softmax(energy, dim=-1))\n # attention = [batch_size, n_heads, seq_size, seq_size]\n\n x = torch.einsum('bhjd,bhij->bhid',V,attention)\n # x = torch.matmul(attention, V)\n # x = [batch_size, n_heads, seq_size, hid_dim // n heads]\n\n x = x.permute(0, 2, 1, 3).contiguous()\n # x = [batch_size, seq_size, n_heads, hid_dim // n heads]\n\n x = x.view(bsz, -1, self.n_heads * (self.hid_dim // self.n_heads))\n # x = [batch_size, src_seq_size, hid_dim]\n\n x = self.linear(x)\n # x = [batch_size, seq_size, hid_dim]\n\n return x, attention.detach()\n\n def reset_parameters(self):\n # nn.init.xavier_normal_(self.w_q.weight)\n # nn.init.xavier_normal_(self.w_k.weight)\n # nn.init.xavier_normal_(self.w_v.weight)\n # nn.init.xavier_normal_(self.linear.weight)\n nn.init.xavier_uniform_(self.w_q.weight)\n nn.init.xavier_uniform_(self.w_k.weight)\n nn.init.xavier_uniform_(self.w_v.weight)\n nn.init.xavier_uniform_(self.linear.weight)\n\n\nclass PositionwiseFeedforward(nn.Module):\n def __init__(self, hid_dim, pf_dim, dropout):\n super().__init__()\n\n self.hid_dim = hid_dim\n self.pf_dim = pf_dim\n\n self.linear1 = nn.Linear(hid_dim, pf_dim)\n self.linear2 = nn.Linear(pf_dim, hid_dim)\n self.dropout = nn.Dropout(dropout)\n\n self.reset_parameters()\n\n def forward(self, x):\n # x = [batch_size, seq_size, hid_dim]\n\n x = self.linear1(x)\n x = self.dropout(F.relu(x))\n x = self.linear2(x)\n\n # x = [batch_size, seq_size, hid_dim]\n return x\n\n def reset_parameters(self):\n #nn.init.kaiming_normal_(self.linear1.weight, a=math.sqrt(5))\n 
#nn.init.xavier_normal_(self.linear2.weight)\n nn.init.xavier_uniform_(self.linear1.weight)\n nn.init.xavier_uniform_(self.linear2.weight)\n\n\nclass Decoder(nn.Module):\n def __init__(self, hid_dim, n_layers, n_heads, pf_dim, decoder_layer,\n self_attention, positionwise_feedforward, dropout):\n super().__init__()\n\n self.layers = nn.ModuleList([decoder_layer(hid_dim, n_heads, pf_dim,\n self_attention,\n positionwise_feedforward,\n dropout)\n for _ in range(n_layers)])\n\n def forward(self, trg, src, trg_mask, src_mask):\n # trg = [batch_size, trg_seq_size, hid_dim]\n # src = [batch_size, src_seq_size, hid_dim]\n # trg_mask = [batch_size, trg_seq_size]\n # src_mask = [batch_size, src_seq_size]\n for layer in self.layers:\n trg = layer(trg, src, trg_mask, src_mask)\n\n return trg\n\n\nclass DecoderLayer(nn.Module):\n def __init__(self, hid_dim, n_heads, pf_dim, self_attention,\n positionwise_feedforward, dropout):\n super().__init__()\n\n self.layernorm1 = nn.LayerNorm(hid_dim)\n self.layernorm2 = nn.LayerNorm(hid_dim)\n self.layernorm3 = nn.LayerNorm(hid_dim)\n self.layernorm4 = nn.LayerNorm(hid_dim)\n self.selfAttn = self_attention(hid_dim, n_heads, dropout)\n self.encAttn = self_attention(hid_dim, n_heads, dropout)\n self.densefilter = positionwise_feedforward(hid_dim, pf_dim, dropout)\n self.dropout1 = nn.Dropout(dropout)\n self.dropout2 = nn.Dropout(dropout)\n self.dropout3 = nn.Dropout(dropout)\n\n def forward(self, trg, src, trg_mask, src_mask):\n # trg = [batch_size, trg_seq_size, hid_dim]\n # src = [batch_size, src_seq_size, hid_dim]\n # trg_mask = [batch_size, trg_seq_size]\n # src_mask = [batch_size, src_seq_size]\n\n # self attention\n z = self.layernorm1(trg)\n z, attn = self.selfAttn(z, z, z, trg_mask)\n z = self.dropout1(z)\n trg = trg + z\n\n # encoder attention\n z = self.layernorm2(trg)\n z, attn = self.encAttn(z, src, src, src_mask)\n z = self.dropout2(z)\n trg = trg + z\n\n # dense filter\n z = self.layernorm3(trg)\n z = self.densefilter(z)\n z = self.dropout3(z)\n trg = trg + z\n\n return self.layernorm4(trg)\n\n\nclass Seq2Seq(nn.Module):\n def __init__(self, embedding, encoder, decoder, pad_idx):\n super().__init__()\n\n self.embedding = embedding\n self.encoder = encoder\n self.decoder = decoder\n self.pad_idx = pad_idx\n\n def make_masks(self, src, trg):\n # src = [batch_size, src_seq_size]\n # trg = [batch_size, trg_seq_size]\n\n src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2)\n trg_pad_mask = (trg != self.pad_idx).unsqueeze(1).unsqueeze(3)\n # trg_mask = [batch_size, 1, trg_seq_size, 1]\n trg_len = trg.shape[1]\n\n trg_sub_mask = torch.tril(\n torch.ones((trg_len, trg_len), dtype=torch.uint8, device=trg.device))\n\n #print(torch.BoolTensor(trg_pad_mask))\n #print(trg_sub_mask)\n trg_mask = trg_pad_mask & trg_sub_mask.type(torch.BoolTensor)\n\n # src_mask = [batch_size, 1, 1, pad_seq]\n # trg_mask = [batch_size, 1, pad_seq, past_seq]\n return src_mask, trg_mask\n\n def forward(self, src, trg):\n # src = [batch_size, src_seq_size]\n # trg = [batch_size, trg_seq_size]\n\n src_mask, trg_mask = self.make_masks(src, trg)\n # src_mask = [batch_size, 1, 1, pad_seq]\n # trg_mask = [batch_size, 1, pad_seq, past_seq]\n\n src = self.embedding(src)\n trg = self.embedding(trg)\n # src = [batch_size, src_seq_size, hid_dim]\n\n enc_src = self.encoder(src, src_mask)\n # enc_src = [batch_size, src_seq_size, hid_dim]\n\n out = self.decoder(trg, enc_src, trg_mask, src_mask)\n # out = [batch_size, trg_seq_size, hid_dim]\n\n logits = self.embedding.transpose_forward(out)\n 
# logits = [batch_size, trg_seq_size, d_vocab]\n\n return logits\n\n\n def make_src_mask(self, src):\n # src = [batch size, src sent len]\n src_mask = (src != self.pad_idx).unsqueeze(1).unsqueeze(2)\n return src_mask\n\n def make_trg_mask(self, trg):\n # trg = [batch size, trg sent len]\n trg_pad_mask = (trg != self.pad_idx).unsqueeze(1).unsqueeze(3)\n\n trg_len = trg.shape[1]\n\n trg_sub_mask = torch.tril(\n torch.ones((trg_len, trg_len), dtype=torch.uint8, device=trg.device))\n\n trg_mask = trg_pad_mask & trg_sub_mask.type(torch.BoolTensor)\n\n return trg_mask\n\n def greedy_inference(model, src, sos_idx, eos_idx, max_length, device):\n model.eval()\n src = src.to(device)\n src_mask = model.make_src_mask(src)\n src_emb = model.embedding(src)\n\n # run encoder\n enc_src = model.encoder(src_emb, src_mask)\n trg = torch.ones(src.shape[0], 1).fill_(sos_idx).type_as(src).to(device)\n\n done = torch.zeros(src.shape[0]).type(torch.uint8).to(device)\n for _ in range(max_length):\n trg_emb = model.embedding(trg)\n trg_mask = model.make_trg_mask(trg)\n # run decoder\n output = model.decoder(src=enc_src, trg=trg_emb,\n src_mask=src_mask, trg_mask=trg_mask)\n logits = model.embedding.transpose_forward(output)\n pred = torch.argmax(logits[:,[-1],:], dim=-1)\n trg = torch.cat([trg, pred], dim=1)\n\n eos_match = (pred.squeeze(1) == eos_idx)\n done = done.type(torch.BoolTensor) | eos_match\n\n if done.sum() == src.shape[0]:\n break\n\n return trg\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "torch.nn.Dropout", "torch.nn.functional.softmax", "torch.ones", "torch.sin", "torch.zeros", "torch.einsum", "torch.cat", "torch.arange", "torch.nn.Embedding", "torch.nn.LayerNorm", "torch.nn.Linear", "torch.nn.functional.relu", "torch.FloatTensor", "torch.nn.init.xavier_uniform_", "torch.cos", "torch.argmax" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anonymous191116/S3ED
[ "250b3e39db7dfa9d2212795b6d9ea428acfa907b", "250b3e39db7dfa9d2212795b6d9ea428acfa907b" ]
[ "datasets/helper functions/combine_A_and_B.py", "data/extrac_features.py" ]
[ "from pdb import set_trace as st\nimport os\nimport numpy as np\nimport cv2\nimport argparse\n\nparser = argparse.ArgumentParser('create image pairs')\nparser.add_argument('--fold_A', dest='fold_A', help='input directory for image A', type=str,\n default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/A')\nparser.add_argument('--fold_B', dest='fold_B', help='input directory for image B', type=str,\n default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/B')\nparser.add_argument('--fold_AB', dest='fold_AB', help='output directory', type=str,\n default='/media/disk2/daic/blur_data/3_14GAN/GAN_GO_PRO/A_B')\nparser.add_argument('--num_imgs', dest='num_imgs', help='number of images',type=int,\n default=100000)\nparser.add_argument('--use_AB', dest='use_AB', help='if true: (0001_A, 0001_B) to (0001_AB)',action='store_true')\n\nargs = parser.parse_args()\n\nfor arg in vars(args):\n print('[%s] = ' % arg, getattr(args, arg))\n\nsplits = os.listdir(args.fold_A)\n\nfor sp in splits:\n img_fold_A = os.path.join(args.fold_A, sp)\n img_fold_B = os.path.join(args.fold_B, sp)\n img_list = os.listdir(img_fold_A)\n if args.use_AB: \n img_list = [img_path for img_path in img_list if '_A.' in img_path]\n\n num_imgs = min(args.num_imgs, len(img_list))\n print('split = %s, use %d/%d images' % (sp, num_imgs, len(img_list)))\n img_fold_AB = os.path.join(args.fold_AB, sp)\n if not os.path.isdir(img_fold_AB):\n os.makedirs(img_fold_AB)\n print('split = %s, number of images = %d' % (sp, num_imgs))\n for n in range(num_imgs):\n name_A = img_list[n]\n path_A = os.path.join(img_fold_A, name_A)\n if args.use_AB:\n name_B = name_A.replace('_A.', '_B.')\n else:\n name_B = name_A\n path_B = os.path.join(img_fold_B, name_B)\n if os.path.isfile(path_A) and os.path.isfile(path_B):\n name_AB = name_A\n if args.use_AB:\n name_AB = name_AB.replace('_A.', '.') # remove _A\n path_AB = os.path.join(img_fold_AB, name_AB)\n im_A = cv2.imread(path_A, cv2.IMREAD_COLOR)\n im_B = cv2.imread(path_B, cv2.IMREAD_COLOR)\n im_AB = np.concatenate([im_A, im_B], 1)\n cv2.imwrite(path_AB, im_AB)\n", "\"\"\"\r\nflowing https://github.com/ruotianluo/ImageCaptioning.pytorch\r\nthe part of 'Download COCO dataset and preprocessing' to get coco image features.\r\nset the folder for your blurred images path,\r\n\r\n\r\nPreprocess a raw json dataset into hdf5/json files for use in data_loader.lua\r\n\r\nInput: json file that has the form\r\n[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]\r\nexample element in this list would look like\r\n{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}\r\n\r\nThis script reads this json, does some basic preprocessing on the captions\r\n(e.g. 
lowercase, etc.), creates a special UNK token, and encodes everything to arrays\r\n\r\nOutput: a json file and an hdf5 file\r\nThe hdf5 file contains several fields:\r\n/images is (N,3,256,256) uint8 array of raw image data in RGB format\r\n/labels is (M,max_length) uint32 array of encoded labels, zero padded\r\n/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the\r\n first and last indices (in range 1..M) of labels for each image\r\n/label_length stores the length of the sequence for each of the M sequences\r\n\r\nThe json file has a dict that contains:\r\n- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed\r\n- an 'images' field that is a list holding auxiliary information for each image,\r\n such as in particular the 'split' it was assigned to.\r\n\"\"\"\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nimport os\r\nimport json\r\nimport argparse\r\nfrom random import shuffle, seed\r\nimport string\r\n# non-standard dependencies:\r\nimport h5py\r\nfrom six.moves import cPickle\r\nimport numpy as np\r\nimport torch\r\nimport torchvision.models as models\r\nimport skimage.io\r\n\r\nfrom torchvision import transforms as trn\r\npreprocess = trn.Compose([\r\n trn.ToTensor(),\r\n trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n])\r\n\r\nfrom misc.resnet_utils import myResnet\r\nimport misc.resnet as resnet\r\n\r\ndef main(params):\r\n net = getattr(resnet, params['model'])()\r\n net.load_state_dict(torch.load(os.path.join(params['model_root'],params['model']+'.pth')))\r\n my_resnet = myResnet(net)\r\n my_resnet.cuda()\r\n my_resnet.eval()\r\n\r\n imgs = json.load(open(params['input_json'], 'r'))\r\n imgs = imgs['images']\r\n N = len(imgs)\r\n\r\n seed(123) # make reproducible\r\n\r\n dir_fc = params['output_dir']+'_fc'\r\n dir_att = params['output_dir']+'_att'\r\n if not os.path.isdir(dir_fc):\r\n os.mkdir(dir_fc)\r\n if not os.path.isdir(dir_att):\r\n os.mkdir(dir_att)\r\n\r\n for i,img in enumerate(imgs):\r\n # load the image\r\n I = skimage.io.imread(os.path.join(params['images_root'], img['filepath'], img['filename']))\r\n # handle grayscale input images\r\n if len(I.shape) == 2:\r\n I = I[:,:,np.newaxis]\r\n I = np.concatenate((I,I,I), axis=2)\r\n\r\n I = I.astype('float32')/255.0\r\n I = torch.from_numpy(I.transpose([2,0,1])).cuda()\r\n I = preprocess(I)\r\n with torch.no_grad():\r\n tmp_fc, tmp_att = my_resnet(I, params['att_size'])\r\n # write to pkl\r\n np.save(os.path.join(dir_fc, str(img['cocoid'])), tmp_fc.data.cpu().float().numpy())\r\n np.savez_compressed(os.path.join(dir_att, str(img['cocoid'])), feat=tmp_att.data.cpu().float().numpy())\r\n\r\n if i % 1000 == 0:\r\n print('processing %d/%d (%.2f%% done)' % (i, N, i*100.0/N))\r\n print('wrote ', params['output_dir'])\r\n\r\nif __name__ == \"__main__\":\r\n\r\n parser = argparse.ArgumentParser()\r\n\r\n # input json\r\n parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')\r\n parser.add_argument('--output_dir', default='data', help='output h5 file')\r\n\r\n # options\r\n parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')\r\n parser.add_argument('--att_size', default=14, type=int, help='14x14 or 7x7')\r\n parser.add_argument('--model', default='resnet101', type=str, help='resnet101, resnet152')\r\n parser.add_argument('--model_root', 
default='./data/imagenet_weights', type=str, help='model root')\r\n\r\n args = parser.parse_args()\r\n params = vars(args) # convert to ordinary dict\r\n print('parsed input parameters:')\r\n print(json.dumps(params, indent = 2))\r\n main(params)\r\n" ]
[ [ "numpy.concatenate" ], [ "numpy.concatenate", "torch.no_grad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pjgrandinetti/mrsimulator
[ "01b447239d9f469df62b7293a74a3d0c34500a19", "01b447239d9f469df62b7293a74a3d0c34500a19" ]
[ "examples_source/1D_simulation(crystalline)/plot_6_coupled_spin_system.py", "tests/2D_spectrum_tests/test_ssb.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCoupled spins 5/2-9/2 (Quad + J-coupling)\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n²⁷Al-⁹³Nb spin system spectrum.\n\"\"\"\n# %%\nimport matplotlib.pyplot as plt\n\nfrom mrsimulator import Simulator, SpinSystem\nfrom mrsimulator.methods import BlochDecayCTSpectrum\nfrom mrsimulator import signal_processing as sp\n\n# sphinx_gallery_thumbnail_number = 1\n\n# %%\n# **Spin System**\n#\n# Create a 27Al-93Nb coupled spin system.\nspin_system = SpinSystem(\n sites=[\n {\n \"isotope\": \"27Al\",\n \"isotropic_chemical_shift\": 0.0, # in ppm\n \"quadrupolar\": {\"Cq\": 5.0e6, \"eta\": 0.0}, # Cq is in Hz\n },\n {\n \"isotope\": \"93Nb\",\n \"isotropic_chemical_shift\": 0.0, # in ppm\n },\n ],\n couplings=[{\"site_index\": [0, 1], \"isotropic_j\": 200.0}], # j-coupling in Hz\n)\n\n# %%\n# **Method**\n#\n# Create a central transition selective Bloch decay spectrum method.\nmethod = BlochDecayCTSpectrum(\n channels=[\"27Al\"],\n magnetic_flux_density=9.4, # in T\n rotor_frequency=5e3, # in Hz\n spectral_dimensions=[\n {\n \"count\": 2048,\n \"spectral_width\": 4.0e4, # in Hz\n \"reference_offset\": -2e3, # in Hz\n }\n ],\n)\n\n# %%\n# **Simulator**\n#\n# Create the Simulator object and add the method and the spin system object.\nsim = Simulator()\nsim.spin_systems += [spin_system] # add the spin system\nsim.methods += [method] # add the method\nsim.run()\n\n# %%\n# **Post-Simulation Processing**\n#\n# Add post-simulation signal processing.\nprocessor = sp.SignalProcessor(\n operations=[\n sp.IFFT(),\n sp.apodization.Exponential(FWHM=\"30 Hz\"),\n sp.FFT(),\n ]\n)\nprocessed_data = processor.apply_operations(data=sim.methods[0].simulation)\n\n# %%\n# **Plot**\n#\n# The plot of the simulation before signal processing.\nplt.figure(figsize=(4.25, 3.0))\nax = plt.subplot(projection=\"csdm\")\nax.plot(processed_data.real, color=\"black\", linewidth=0.5)\nax.invert_xaxis()\nplt.tight_layout()\nplt.show()\n", "# -*- coding: utf-8 -*-\n\"\"\"Lineshape Test.\"\"\"\nimport numpy as np\nfrom mrsimulator import Simulator\nfrom mrsimulator import Site\nfrom mrsimulator import SpinSystem\nfrom mrsimulator.methods import BlochDecaySpectrum\nfrom mrsimulator.methods import Method2D\nfrom mrsimulator.methods import SSB2D\n\n\ndef SSB2D_setup(ist, vr, method_type):\n sites = [\n Site(\n isotope=ist,\n isotropic_chemical_shift=29,\n shielding_symmetric={\"zeta\": -70, \"eta\": 0.000},\n ),\n Site(\n isotope=ist,\n isotropic_chemical_shift=44,\n shielding_symmetric={\"zeta\": -96, \"eta\": 0.166},\n ),\n Site(\n isotope=ist,\n isotropic_chemical_shift=57,\n shielding_symmetric={\"zeta\": -120, \"eta\": 0.168},\n ),\n ]\n spin_systems = [SpinSystem(sites=[s]) for s in sites]\n\n B0 = 11.7\n if method_type == \"PASS\":\n method = SSB2D(\n channels=[ist],\n magnetic_flux_density=B0, # in T\n rotor_frequency=vr,\n spectral_dimensions=[\n {\n \"count\": 32,\n \"spectral_width\": 32 * vr, # in Hz\n \"label\": \"Anisotropic dimension\",\n },\n # The last spectral dimension block is the direct-dimension\n {\n \"count\": 2048,\n \"spectral_width\": 2e4, # in Hz\n \"reference_offset\": 5e3, # in Hz\n \"label\": \"Fast MAS dimension\",\n },\n ],\n )\n else:\n method = Method2D(\n channels=[ist],\n magnetic_flux_density=B0, # in T\n spectral_dimensions=[\n {\n \"count\": 64,\n \"spectral_width\": 8e4, # in Hz\n \"label\": \"Anisotropic dimension\",\n \"events\": [{\"rotor_angle\": 90 * 3.14159 / 180}],\n },\n # The last spectral dimension block is the direct-dimension\n 
{\n \"count\": 2048,\n \"spectral_width\": 2e4, # in Hz\n \"reference_offset\": 5e3, # in Hz\n \"label\": \"Fast MAS dimension\",\n },\n ],\n affine_matrix=[[1, -1], [0, 1]],\n )\n sim = Simulator()\n sim.spin_systems = spin_systems # add spin systems\n sim.methods = [method] # add the method.\n sim.run()\n\n data_ssb = sim.methods[0].simulation\n dim_ssb = data_ssb.x[0].coordinates.value\n\n if method_type == \"PASS\":\n bloch = BlochDecaySpectrum(\n channels=[ist],\n magnetic_flux_density=B0, # in T\n rotor_frequency=vr, # in Hz\n spectral_dimensions=[\n {\n \"count\": 32,\n \"spectral_width\": 32 * vr, # in Hz\n \"reference_offset\": 0, # in Hz\n \"label\": \"MAS dimension\",\n },\n ],\n )\n else:\n bloch = BlochDecaySpectrum(\n channels=[ist],\n magnetic_flux_density=B0, # in T\n rotor_frequency=vr, # in Hz\n rotor_angle=90 * 3.14159 / 180,\n spectral_dimensions=[\n {\n \"count\": 64,\n \"spectral_width\": 8e4, # in Hz\n \"reference_offset\": 0, # in Hz\n \"label\": \"MAS dimension\",\n },\n ],\n )\n\n for i in range(3):\n iso = spin_systems[i].sites[0].isotropic_chemical_shift\n sys = spin_systems[i].copy()\n sys.sites[0].isotropic_chemical_shift = 0\n sim2 = Simulator()\n sim2.spin_systems = [sys] # add spin systems\n sim2.methods = [bloch] # add the method.\n sim2.run()\n\n index = np.where(dim_ssb < iso)[0][-1]\n\n one_d_section = data_ssb.y[0].components[0][:, index]\n one_d_section /= one_d_section.max()\n\n one_d_sim = sim2.methods[0].simulation.y[0].components[0]\n one_d_sim /= one_d_sim.max()\n\n np.testing.assert_almost_equal(one_d_section, one_d_sim, decimal=6)\n\n\ndef test_01():\n SSB2D_setup(\"29Si\", 1500, \"PASS\")\n\n\ndef test_02():\n SSB2D_setup(\"31P\", 2500, \"PASS\")\n\n\ndef test_03():\n SSB2D_setup(\"29Si\", 1e9, \"MAF\")\n\n\ndef test_04():\n SSB2D_setup(\"31P\", 1e9, \"MAF\")\n" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.testing.assert_almost_equal", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
csm-adapt/karon
[ "988ef07eb9339b6dea3babd4f31337bce7b5d5e4" ]
[ "src/karon/tree/build.py" ]
[ "__all__ = [\"generate_tree\", \"from_parent\"]\n\n\nimport warnings\nimport numpy as np\n\n\ndef generate_tree(get_nodeid, get_parent, cmp=None):\n \"\"\"\n Defines the functions required to (a) extract a field from a\n node, (b) extract a field from a prospective parent node, and (c)\n compare the results to establish whether the prospective node\n is (`cmp` returns True) or is not (`cmp` returns False) the parent\n of the node.\n\n Example:\n\n def get_parent(node):\n return node.contents['parent name']\n def get_name(node):\n return node.contents['name']\n nodes = get_nodelist_from_file('foo.xlsx')\n tree = generate_tree(get_name, get_parent)(nodes)\n\n :param get_nodeid: Unary function that extracts a field from a Node object.\n :type get_nodeid: Unary function, signature: get_nodeid(Node).\n :param get_parent: Unary function that extracts a field from a Node object.\n :type get_parent: Unary function or None. If a unary function, the signature\n is get_parent(Node)\n :param cmp: (optional) Unary function that compares the results of\n parentID and nodeExtract. Returns True if the values match,\n False otherwise.\n :return: Unary function, signature: f(array-like-of-Nodes)\n \"\"\"\n\n def is_null(obj):\n try:\n return np.isnan(obj)\n except TypeError:\n return not bool(obj)\n\n def equal(lhs, rhs):\n if is_null(lhs) or is_null(rhs):\n return False\n else:\n return lhs == rhs\n\n def build(nodelist):\n \"\"\"\n Returns the parent of the node.\n :param nodelist: List of nodes to be used to build a tree\n :return:\n \"\"\"\n roots = []\n for node in nodelist:\n value = get_parent(node)\n # which nodes in \"nodelist\" are parents of \"node\"?\n parents = [n for n in nodelist if cmp(value, get_nodeid(n))]\n if len(parents) > 1:\n # TODO: Rather than return an error, compose a common parent\n # that combines properties from the matching parent\n # nodes. Along with the original child node\n # these matching parents become the children to\n # the common parent, thereby maintaining the single parent\n # required for a tree, but establishing a connection to\n # all matching parents.\n # COMPLICATIONS:\n # 1. What properties do the common parent have?\n # 2. How are the matching parent attributes combined\n # in the common parent? (list/append? reduce?)\n msg = f'{value} has more than one ({len(parents)}) matching '\\\n f'parent node: {[p.contents for p in parents]}'\n raise ValueError(msg)\n try:\n parent = parents[0]\n parent.add_child(node)\n except IndexError:\n # no parent found, therefore this node is a root node\n roots.append(node)\n return roots\n\n # handle positional parameters\n # handle optional parameters\n cmp = equal if cmp is None else cmp\n\n return build\n\n\ndef from_parent(nodes, get_key, get_parent):\n \"\"\"\n Builds up tree structures from a dictionary of nodes. The name of the\n parent node is given by `key` in the will-be child node contents, e.g.\n\n parent = node.contents[key]\n nodes[parent].add_child(node)\n\n Any node that does not specify a parent is the root node of its own\n tree.\n\n :param nodes: List of nodes that are to be structured into trees\n :type nodes: List-like.\n :param get_key: Gets the identifier for each node.\n :type get_key: Unary function or string. Unary function has the signature\n `get_key(node)` and returns a hashable object. If get_key is a string,\n returns node.contents[get_key].\n :param get_parent: Gets the identifier for the parent for each node.\n :type get_parent: Unary function or string. 
Unary function has the signature\n `get_parent(node)` and returns a hashable object. If get_parent is a\n string, returns node.contents[get_parent]. If no parent node is\n found, then returns None.\n :return: Constructed trees.\n :rtype: list\n \"\"\"\n # if get_key or get_parent are strings, then these will be interpretted\n # as the key in the Node.contents dictionary.\n if isinstance(get_key, str):\n key_str = get_key\n\n def get_key(node):\n return node.contents.get(key_str, None)\n\n if isinstance(get_parent, str):\n parent_str = get_parent\n\n def get_parent(node):\n return node.contents.get(parent_str, None)\n\n # construct a map of the nodes\n nodemap = {get_key(node): node for node in nodes}\n # construct the trees\n roots = []\n for key, node in iter(nodemap.items()):\n parent = get_parent(node)\n if parent is not None:\n try:\n nodemap[parent].add_child(node)\n except KeyError:\n warnings.warn(f\"{parent} was not found in the set of \"\n f\"nodes. Child will be treated as a root.\")\n roots.append(node)\n else:\n roots.append(node)\n return roots\n" ]
[ [ "numpy.isnan" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Qottmann/pennylane
[ "ba7cb0d27965bdb642d29648d70dd8246432eec0", "ba7cb0d27965bdb642d29648d70dd8246432eec0", "ba7cb0d27965bdb642d29648d70dd8246432eec0" ]
[ "pennylane/_qubit_device.py", "tests/test_prob.py", "tests/fourier/test_reconstruct.py" ]
[ "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the :class:`QubitDevice` abstract base class.\n\"\"\"\n\n# For now, arguments may be different from the signatures provided in Device\n# e.g. instead of expval(self, observable, wires, par) have expval(self, observable)\n# pylint: disable=arguments-differ, abstract-method, no-value-for-parameter,too-many-instance-attributes,too-many-branches, arguments-renamed\nimport abc\nfrom collections import OrderedDict\nimport itertools\nimport warnings\n\nimport numpy as np\n\nimport pennylane as qml\nfrom pennylane.operation import (\n Sample,\n Variance,\n Expectation,\n Probability,\n State,\n operation_derivative,\n)\nfrom pennylane import Device\nfrom pennylane.math import sum as qmlsum\nfrom pennylane.wires import Wires\n\nfrom pennylane.measure import MeasurementProcess\n\n\nclass QubitDevice(Device):\n \"\"\"Abstract base class for PennyLane qubit devices.\n\n The following abstract method **must** be defined:\n\n * :meth:`~.apply`: append circuit operations, compile the circuit (if applicable),\n and perform the quantum computation.\n\n Devices that generate their own samples (such as hardware) may optionally\n overwrite :meth:`~.probabilty`. This method otherwise automatically\n computes the probabilities from the generated samples, and **must**\n overwrite the following method:\n\n * :meth:`~.generate_samples`: Generate samples from the device from the\n exact or approximate probability distribution.\n\n Analytic devices **must** overwrite the following method:\n\n * :meth:`~.analytic_probability`: returns the probability or marginal probability from the\n device after circuit execution. :meth:`~.marginal_prob` may be used here.\n\n This device contains common utility methods for qubit-based devices. These\n do not need to be overwritten. Utility methods include:\n\n * :meth:`~.expval`, :meth:`~.var`, :meth:`~.sample`: return expectation values,\n variances, and samples of observables after the circuit has been rotated\n into the observable eigenbasis.\n\n Args:\n wires (int, Iterable[Number, str]]): Number of subsystems represented by the device,\n or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)\n or strings (``['ancilla', 'q1', 'q2']``). Default 1 if not specified.\n shots (None, int, list[int]): Number of circuit evaluations/random samples used to estimate\n expectation values of observables. If ``None``, the device calculates probability, expectation values,\n and variances analytically. If an integer, it specifies the number of samples to estimate these quantities.\n If a list of integers is passed, the circuit evaluations are batched over the list of shots.\n cache (int): Number of device executions to store in a cache to speed up subsequent\n executions. A value of ``0`` indicates that no caching will take place. 
Once filled,\n older elements of the cache are removed and replaced with the most recent device\n executions to keep the cache up to date.\n \"\"\"\n\n # pylint: disable=too-many-public-methods\n C_DTYPE = np.complex128\n R_DTYPE = np.float64\n _asarray = staticmethod(np.asarray)\n _dot = staticmethod(np.dot)\n _abs = staticmethod(np.abs)\n _reduce_sum = staticmethod(lambda array, axes: np.sum(array, axis=tuple(axes)))\n _reshape = staticmethod(np.reshape)\n _flatten = staticmethod(lambda array: array.flatten())\n _gather = staticmethod(lambda array, indices: array[indices])\n _einsum = staticmethod(np.einsum)\n _cast = staticmethod(np.asarray)\n _transpose = staticmethod(np.transpose)\n _tensordot = staticmethod(np.tensordot)\n _conj = staticmethod(np.conj)\n _imag = staticmethod(np.imag)\n _roll = staticmethod(np.roll)\n _stack = staticmethod(np.stack)\n _outer = staticmethod(np.outer)\n _diag = staticmethod(np.diag)\n _real = staticmethod(np.real)\n\n @staticmethod\n def _scatter(indices, array, new_dimensions):\n new_array = np.zeros(new_dimensions, dtype=array.dtype.type)\n new_array[indices] = array\n return new_array\n\n observables = {\n \"PauliX\",\n \"PauliY\",\n \"PauliZ\",\n \"Hadamard\",\n \"Hermitian\",\n \"Identity\",\n \"Projector\",\n }\n\n def __init__(self, wires=1, shots=None, cache=0, analytic=None):\n super().__init__(wires=wires, shots=shots, analytic=analytic)\n\n self._samples = None\n \"\"\"None or array[int]: stores the samples generated by the device\n *after* rotation to diagonalize the observables.\"\"\"\n\n self._cache = cache\n \"\"\"int: Number of device executions to store in a cache to speed up subsequent\n executions. If set to zero, no caching occurs.\"\"\"\n\n self._cache_execute = OrderedDict()\n \"\"\"OrderedDict[int: Any]: Mapping from hashes of the circuit to results of executing the\n device.\"\"\"\n\n @classmethod\n def capabilities(cls):\n\n capabilities = super().capabilities().copy()\n capabilities.update(\n model=\"qubit\",\n supports_finite_shots=True,\n supports_tensor_observables=True,\n returns_probs=True,\n )\n return capabilities\n\n def reset(self):\n \"\"\"Reset the backend state.\n\n After the reset, the backend should be as if it was just constructed.\n Most importantly the quantum state is reset to its initial value.\n \"\"\"\n self._samples = None\n\n def execute(self, circuit, **kwargs):\n \"\"\"Execute a queue of quantum operations on the device and then\n measure the given observables.\n\n For plugin developers: instead of overwriting this, consider\n implementing a suitable subset of\n\n * :meth:`apply`\n\n * :meth:`~.generate_samples`\n\n * :meth:`~.probability`\n\n Additional keyword arguments may be passed to the this method\n that can be utilised by :meth:`apply`. 
An example would be passing\n the ``QNode`` hash that can be used later for parametric compilation.\n\n Args:\n circuit (~.CircuitGraph): circuit to execute on the device\n\n Raises:\n QuantumFunctionError: if the value of :attr:`~.Observable.return_type` is not supported\n\n Returns:\n array[float]: measured value(s)\n \"\"\"\n\n if self._cache:\n circuit_hash = circuit.graph.hash\n if circuit_hash in self._cache_execute:\n return self._cache_execute[circuit_hash]\n\n self.check_validity(circuit.operations, circuit.observables)\n\n # apply all circuit operations\n self.apply(circuit.operations, rotations=circuit.diagonalizing_gates, **kwargs)\n\n # generate computational basis samples\n if self.shots is not None or circuit.is_sampled:\n self._samples = self.generate_samples()\n\n multiple_sampled_jobs = circuit.is_sampled and self._has_partitioned_shots()\n\n # compute the required statistics\n if not self.analytic and self._shot_vector is not None:\n\n results = []\n s1 = 0\n\n for shot_tuple in self._shot_vector:\n s2 = s1 + np.prod(shot_tuple)\n r = self.statistics(\n circuit.observables, shot_range=[s1, s2], bin_size=shot_tuple.shots\n )\n\n if qml.math._multi_dispatch(r) == \"jax\": # pylint: disable=protected-access\n r = r[0]\n else:\n r = qml.math.squeeze(r)\n\n if shot_tuple.copies > 1:\n results.extend(r.T)\n else:\n results.append(r.T)\n\n s1 = s2\n\n if not multiple_sampled_jobs:\n # Can only stack single element outputs\n results = qml.math.stack(results)\n\n else:\n results = self.statistics(circuit.observables)\n\n if (circuit.all_sampled or not circuit.is_sampled) and not multiple_sampled_jobs:\n results = self._asarray(results)\n else:\n results = tuple(self._asarray(r) for r in results)\n\n if self._cache and circuit_hash not in self._cache_execute:\n self._cache_execute[circuit_hash] = results\n if len(self._cache_execute) > self._cache:\n self._cache_execute.popitem(last=False)\n\n # increment counter for number of executions of qubit device\n self._num_executions += 1\n\n if self.tracker.active:\n self.tracker.update(executions=1, shots=self._shots)\n self.tracker.record()\n\n return results\n\n @property\n def cache(self):\n \"\"\"int: Number of device executions to store in a cache to speed up subsequent\n executions. If set to zero, no caching occurs.\"\"\"\n return self._cache\n\n def batch_execute(self, circuits):\n \"\"\"Execute a batch of quantum circuits on the device.\n\n The circuits are represented by tapes, and they are executed one-by-one using the\n device's ``execute`` method. 
The results are collected in a list.\n\n For plugin developers: This function should be overwritten if the device can efficiently run multiple\n circuits on a backend, for example using parallel and/or asynchronous executions.\n\n Args:\n circuits (list[.tapes.QuantumTape]): circuits to execute on the device\n\n Returns:\n list[array[float]]: list of measured value(s)\n \"\"\"\n # TODO: This method and the tests can be globally implemented by Device\n # once it has the same signature in the execute() method\n\n results = []\n for circuit in circuits:\n # we need to reset the device here, else it will\n # not start the next computation in the zero state\n self.reset()\n\n res = self.execute(circuit)\n results.append(res)\n\n if self.tracker.active:\n self.tracker.update(batches=1, batch_len=len(circuits))\n self.tracker.record()\n\n return results\n\n @abc.abstractmethod\n def apply(self, operations, **kwargs):\n \"\"\"Apply quantum operations, rotate the circuit into the measurement\n basis, and compile and execute the quantum circuit.\n\n This method receives a list of quantum operations queued by the QNode,\n and should be responsible for:\n\n * Constructing the quantum program\n * (Optional) Rotating the quantum circuit using the rotation\n operations provided. This diagonalizes the circuit so that arbitrary\n observables can be measured in the computational basis.\n * Compile the circuit\n * Execute the quantum circuit\n\n Both arguments are provided as lists of PennyLane :class:`~.Operation`\n instances. Useful properties include :attr:`~.Operation.name`,\n :attr:`~.Operation.wires`, and :attr:`~.Operation.parameters`,\n and :attr:`~.Operation.inverse`:\n\n >>> op = qml.RX(0.2, wires=[0])\n >>> op.name # returns the operation name\n \"RX\"\n >>> op.wires # returns a Wires object representing the wires that the operation acts on\n <Wires = [0]>\n >>> op.parameters # returns a list of parameters\n [0.2]\n >>> op.inverse # check if the operation should be inverted\n False\n >>> op = qml.RX(0.2, wires=[0]).inv\n >>> op.inverse\n True\n\n Args:\n operations (list[~.Operation]): operations to apply to the device\n\n Keyword args:\n rotations (list[~.Operation]): operations that rotate the circuit\n pre-measurement into the eigenbasis of the observables.\n hash (int): the hash value of the circuit constructed by `CircuitGraph.hash`\n \"\"\"\n\n @staticmethod\n def active_wires(operators):\n \"\"\"Returns the wires acted on by a set of operators.\n\n Args:\n operators (list[~.Operation]): operators for which\n we are gathering the active wires\n\n Returns:\n Wires: wires activated by the specified operators\n \"\"\"\n list_of_wires = [op.wires for op in operators]\n\n return Wires.all_wires(list_of_wires)\n\n def statistics(self, observables, shot_range=None, bin_size=None):\n \"\"\"Process measurement results from circuit execution and return statistics.\n\n This includes returning expectation values, variance, samples, probabilities, states, and\n density matrices.\n\n Args:\n observables (List[.Observable]): the observables to be measured\n shot_range (tuple[int]): 2-tuple of integers specifying the range of samples\n to use. If not specified, all samples are used.\n bin_size (int): Divides the shot range into bins of size ``bin_size``, and\n returns the measurement statistic separately over each bin. 
If not\n provided, the entire shot range is treated as a single bin.\n\n Raises:\n QuantumFunctionError: if the value of :attr:`~.Observable.return_type` is not supported\n\n Returns:\n Union[float, List[float]]: the corresponding statistics\n\n .. UsageDetails::\n\n The ``shot_range`` and ``bin_size`` arguments allow for the statistics\n to be performed on only a subset of device samples. This finer level\n of control is accessible from the main UI by instantiating a device\n with a batch of shots.\n\n For example, consider the following device:\n\n >>> dev = qml.device(\"my_device\", shots=[5, (10, 3), 100])\n\n This device will execute QNodes using 135 shots, however\n measurement statistics will be **course grained** across these 135\n shots:\n\n * All measurement statistics will first be computed using the\n first 5 shots --- that is, ``shots_range=[0, 5]``, ``bin_size=5``.\n\n * Next, the tuple ``(10, 3)`` indicates 10 shots, repeated 3 times. We will want to use\n ``shot_range=[5, 35]``, performing the expectation value in bins of size 10\n (``bin_size=10``).\n\n * Finally, we repeat the measurement statistics for the final 100 shots,\n ``shot_range=[35, 135]``, ``bin_size=100``.\n \"\"\"\n results = []\n\n for obs in observables:\n # Pass instances directly\n if obs.return_type is Expectation:\n results.append(self.expval(obs, shot_range=shot_range, bin_size=bin_size))\n\n elif obs.return_type is Variance:\n results.append(self.var(obs, shot_range=shot_range, bin_size=bin_size))\n\n elif obs.return_type is Sample:\n results.append(self.sample(obs, shot_range=shot_range, bin_size=bin_size))\n\n elif obs.return_type is Probability:\n results.append(\n self.probability(wires=obs.wires, shot_range=shot_range, bin_size=bin_size)\n )\n\n elif obs.return_type is State:\n if len(observables) > 1:\n raise qml.QuantumFunctionError(\n \"The state or density matrix cannot be returned in combination\"\n \" with other return types\"\n )\n if self.wires.labels != tuple(range(self.num_wires)):\n raise qml.QuantumFunctionError(\n \"Returning the state is not supported when using custom wire labels\"\n )\n # Check if the state is accessible and decide to return the state or the density\n # matrix.\n results.append(self.access_state(wires=obs.wires))\n\n elif obs.return_type is not None:\n raise qml.QuantumFunctionError(\n f\"Unsupported return type specified for observable {obs.name}\"\n )\n\n return results\n\n def access_state(self, wires=None):\n \"\"\"Check that the device has access to an internal state and return it if available.\n\n Args:\n wires (Wires): wires of the reduced system\n\n Raises:\n QuantumFunctionError: if the device is not capable of returning the state\n\n Returns:\n array or tensor: the state or the density matrix of the device\n \"\"\"\n if not self.capabilities().get(\"returns_state\"):\n raise qml.QuantumFunctionError(\n \"The current device is not capable of returning the state\"\n )\n\n state = getattr(self, \"state\", None)\n\n if state is None:\n raise qml.QuantumFunctionError(\"The state is not available in the current device\")\n\n if wires:\n density_matrix = self.density_matrix(wires)\n return density_matrix\n\n return state\n\n def generate_samples(self):\n r\"\"\"Returns the computational basis samples generated for all wires.\n\n Note that PennyLane uses the convention :math:`|q_0,q_1,\\dots,q_{N-1}\\rangle` where\n :math:`q_0` is the most significant bit.\n\n .. 
warning::\n\n This method should be overwritten on devices that\n generate their own computational basis samples, with the resulting\n computational basis samples stored as ``self._samples``.\n\n Returns:\n array[complex]: array of samples in the shape ``(dev.shots, dev.num_wires)``\n \"\"\"\n number_of_states = 2 ** self.num_wires\n\n rotated_prob = self.analytic_probability()\n\n samples = self.sample_basis_states(number_of_states, rotated_prob)\n return QubitDevice.states_to_binary(samples, self.num_wires)\n\n def sample_basis_states(self, number_of_states, state_probability):\n \"\"\"Sample from the computational basis states based on the state\n probability.\n\n This is an auxiliary method to the generate_samples method.\n\n Args:\n number_of_states (int): the number of basis states to sample from\n state_probability (array[float]): the computational basis probability vector\n\n Returns:\n array[int]: the sampled basis states\n \"\"\"\n if self.shots is None:\n\n raise qml.QuantumFunctionError(\n \"The number of shots has to be explicitly set on the device \"\n \"when using sample-based measurements.\"\n )\n\n shots = self.shots\n\n basis_states = np.arange(number_of_states)\n return np.random.choice(basis_states, shots, p=state_probability)\n\n @staticmethod\n def generate_basis_states(num_wires, dtype=np.uint32):\n \"\"\"\n Generates basis states in binary representation according to the number\n of wires specified.\n\n The states_to_binary method creates basis states faster (for larger\n systems at times over x25 times faster) than the approach using\n ``itertools.product``, at the expense of using slightly more memory.\n\n Due to the large size of the integer arrays for more than 32 bits,\n memory allocation errors may arise in the states_to_binary method.\n Hence we constraint the dtype of the array to represent unsigned\n integers on 32 bits. Due to this constraint, an overflow occurs for 32\n or more wires, therefore this approach is used only for fewer wires.\n\n For smaller number of wires speed is comparable to the next approach\n (using ``itertools.product``), hence we resort to that one for testing\n purposes.\n\n Args:\n num_wires (int): the number wires\n dtype=np.uint32 (type): the data type of the arrays to use\n\n Returns:\n array[int]: the sampled basis states\n \"\"\"\n if 2 < num_wires < 32:\n states_base_ten = np.arange(2 ** num_wires, dtype=dtype)\n return QubitDevice.states_to_binary(states_base_ten, num_wires, dtype=dtype)\n\n # A slower, but less memory intensive method\n basis_states_generator = itertools.product((0, 1), repeat=num_wires)\n return np.fromiter(itertools.chain(*basis_states_generator), dtype=int).reshape(\n -1, num_wires\n )\n\n @staticmethod\n def states_to_binary(samples, num_wires, dtype=np.int64):\n \"\"\"Convert basis states from base 10 to binary representation.\n\n This is an auxiliary method to the generate_samples method.\n\n Args:\n samples (array[int]): samples of basis states in base 10 representation\n num_wires (int): the number of qubits\n dtype (type): Type of the internal integer array to be used. 
Can be\n important to specify for large systems for memory allocation\n purposes.\n\n Returns:\n array[int]: basis states in binary representation\n \"\"\"\n powers_of_two = 1 << np.arange(num_wires, dtype=dtype)\n states_sampled_base_ten = samples[:, None] & powers_of_two\n return (states_sampled_base_ten > 0).astype(dtype)[:, ::-1]\n\n @property\n def circuit_hash(self):\n \"\"\"The hash of the circuit upon the last execution.\n\n This can be used by devices in :meth:`~.apply` for parametric compilation.\n \"\"\"\n raise NotImplementedError\n\n @property\n def state(self):\n \"\"\"Returns the state vector of the circuit prior to measurement.\n\n .. note::\n\n Only state vector simulators support this property. Please see the\n plugin documentation for more details.\n \"\"\"\n raise NotImplementedError\n\n def density_matrix(self, wires):\n \"\"\"Returns the reduced density matrix prior to measurement.\n\n .. note::\n\n Only state vector simulators support this property. Please see the\n plugin documentation for more details.\n \"\"\"\n raise NotImplementedError\n\n def analytic_probability(self, wires=None):\n r\"\"\"Return the (marginal) probability of each computational basis\n state from the last run of the device.\n\n PennyLane uses the convention\n :math:`|q_0,q_1,\\dots,q_{N-1}\\rangle` where :math:`q_0` is the most\n significant bit.\n\n If no wires are specified, then all the basis states representable by\n the device are considered and no marginalization takes place.\n\n .. note::\n\n :meth:`marginal_prob` may be used as a utility method\n to calculate the marginal probability distribution.\n\n Args:\n wires (Iterable[Number, str], Number, str, Wires): wires to return\n marginal probabilities for. Wires not provided are traced out of the system.\n\n Returns:\n array[float]: list of the probabilities\n \"\"\"\n raise NotImplementedError\n\n def estimate_probability(self, wires=None, shot_range=None, bin_size=None):\n \"\"\"Return the estimated probability of each computational basis state\n using the generated samples.\n\n Args:\n wires (Iterable[Number, str], Number, str, Wires): wires to calculate\n marginal probabilities for. Wires not provided are traced out of the system.\n shot_range (tuple[int]): 2-tuple of integers specifying the range of samples\n to use. If not specified, all samples are used.\n bin_size (int): Divides the shot range into bins of size ``bin_size``, and\n returns the measurement statistic separately over each bin. 
If not\n provided, the entire shot range is treated as a single bin.\n\n Returns:\n array[float]: list of the probabilities\n \"\"\"\n\n wires = wires or self.wires\n # convert to a wires object\n wires = Wires(wires)\n # translate to wire labels used by device\n device_wires = self.map_wires(wires)\n\n sample_slice = Ellipsis if shot_range is None else slice(*shot_range)\n samples = self._samples[sample_slice, device_wires]\n\n # convert samples from a list of 0, 1 integers, to base 10 representation\n powers_of_two = 2 ** np.arange(len(device_wires))[::-1]\n indices = samples @ powers_of_two\n\n # count the basis state occurrences, and construct the probability vector\n if bin_size is not None:\n bins = len(samples) // bin_size\n\n indices = indices.reshape((bins, -1))\n prob = np.zeros([2 ** len(device_wires), bins], dtype=np.float64)\n\n # count the basis state occurrences, and construct the probability vector\n for b, idx in enumerate(indices):\n basis_states, counts = np.unique(idx, return_counts=True)\n prob[basis_states, b] = counts / bin_size\n\n else:\n basis_states, counts = np.unique(indices, return_counts=True)\n prob = np.zeros([2 ** len(device_wires)], dtype=np.float64)\n prob[basis_states] = counts / len(samples)\n\n return self._asarray(prob, dtype=self.R_DTYPE)\n\n def probability(self, wires=None, shot_range=None, bin_size=None):\n \"\"\"Return either the analytic probability or estimated probability of\n each computational basis state.\n\n Devices that require a finite number of shots always return the\n estimated probability.\n\n Args:\n wires (Iterable[Number, str], Number, str, Wires): wires to return\n marginal probabilities for. Wires not provided are traced out of the system.\n\n Returns:\n array[float]: list of the probabilities\n \"\"\"\n\n if self.shots is None:\n return self.analytic_probability(wires=wires)\n\n return self.estimate_probability(wires=wires, shot_range=shot_range, bin_size=bin_size)\n\n def marginal_prob(self, prob, wires=None):\n r\"\"\"Return the marginal probability of the computational basis\n states by summing the probabiliites on the non-specified wires.\n\n If no wires are specified, then all the basis states representable by\n the device are considered and no marginalization takes place.\n\n .. note::\n\n If the provided wires are not in the order as they appear on the device,\n the returned marginal probabilities take this permutation into account.\n\n For example, if the addressable wires on this device are ``Wires([0, 1, 2])`` and\n this function gets passed ``wires=[2, 0]``, then the returned marginal\n probability vector will take this 'reversal' of the two wires\n into account:\n\n .. math::\n\n \\mathbb{P}^{(2, 0)}\n = \\left[\n |00\\rangle, |10\\rangle, |01\\rangle, |11\\rangle\n \\right]\n\n Args:\n prob: The probabilities to return the marginal probabilities\n for\n wires (Iterable[Number, str], Number, str, Wires): wires to return\n marginal probabilities for. 
Wires not provided\n are traced out of the system.\n\n Returns:\n array[float]: array of the resulting marginal probabilities.\n \"\"\"\n\n if wires is None:\n # no need to marginalize\n return prob\n\n wires = Wires(wires)\n # determine which subsystems are to be summed over\n inactive_wires = Wires.unique_wires([self.wires, wires])\n\n # translate to wire labels used by device\n device_wires = self.map_wires(wires)\n inactive_device_wires = self.map_wires(inactive_wires)\n\n # reshape the probability so that each axis corresponds to a wire\n prob = self._reshape(prob, [2] * self.num_wires)\n\n # sum over all inactive wires\n # hotfix to catch when default.qubit uses this method\n # since then device_wires is a list\n if isinstance(inactive_device_wires, Wires):\n prob = self._flatten(self._reduce_sum(prob, inactive_device_wires.labels))\n else:\n prob = self._flatten(self._reduce_sum(prob, inactive_device_wires))\n\n # The wires provided might not be in consecutive order (i.e., wires might be [2, 0]).\n # If this is the case, we must permute the marginalized probability so that\n # it corresponds to the orders of the wires passed.\n num_wires = len(device_wires)\n basis_states = self.generate_basis_states(num_wires)\n basis_states = basis_states[:, np.argsort(np.argsort(device_wires))]\n\n powers_of_two = 2 ** np.arange(len(device_wires))[::-1]\n perm = basis_states @ powers_of_two\n return self._gather(prob, perm)\n\n def expval(self, observable, shot_range=None, bin_size=None):\n\n if observable.name == \"Projector\":\n # branch specifically to handle the projector observable\n idx = int(\"\".join(str(i) for i in observable.parameters[0]), 2)\n probs = self.probability(\n wires=observable.wires, shot_range=shot_range, bin_size=bin_size\n )\n return probs[idx]\n\n # exact expectation value\n if self.shots is None:\n try:\n eigvals = self._asarray(observable.eigvals, dtype=self.R_DTYPE)\n except NotImplementedError as e:\n raise ValueError(\n f\"Cannot compute analytic expectations of {observable.name}.\"\n ) from e\n\n prob = self.probability(wires=observable.wires)\n return self._dot(eigvals, prob)\n\n # estimate the ev\n samples = self.sample(observable, shot_range=shot_range, bin_size=bin_size)\n return np.squeeze(np.mean(samples, axis=0))\n\n def var(self, observable, shot_range=None, bin_size=None):\n\n if observable.name == \"Projector\":\n # branch specifically to handle the projector observable\n idx = int(\"\".join(str(i) for i in observable.parameters[0]), 2)\n probs = self.probability(\n wires=observable.wires, shot_range=shot_range, bin_size=bin_size\n )\n return probs[idx] - probs[idx] ** 2\n\n # exact variance value\n if self.shots is None:\n try:\n eigvals = self._asarray(observable.eigvals, dtype=self.R_DTYPE)\n except NotImplementedError as e:\n # if observable has no info on eigenvalues, we cannot return this measurement\n raise ValueError(f\"Cannot compute analytic variance of {observable.name}.\") from e\n prob = self.probability(wires=observable.wires)\n return self._dot((eigvals ** 2), prob) - self._dot(eigvals, prob) ** 2\n\n # estimate the variance\n samples = self.sample(observable, shot_range=shot_range, bin_size=bin_size)\n return np.squeeze(np.var(samples, axis=0))\n\n def sample(self, observable, shot_range=None, bin_size=None):\n\n # translate to wire labels used by device\n device_wires = self.map_wires(observable.wires)\n name = observable.name\n sample_slice = Ellipsis if shot_range is None else slice(*shot_range)\n\n if isinstance(name, str) and name in 
{\"PauliX\", \"PauliY\", \"PauliZ\", \"Hadamard\"}:\n # Process samples for observables with eigenvalues {1, -1}\n samples = 1 - 2 * self._samples[sample_slice, device_wires[0]]\n\n elif isinstance(\n observable, MeasurementProcess\n ): # if no observable was provided then return the raw samples\n if (\n len(observable.wires) != 0\n ): # if wires are provided, then we only return samples from those wires\n samples = self._samples[sample_slice, np.array(device_wires)]\n else:\n samples = self._samples[sample_slice]\n\n else:\n\n # Replace the basis state in the computational basis with the correct eigenvalue.\n # Extract only the columns of the basis samples required based on ``wires``.\n samples = self._samples[\n sample_slice, np.array(device_wires)\n ] # Add np.array here for Jax support.\n powers_of_two = 2 ** np.arange(samples.shape[-1])[::-1]\n indices = samples @ powers_of_two\n try:\n samples = observable.eigvals[indices]\n except NotImplementedError as e:\n # if observable has no info on eigenvalues, we cannot return this measurement\n raise ValueError(f\"Cannot compute samples of {observable.name}.\") from e\n\n if bin_size is None:\n return samples\n\n return samples.reshape((bin_size, -1))\n\n def adjoint_jacobian(self, tape, starting_state=None, use_device_state=False):\n \"\"\"Implements the adjoint method outlined in\n `Jones and Gacon <https://arxiv.org/abs/2009.02823>`__ to differentiate an input tape.\n\n After a forward pass, the circuit is reversed by iteratively applying inverse (adjoint)\n gates to scan backwards through the circuit.\n\n .. note::\n The adjoint differentiation method has the following restrictions:\n\n * As it requires knowledge of the statevector, only statevector simulator devices can be\n used.\n\n * Only expectation values are supported as measurements.\n\n * Does not work for Hamiltonian observables.\n\n Args:\n tape (.QuantumTape): circuit that the function takes the gradient of\n\n Keyword Args:\n starting_state (tensor_like): post-forward pass state to start execution with. It should be\n complex-valued. Takes precedence over ``use_device_state``.\n use_device_state (bool): use current device state to initialize. A forward pass of the same\n circuit should be the last thing the device has executed. 
If a ``starting_state`` is\n provided, that takes precedence.\n\n Returns:\n array: the derivative of the tape with respect to trainable parameters.\n Dimensions are ``(len(observables), len(trainable_params))``.\n\n Raises:\n QuantumFunctionError: if the input tape has measurements that are not expectation values\n or contains a multi-parameter operation aside from :class:`~.Rot`\n \"\"\"\n # broadcasted inner product not summing over first dimension of b\n sum_axes = tuple(range(1, self.num_wires + 1))\n dot_product_real = lambda b, k: self._real(qmlsum(self._conj(b) * k, axis=sum_axes))\n\n for m in tape.measurements:\n if m.return_type is not qml.operation.Expectation:\n raise qml.QuantumFunctionError(\n \"Adjoint differentiation method does not support\"\n f\" measurement {m.return_type.value}\"\n )\n\n if m.obs.name == \"Hamiltonian\":\n raise qml.QuantumFunctionError(\n \"Adjoint differentiation method does not support Hamiltonian observables.\"\n )\n\n if not hasattr(m.obs, \"base_name\"):\n m.obs.base_name = None # This is needed for when the observable is a tensor product\n\n if self.shots is not None:\n warnings.warn(\n \"Requested adjoint differentiation to be computed with finite shots.\"\n \" The derivative is always exact when using the adjoint differentiation method.\",\n UserWarning,\n )\n\n # Initialization of state\n if starting_state is not None:\n ket = self._reshape(starting_state, [2] * self.num_wires)\n else:\n if not use_device_state:\n self.reset()\n self.execute(tape)\n ket = self._pre_rotated_state\n\n n_obs = len(tape.observables)\n bras = np.empty([n_obs] + [2] * self.num_wires, dtype=np.complex128)\n for kk in range(n_obs):\n bras[kk, ...] = self._apply_operation(ket, tape.observables[kk])\n\n expanded_ops = []\n for op in reversed(tape.operations):\n if op.num_params > 1:\n if isinstance(op, qml.Rot) and not op.inverse:\n ops = op.decompose()\n expanded_ops.extend(reversed(ops))\n else:\n raise qml.QuantumFunctionError(\n f\"The {op.name} operation is not supported using \"\n 'the \"adjoint\" differentiation method'\n )\n else:\n if op.name not in (\"QubitStateVector\", \"BasisState\"):\n expanded_ops.append(op)\n\n jac = np.zeros((len(tape.observables), len(tape.trainable_params)))\n\n param_number = len(tape._par_info) - 1 # pylint: disable=protected-access\n trainable_param_number = len(tape.trainable_params) - 1\n for op in expanded_ops:\n\n if (op.grad_method is not None) and (param_number in tape.trainable_params):\n d_op_matrix = operation_derivative(op)\n\n op.inv()\n # Ideally use use op.adjoint() here\n # then we don't have to re-invert the operation at the end\n ket = self._apply_operation(ket, op)\n\n if op.grad_method is not None:\n if param_number in tape.trainable_params:\n\n ket_temp = self._apply_unitary(ket, d_op_matrix, op.wires)\n\n jac[:, trainable_param_number] = 2 * dot_product_real(bras, ket_temp)\n\n trainable_param_number -= 1\n param_number -= 1\n\n for kk in range(n_obs):\n bras[kk, ...] 
= self._apply_operation(bras[kk, ...], op)\n op.inv()\n\n return jac\n", "# Copyright 2018-2020 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUnit tests for the PennyLane qml.probs() measurement function.\n\"\"\"\nimport pytest\n\nimport numpy as np\nimport pennylane as qml\n\n\n# make the test deterministic\nnp.random.seed(42)\n\n\[email protected]\ndef init_state(scope=\"session\"):\n \"\"\"Fixture that creates an initial state\"\"\"\n\n def _init_state(n):\n \"\"\"An initial state over n wires\"\"\"\n state = np.random.random([2 ** n]) + np.random.random([2 ** n]) * 1j\n state /= np.linalg.norm(state)\n return state\n\n return _init_state\n\n\ndef test_full_prob(init_state, tol):\n \"\"\"Test that the correct probability is returned.\"\"\"\n dev = qml.device(\"default.qubit\", wires=4)\n state = init_state(4)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(state, wires=list(range(4)))\n return qml.probs(wires=range(4))\n\n res = circuit()\n expected = np.abs(state) ** 2\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\ndef test_marginal_prob(init_state, tol):\n \"\"\"Test that the correct marginal probability is returned.\"\"\"\n dev = qml.device(\"default.qubit\", wires=4)\n state = init_state(4)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(state, wires=list(range(4)))\n return qml.probs(wires=[1, 3])\n\n res = circuit()\n expected = np.reshape(np.abs(state) ** 2, [2] * 4)\n expected = np.einsum(\"ijkl->jl\", expected).flatten()\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\ndef test_marginal_prob_more_wires(init_state, mocker, tol):\n \"\"\"Test that the correct marginal probability is returned, when the\n states_to_binary method is used for probability computations.\"\"\"\n dev = qml.device(\"default.qubit\", wires=4)\n state = init_state(4)\n\n spy = mocker.spy(qml.QubitDevice, \"states_to_binary\")\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(state, wires=list(range(4)))\n return qml.probs(wires=[1, 0, 3]) # <--- more than 2 wires: states_to_binary used\n\n res = circuit()\n\n expected = np.reshape(np.abs(state) ** 2, [2] * 4)\n expected = np.einsum(\"ijkl->jil\", expected).flatten()\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n spy.assert_called_once()\n\n\ndef test_integration(tol):\n \"\"\"Test the probability is correct for a known state preparation.\"\"\"\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev)\n def circuit():\n qml.Hadamard(wires=1)\n qml.CNOT(wires=[0, 1])\n return qml.probs(wires=[0, 1])\n\n # expected probability, using [00, 01, 10, 11]\n # ordering, is [0.5, 0.5, 0, 0]\n\n res = circuit()\n expected = np.array([0.5, 0.5, 0, 0])\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\ndef test_integration_analytic_false(tol):\n \"\"\"Test the probability is correct for a known state preparation when the\n analytic attribute is set to False.\"\"\"\n dev = qml.device(\"default.qubit\", wires=3, shots=1000)\n\n @qml.qnode(dev)\n def 
circuit():\n qml.PauliX(0)\n return qml.probs(wires=[0])\n\n res = circuit()\n expected = np.array([0, 1])\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\ndef test_numerical_analytic_diff_agree(init_state, tol):\n \"\"\"Test that the finite difference and parameter shift rule\n provide the same Jacobian.\"\"\"\n w = 4\n dev = qml.device(\"default.qubit\", wires=w)\n state = init_state(w)\n\n def circuit(x, y, z):\n for i in range(w):\n qml.RX(x, wires=i)\n qml.PhaseShift(z, wires=i)\n qml.RY(y, wires=i)\n\n qml.CNOT(wires=[0, 1])\n qml.CNOT(wires=[1, 2])\n qml.CNOT(wires=[2, 3])\n\n return qml.probs(wires=[1, 3])\n\n params = [0.543, -0.765, -0.3]\n\n circuit_F = qml.QNode(circuit, dev, diff_method=\"finite-diff\")\n circuit_A = qml.QNode(circuit, dev, diff_method=\"parameter-shift\")\n res_F = qml.jacobian(circuit_F)(*params)\n res_A = qml.jacobian(circuit_A)(*params)\n\n # Both jacobians should be of shape (2**prob.wires, num_params)\n assert isinstance(res_F, tuple) and len(res_F) == 3\n assert all(_r.shape == (2 ** 2,) for _r in res_F)\n assert isinstance(res_A, tuple) and len(res_A) == 3\n assert all(_r.shape == (2 ** 2,) for _r in res_A)\n\n # Check that they agree up to numeric tolerance\n assert all(np.allclose(_rF, _rA, atol=tol, rtol=0) for _rF, _rA in zip(res_F, res_A))\n\n\[email protected](\"hermitian\", [1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])])\ndef test_prob_generalize_param_one_qubit(hermitian, init_state, tol):\n \"\"\"Test that the correct probability is returned.\"\"\"\n dev = qml.device(\"default.qubit\", wires=1)\n\n @qml.qnode(dev)\n def circuit(x):\n qml.RZ(x, wires=0)\n return qml.probs(op=qml.Hermitian(hermitian, wires=0))\n\n res = circuit(0.56)\n\n def circuit_rotated(x):\n qml.RZ(x, wires=0)\n qml.Hermitian(hermitian, wires=0).diagonalizing_gates()\n\n state = np.array([1, 0])\n matrix = qml.transforms.get_unitary_matrix(circuit_rotated)(0.56)\n state = np.dot(matrix, state)\n expected = np.reshape(np.abs(state) ** 2, [2] * 1)\n expected = expected.flatten()\n\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\[email protected](\"hermitian\", [1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])])\ndef test_prob_generalize_param(hermitian, init_state, tol):\n \"\"\"Test that the correct probability is returned.\"\"\"\n dev = qml.device(\"default.qubit\", wires=3)\n\n @qml.qnode(dev)\n def circuit(x, y):\n qml.RZ(x, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.RY(y, wires=1)\n qml.CNOT(wires=[0, 2])\n return qml.probs(op=qml.Hermitian(hermitian, wires=0))\n\n res = circuit(0.56, 0.1)\n\n def circuit_rotated(x, y):\n qml.RZ(x, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.RY(y, wires=1)\n qml.CNOT(wires=[0, 2])\n qml.Hermitian(hermitian, wires=0).diagonalizing_gates()\n\n state = np.array([1, 0, 0, 0, 0, 0, 0, 0])\n matrix = qml.transforms.get_unitary_matrix(circuit_rotated)(0.56, 0.1)\n state = np.dot(matrix, state)\n expected = np.reshape(np.abs(state) ** 2, [2] * 3)\n expected = np.einsum(\"ijk->i\", expected).flatten()\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\[email protected](\"hermitian\", [1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])])\ndef test_prob_generalize_param_multiple(hermitian, init_state, tol):\n \"\"\"Test that the correct probability is returned.\"\"\"\n dev = qml.device(\"default.qubit\", wires=3)\n\n @qml.qnode(dev)\n def circuit(x, y):\n qml.RZ(x, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.RY(y, wires=1)\n qml.CNOT(wires=[0, 2])\n return (\n qml.probs(op=qml.Hermitian(hermitian, wires=0)),\n qml.probs(wires=[1]),\n 
qml.probs(wires=[2]),\n )\n\n res = circuit(0.56, 0.1)\n res = np.reshape(res, (3, 2))\n\n def circuit_rotated(x, y):\n qml.RZ(x, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.RY(y, wires=1)\n qml.CNOT(wires=[0, 2])\n qml.Hermitian(hermitian, wires=0).diagonalizing_gates()\n\n state = np.array([1, 0, 0, 0, 0, 0, 0, 0])\n matrix = qml.transforms.get_unitary_matrix(circuit_rotated)(0.56, 0.1)\n state = np.dot(matrix, state)\n\n expected = np.reshape(np.abs(state) ** 2, [2] * 3)\n expected_0 = np.einsum(\"ijk->i\", expected).flatten()\n expected_1 = np.einsum(\"ijk->j\", expected).flatten()\n expected_2 = np.einsum(\"ijk->k\", expected).flatten()\n\n assert np.allclose(res[0], expected_0, atol=tol, rtol=0)\n assert np.allclose(res[1], expected_1, atol=tol, rtol=0)\n assert np.allclose(res[2], expected_2, atol=tol, rtol=0)\n\n\[email protected](\"hermitian\", [1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])])\[email protected](\"wire\", [0, 1, 2, 3])\ndef test_prob_generalize_initial_state(hermitian, wire, init_state, tol):\n \"\"\"Test that the correct probability is returned.\"\"\"\n dev = qml.device(\"default.qubit\", wires=4)\n state = init_state(4)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(state, wires=list(range(4)))\n qml.PauliX(wires=0)\n qml.PauliX(wires=1)\n qml.PauliX(wires=2)\n qml.PauliX(wires=3)\n return qml.probs(op=qml.Hermitian(hermitian, wires=wire))\n\n res = circuit()\n\n def circuit_rotated():\n qml.PauliX(wires=0)\n qml.PauliX(wires=1)\n qml.PauliX(wires=2)\n qml.PauliX(wires=3)\n qml.Hermitian(hermitian, wires=wire).diagonalizing_gates()\n\n matrix = qml.transforms.get_unitary_matrix(circuit_rotated)()\n state = np.dot(matrix, state)\n expected = np.reshape(np.abs(state) ** 2, [2] * 4)\n\n if wire == 0:\n expected = np.einsum(\"ijkl->i\", expected).flatten()\n elif wire == 1:\n expected = np.einsum(\"ijkl->j\", expected).flatten()\n elif wire == 2:\n expected = np.einsum(\"ijkl->k\", expected).flatten()\n elif wire == 3:\n expected = np.einsum(\"ijkl->l\", expected).flatten()\n\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\[email protected](\"operation\", [qml.PauliX, qml.PauliY, qml.Hadamard])\[email protected](\"wire\", [0, 1, 2, 3])\ndef test_operation_prob(operation, wire, init_state, tol):\n \"Test the rotated probability with different wires and rotating operations.\"\n dev = qml.device(\"default.qubit\", wires=4)\n state = init_state(4)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(state, wires=list(range(4)))\n qml.PauliX(wires=0)\n qml.PauliZ(wires=1)\n qml.PauliY(wires=2)\n qml.PauliZ(wires=3)\n return qml.probs(op=operation(wires=wire))\n\n res = circuit()\n\n def circuit_rotated():\n qml.PauliX(wires=0)\n qml.PauliZ(wires=1)\n qml.PauliY(wires=2)\n qml.PauliZ(wires=3)\n operation(wires=wire).diagonalizing_gates()\n\n matrix = qml.transforms.get_unitary_matrix(circuit_rotated)()\n state = np.dot(matrix, state)\n expected = np.reshape(np.abs(state) ** 2, [2] * 4)\n\n if wire == 0:\n expected = np.einsum(\"ijkl->i\", expected).flatten()\n elif wire == 1:\n expected = np.einsum(\"ijkl->j\", expected).flatten()\n elif wire == 2:\n expected = np.einsum(\"ijkl->k\", expected).flatten()\n elif wire == 3:\n expected = np.einsum(\"ijkl->l\", expected).flatten()\n\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\[email protected](\"operation\", [qml.PauliX, qml.PauliY, qml.Hadamard])\[email protected](\"wire\", [0, 1, 2, 3])\ndef test_operation_prob(operation, wire, init_state, tol):\n \"Test the rotated probability with 
different wires and rotating operations.\"\n dev = qml.device(\"default.qubit\", wires=4)\n state = init_state(4)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(state, wires=list(range(4)))\n qml.PauliX(wires=0)\n qml.PauliZ(wires=1)\n qml.PauliY(wires=2)\n qml.PauliZ(wires=3)\n return qml.probs(op=operation(wires=wire))\n\n res = circuit()\n\n def circuit_rotated():\n qml.PauliX(wires=0)\n qml.PauliZ(wires=1)\n qml.PauliY(wires=2)\n qml.PauliZ(wires=3)\n operation(wires=wire).diagonalizing_gates()\n\n matrix = qml.transforms.get_unitary_matrix(circuit_rotated)()\n state = np.dot(matrix, state)\n expected = np.reshape(np.abs(state) ** 2, [2] * 4)\n\n if wire == 0:\n expected = np.einsum(\"ijkl->i\", expected).flatten()\n elif wire == 1:\n expected = np.einsum(\"ijkl->j\", expected).flatten()\n elif wire == 2:\n expected = np.einsum(\"ijkl->k\", expected).flatten()\n elif wire == 3:\n expected = np.einsum(\"ijkl->l\", expected).flatten()\n\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\[email protected](\"observable\", [(qml.PauliX, qml.PauliY)])\ndef test_observable_tensor_prob(observable, init_state, tol):\n \"Test the rotated probability with a tensor observable.\"\n dev = qml.device(\"default.qubit\", wires=4)\n state = init_state(4)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(state, wires=list(range(4)))\n qml.PauliX(wires=0)\n qml.PauliZ(wires=1)\n qml.PauliY(wires=2)\n qml.PauliZ(wires=3)\n return qml.probs(op=observable[0](wires=0) @ observable[1](wires=1))\n\n res = circuit()\n\n def circuit_rotated():\n qml.PauliX(wires=0)\n qml.PauliZ(wires=1)\n qml.PauliY(wires=2)\n qml.PauliZ(wires=3)\n observable[0](wires=0).diagonalizing_gates()\n observable[1](wires=1).diagonalizing_gates()\n\n matrix = qml.transforms.get_unitary_matrix(circuit_rotated)()\n state = np.dot(matrix, state)\n expected = np.reshape(np.abs(state) ** 2, [2] * 4)\n\n expected = np.einsum(\"ijkl->ij\", expected).flatten()\n\n assert np.allclose(res, expected, atol=tol, rtol=0)\n\n\[email protected](\"coeffs, obs\", [([1, 1], [qml.PauliX(wires=0), qml.PauliX(wires=1)])])\ndef test_hamiltonian_error(coeffs, obs, init_state, tol):\n \"Test that an error is returned for hamiltonians.\"\n H = qml.Hamiltonian(coeffs, obs)\n\n dev = qml.device(\"default.qubit\", wires=4)\n state = init_state(4)\n\n @qml.qnode(dev)\n def circuit():\n qml.QubitStateVector(state, wires=list(range(4)))\n qml.PauliX(wires=0)\n qml.PauliZ(wires=1)\n qml.PauliY(wires=2)\n qml.PauliZ(wires=3)\n return qml.probs(op=H)\n\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"Hamiltonians are not supported for rotating probabilities.\",\n ):\n circuit()\n\n\[email protected](\n \"operation\", [qml.SingleExcitation, qml.SingleExcitationPlus, qml.SingleExcitationMinus]\n)\ndef test_generalize_prob_not_hermitian(operation):\n \"\"\"Test that the Operation or Observables has a diagonalizing_gates attribute.\"\"\"\n\n dev = qml.device(\"default.qubit\", wires=2)\n\n @qml.qnode(dev)\n def circuit():\n qml.PauliX(wires=0)\n qml.PauliZ(wires=1)\n return qml.probs(op=operation(0.56, wires=[0, 1]))\n\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"has not diagonalizing_gates attribute: cannot be used to rotate the probability\",\n ):\n circuit()\n\n\[email protected](\"hermitian\", [1 / np.sqrt(2) * np.array([[1, 1], [1, -1]])])\ndef test_prob_wires_and_hermitian(hermitian):\n \"\"\"Test that we can cannot give simultaneously wires and a hermitian.\"\"\"\n\n dev = qml.device(\"default.qubit\", 
wires=2)\n\n @qml.qnode(dev)\n def circuit():\n qml.PauliX(wires=0)\n return qml.probs(op=qml.Hermitian(hermitian, wires=0), wires=1)\n\n with pytest.raises(\n qml.QuantumFunctionError,\n match=\"Cannot specify the wires to probs if an observable is \"\n \"provided. The wires for probs will be determined directly from the observable.\",\n ):\n circuit()\n", "# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTests for the Fourier reconstruction transform.\n\"\"\"\nimport pytest\nfrom inspect import signature\nfrom itertools import chain, combinations\nfrom functools import reduce\nimport numpy as np\nimport pennylane as qml\nfrom pennylane import numpy as pnp\nfrom pennylane.fourier.reconstruct import (\n _reconstruct_equ,\n _reconstruct_gen,\n _prepare_jobs,\n reconstruct,\n)\nfrom pennylane.fourier.utils import join_spectra\n\ndev_0 = qml.device(\"default.qubit\", wires=1)\n\n\nclass Lambda:\n def __init__(self, fun):\n self.fun = fun\n\n def __call__(self, *args, **kwargs):\n return self.fun(*args, **kwargs)\n\n\[email protected](dev_0)\ndef dummy_qnode(x):\n qml.RX(x, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n\ndef get_RX_circuit(scales):\n \"\"\"Generate a circuit with Pauli-X rotation gates with ``f*x``\n as argument where the ``f`` s are stored in ``scales`` .\"\"\"\n\n @qml.qnode(dev_0)\n def circuit(x):\n for f in scales:\n qml.RX(f * x, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n return circuit\n\n\ndef fun_close(fun1, fun2, zero=None, tol=1e-5, samples=100):\n X = np.linspace(-np.pi, np.pi, samples)\n if zero is not None:\n X = qml.math.convert_like(X, zero)\n\n for x in X:\n if not np.isclose(fun1(x), fun2(x), atol=tol, rtol=0):\n return False\n return True\n\n\nclass TestErrors:\n \"\"\"Test that errors are raised e.g. 
for invalid inputs.\"\"\"\n\n def test_nums_frequency_and_spectra_missing(self):\n \"\"\"Tests that an error is raised if neither information about the number\n of frequencies nor about the spectrum is given to ``reconstruct``.\"\"\"\n with pytest.raises(ValueError, match=\"Either nums_frequency or spectra must be given.\"):\n reconstruct(dummy_qnode)\n\n @pytest.mark.parametrize(\"num_frequency\", [-3, -9.2, 0.999])\n def test_num_frequency_invalid(self, num_frequency):\n \"\"\"Tests that an error is raised if ``_reconstruct_equ`` receives a\n negative or non-integer ``num_frequency`` .\"\"\"\n with pytest.raises(ValueError, match=\"num_frequency must be a non-negative integer\"):\n _reconstruct_equ(dummy_qnode, num_frequency=num_frequency)\n\n @pytest.mark.parametrize(\n \"spectra, shifts\",\n [\n ({\"x\": {(): [0.0, 1.0]}}, {\"x\": {(): [0.3]}}),\n ({\"x\": {(): [0.0, 1.0, 2.0]}}, {\"x\": {(): list(range(20))}}),\n ],\n )\n def test_wrong_number_of_shifts(self, spectra, shifts):\n \"\"\"Tests that an error is raised if the number of provided shifts does not match.\"\"\"\n with pytest.raises(ValueError, match=\"The number of provided shifts\"):\n reconstruct(dummy_qnode, spectra=spectra, shifts=shifts)\n\n\nclass TestWarnings:\n \"\"\"Test that warnings are raised e.g. for an ill-conditioned\n Fourier transform during the reconstruction.\"\"\"\n\n def test_ill_conditioned(self):\n \"\"\"Test that a warning is raised for an ill-conditioned matrix in the Fourier trafo.\"\"\"\n shifts = [-np.pi / 2 - 1e-9, -np.pi / 2, 0, np.pi / 2, np.pi / 2 + 1e-9]\n with pytest.warns(UserWarning, match=\"condition number of the Fourier\"):\n _reconstruct_gen(dummy_qnode, spectrum=[1.0, 2.0], shifts=shifts)\n\n\nclass TestReconstructEqu:\n \"\"\"Tests the one-dimensional reconstruction subroutine based on equidistantly\n shifted evaluations for equidistant frequencies.\"\"\"\n\n c_funs = [\n lambda x: 13.71 * qml.math.sin(x) - qml.math.cos(2 * x) / 30,\n lambda x: -0.49 * qml.math.sin(3.2 * x),\n lambda x: 0.1 * qml.math.cos(-2.1 * x) + 2.9 * qml.math.sin(4.2 * x - 1.2),\n lambda x: qml.math.ones_like(x) * 4.01,\n lambda x: qml.math.sum(\n [i ** 2 * 0.1 * qml.math.sin(i * 3.921 * x - 2.7 / i) for i in range(1, 10)]\n ),\n ]\n\n nums_frequency = [2, 1, 2, 0, 9]\n base_frequencies = [1.0, 3.2, 2.1, 1.0, 3.921]\n expected_grads = [\n lambda x: 13.71 * qml.math.cos(x) + 2 * qml.math.sin(2 * x) / 30,\n lambda x: -0.49 * qml.math.cos(3.2 * x) * 3.2,\n lambda x: (-2.1) * (-0.1) * qml.math.sin(-2.1 * x)\n + 4.2 * 2.9 * qml.math.cos(4.2 * x - 1.2),\n lambda x: 0.0,\n lambda x: qml.math.sum(\n [i * 3.921 * i ** 2 * 0.1 * qml.math.cos(i * 3.921 * x - 2.7 / i) for i in range(1, 10)]\n ),\n ]\n\n @pytest.mark.parametrize(\n \"fun, num_frequency, base_f\", zip(c_funs, nums_frequency, base_frequencies)\n )\n def test_with_classical_fun(self, fun, num_frequency, base_f, mocker):\n \"\"\"Test that equidistant-frequency classical functions are\n reconstructed correctly (via rescaling to integer frequencies).\"\"\"\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n # Convert fun to have integer frequencies\n _fun = lambda x: Fun(x / base_f)\n _rec = _reconstruct_equ(_fun, num_frequency)\n\n # Convert reconstruction to have original frequencies\n rec = lambda x: _rec(base_f * x)\n assert spy.call_count == num_frequency * 2 + 1\n assert fun_close(fun, rec)\n\n # Repeat, using precomputed f0\n f0 = _fun(0.0)\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n _rec = _reconstruct_equ(_fun, num_frequency, f0=f0)\n # 
Convert reconstruction to have original frequencies\n rec = lambda x: _rec(base_f * x)\n assert spy.call_count == num_frequency * 2\n assert fun_close(fun, rec)\n\n @pytest.mark.parametrize(\n \"fun, num_frequency, base_f, expected_grad\",\n zip(c_funs, nums_frequency, base_frequencies, expected_grads),\n )\n def test_differentiability_autograd(self, fun, num_frequency, base_f, expected_grad):\n \"\"\"Test that the reconstruction of equidistant-frequency classical\n functions are differentiable for Autograd input variables.\"\"\"\n # Convert fun to have integer frequencies\n _fun = lambda x: fun(x / base_f)\n _rec = _reconstruct_equ(_fun, num_frequency, interface=\"autograd\")\n\n # Convert reconstruction to have original frequencies\n rec = lambda x: _rec(base_f * x)\n grad = qml.grad(rec)\n\n assert fun_close(fun, rec, zero=pnp.array(0.0, requires_grad=True))\n assert fun_close(expected_grad, grad, zero=pnp.array(0.0, requires_grad=True))\n\n @pytest.mark.parametrize(\n \"fun, num_frequency, base_f, expected_grad\",\n zip(c_funs, nums_frequency, base_frequencies, expected_grads),\n )\n def test_differentiability_jax(self, fun, num_frequency, base_f, expected_grad):\n \"\"\"Test that the reconstruction of equidistant-frequency classical\n functions are differentiable for JAX input variables.\"\"\"\n jax = pytest.importorskip(\"jax\")\n from jax.config import config\n\n config.update(\"jax_enable_x64\", True)\n # Convert fun to have integer frequencies\n _fun = lambda x: fun(x / base_f)\n _rec = _reconstruct_equ(_fun, num_frequency, interface=\"jax\")\n\n # Convert reconstruction to have original frequencies\n rec = lambda x: _rec(base_f * x)\n grad = jax.grad(rec)\n assert fun_close(fun, rec, zero=jax.numpy.array(0.0))\n assert fun_close(expected_grad, grad, zero=jax.numpy.array(0.0))\n\n @pytest.mark.parametrize(\n \"fun, num_frequency, base_f, expected_grad\",\n zip(c_funs, nums_frequency, base_frequencies, expected_grads),\n )\n def test_differentiability_tensorflow(self, fun, num_frequency, base_f, expected_grad):\n \"\"\"Test that the reconstruction of equidistant-frequency classical\n functions are differentiable for TensorFlow input variables.\"\"\"\n tf = pytest.importorskip(\"tensorflow\")\n # Convert fun to have integer frequencies\n base_f = tf.constant(base_f, dtype=tf.float64)\n _fun = lambda x: fun(x / base_f)\n _rec = _reconstruct_equ(_fun, num_frequency, interface=\"tensorflow\")\n\n # Convert reconstruction to have original frequencies\n rec = lambda x: _rec(base_f * x)\n\n def grad(arg):\n arg = tf.Variable(arg)\n with tf.GradientTape() as tape:\n out = rec(arg)\n return tape.gradient(out, arg)\n\n assert fun_close(fun, rec, zero=tf.Variable(0.0, dtype=tf.float64))\n assert fun_close(expected_grad, grad, zero=tf.Variable(0.0, dtype=tf.float64))\n\n @pytest.mark.parametrize(\n \"fun, num_frequency, base_f, expected_grad\",\n zip(c_funs, nums_frequency, base_frequencies, expected_grads),\n )\n def test_differentiability_torch(self, fun, num_frequency, base_f, expected_grad):\n \"\"\"Test that the reconstruction of equidistant-frequency classical\n functions are differentiable for Torch input variables.\"\"\"\n torch = pytest.importorskip(\"torch\")\n # Convert fun to have integer frequencies\n _fun = lambda x: fun(x / base_f)\n _rec = _reconstruct_equ(_fun, num_frequency, interface=\"torch\")\n\n # Convert reconstruction to have original frequencies\n rec = lambda x: _rec(base_f * x)\n grad = lambda x: torch.autograd.functional.jacobian(rec, x)\n assert fun_close(fun, rec, 
zero=torch.tensor(0.0, requires_grad=True))\n assert fun_close(expected_grad, grad, zero=torch.tensor(0.0, requires_grad=True))\n\n @pytest.mark.parametrize(\n \"fun, num_frequency, base_f\", zip(c_funs, nums_frequency, base_frequencies)\n )\n def test_with_classical_fun_num_freq_too_small(self, fun, num_frequency, base_f, mocker):\n \"\"\"Test that equidistant-frequency classical functions are\n reconstructed wrongly if num_frequency is too small.\"\"\"\n if num_frequency == 0:\n pytest.skip(\"Can't reduce the number of frequencies below 0.\")\n num_frequency -= 1\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n # Convert fun to have integer frequencies\n _fun = lambda x: Fun(x / base_f)\n _rec = _reconstruct_equ(_fun, num_frequency)\n\n # Convert reconstruction to have original frequencies\n rec = lambda x: _rec(base_f * x)\n assert spy.call_count == num_frequency * 2 + 1\n assert not fun_close(fun, rec)\n\n @pytest.mark.parametrize(\n \"fun, num_frequency, base_f\", zip(c_funs, nums_frequency, base_frequencies)\n )\n def test_with_classical_fun_num_freq_too_large(self, fun, num_frequency, base_f, mocker):\n \"\"\"Test that equidistant-frequency classical functions are\n reconstructed correctly if num_frequency is too large.\"\"\"\n num_frequency += 1\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n # Convert fun to have integer frequencies\n _fun = lambda x: Fun(x / base_f)\n _rec = _reconstruct_equ(_fun, num_frequency)\n\n # Convert reconstruction to have original frequencies\n rec = lambda x: _rec(base_f * x)\n assert spy.call_count == num_frequency * 2 + 1\n assert fun_close(fun, rec)\n\n all_scales = [\n [1],\n [0],\n [1, 1],\n [1, 2],\n [1, 5],\n [1, 2, 10],\n ]\n\n @pytest.mark.parametrize(\"scales\", all_scales)\n def test_with_qnode(self, scales, mocker):\n \"\"\"Test that integer-frequency qnodes are reconstructed correctly.\"\"\"\n circuit = get_RX_circuit(scales)\n Fun = Lambda(circuit)\n spy = mocker.spy(Fun, \"fun\")\n num_frequency = sum(scales)\n rec = _reconstruct_equ(Fun, num_frequency)\n assert spy.call_count == num_frequency * 2 + 1\n assert fun_close(circuit, rec)\n\n # Repeat, using precomputed f0\n f0 = circuit(0.0)\n Fun = Lambda(circuit)\n spy = mocker.spy(Fun, \"fun\")\n rec = _reconstruct_equ(Fun, num_frequency, f0=f0)\n assert spy.call_count == num_frequency * 2\n assert fun_close(circuit, rec)\n\n\nclass TestReconstructGen:\n \"\"\"Tests the one-dimensional reconstruction subroutine based on arbitrary\n shifted evaluations for arbitrary frequencies.\"\"\"\n\n c_funs = [\n lambda x: -3.27 * np.sin(0.1712 * x) - np.cos(20.812 * x) / 23,\n lambda x: -0.49 * np.sin(3.2 * x),\n lambda x: 0.1 * np.cos(-0.1 * x) + 2.9 * np.sin(0.3 * x - 1.2),\n lambda x: np.sum([np.sin(i * x) for i in range(1, 10)]),\n lambda x: np.sum(\n [i ** 0.9 * 0.2 * np.sin(i ** 1.2 * 3.921 * x - 5.1 / i) for i in np.arange(1, 10)]\n ),\n ]\n\n spectra = [\n [0.1712, 20.812],\n [3.2],\n [-0.3, -0.1, 0.0, 0.1, 0.3],\n list(np.arange(1, 10)),\n [3.921 * i ** 1.2 for i in np.arange(1, 10)],\n ]\n\n expected_grads = [\n lambda x: -3.27 * np.cos(0.1712 * x) * 0.1712 + np.sin(20.812 * x) / 23 * 20.812,\n lambda x: -0.49 * np.cos(3.2 * x) * 3.2,\n lambda x: (-0.1) ** 2 * np.sin(-0.1 * x) + 0.3 * 2.9 * np.cos(0.3 * x - 1.2),\n lambda x: np.sum([i * np.cos(i * x) for i in range(1, 10)]),\n lambda x: np.sum(\n [i ** 2.1 * 3.921 * 0.2 * np.cos(i ** 1.2 * 3.921 * x - 5.1 / i) for i in range(1, 10)]\n ),\n ]\n\n all_shifts = [\n [-np.pi / 3, -np.pi / 20, 0.0, np.pi / 20, np.pi / 3],\n [-0.15, 
-0.05, 0.05],\n [-2 * np.pi, -np.pi, -0.1, np.pi, 2 * np.pi],\n np.arange(-9, 10) * np.pi / 19,\n np.arange(-9, 10) * np.pi / 19,\n ]\n\n @pytest.mark.parametrize(\"fun, spectrum\", zip(c_funs, spectra))\n def test_with_classical_fun(self, fun, spectrum, mocker):\n \"\"\"Test that arbitrary-frequency classical functions are\n reconstructed correctly.\"\"\"\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n rec = _reconstruct_gen(Fun, spectrum)\n assert spy.call_count == len([f for f in spectrum if f > 0.0]) * 2 + 1\n assert fun_close(fun, rec)\n\n # Repeat, using precomputed f0\n f0 = fun(0.0)\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n rec = _reconstruct_gen(Fun, spectrum, f0=f0)\n assert spy.call_count == len([f for f in spectrum if f > 0.0]) * 2\n assert fun_close(fun, rec)\n\n @pytest.mark.parametrize(\"fun, spectrum, shifts\", zip(c_funs, spectra, all_shifts))\n def test_with_classical_fun_with_shifts(self, fun, spectrum, shifts, mocker, recwarn):\n \"\"\"Test that arbitrary-frequency classical functions are\n reconstructed correctly.\"\"\"\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n rec = _reconstruct_gen(Fun, spectrum, shifts=shifts)\n assert spy.call_count == len([f for f in spectrum if f > 0.0]) * 2 + 1\n assert fun_close(fun, rec)\n\n # Repeat, using precomputed f0\n f0 = fun(0.0)\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n rec = _reconstruct_gen(Fun, spectrum, shifts=shifts, f0=f0)\n if 0.0 in shifts:\n assert spy.call_count == len([f for f in spectrum if f > 0.0]) * 2\n else:\n assert len(recwarn) == 1\n assert recwarn[0].category == UserWarning\n assert recwarn[0].message.args[0].startswith(\"The provided value\")\n assert fun_close(fun, rec)\n\n @pytest.mark.parametrize(\n \"fun, spectrum, expected_grad\",\n zip(c_funs, spectra, expected_grads),\n )\n def test_differentiability_autograd(self, fun, spectrum, expected_grad):\n \"\"\"Test that the reconstruction of equidistant-frequency classical\n functions are differentiable for Autograd input variables.\"\"\"\n # Convert fun to have integer frequencies\n rec = _reconstruct_gen(fun, spectrum, interface=\"autograd\")\n grad = qml.grad(rec)\n assert fun_close(fun, rec, zero=pnp.array(0.0, requires_grad=True))\n assert fun_close(expected_grad, grad, zero=pnp.array(0.0, requires_grad=True))\n\n @pytest.mark.parametrize(\n \"fun, spectrum, expected_grad\",\n zip(c_funs, spectra, expected_grads),\n )\n def test_differentiability_jax(self, fun, spectrum, expected_grad):\n \"\"\"Test that the reconstruction of equidistant-frequency classical\n functions are differentiable for JAX input variables.\"\"\"\n jax = pytest.importorskip(\"jax\")\n from jax.config import config\n\n config.update(\"jax_enable_x64\", True)\n # Convert fun to have integer frequencies\n rec = _reconstruct_gen(fun, spectrum, interface=\"jax\")\n grad = jax.grad(rec)\n assert fun_close(fun, rec, zero=jax.numpy.array(0.0))\n assert fun_close(expected_grad, grad, zero=jax.numpy.array(0.0))\n\n @pytest.mark.parametrize(\n \"fun, spectrum, expected_grad\",\n zip(c_funs, spectra, expected_grads),\n )\n def test_differentiability_tensorflow(self, fun, spectrum, expected_grad):\n \"\"\"Test that the reconstruction of equidistant-frequency classical\n functions are differentiable for TensorFlow input variables.\"\"\"\n tf = pytest.importorskip(\"tensorflow\")\n spectrum = tf.constant(spectrum, dtype=tf.float64)\n # Convert fun to have integer frequencies\n rec = _reconstruct_gen(fun, spectrum, interface=\"tensorflow\")\n\n def 
grad(arg):\n arg = tf.Variable(arg)\n with tf.GradientTape() as tape:\n out = rec(arg)\n return tape.gradient(out, arg)\n\n assert fun_close(fun, rec, zero=tf.Variable(0.0))\n assert fun_close(expected_grad, grad, zero=tf.Variable(0.0))\n\n @pytest.mark.parametrize(\n \"fun, spectrum, expected_grad\",\n zip(c_funs, spectra, expected_grads),\n )\n def test_differentiability_torch(self, fun, spectrum, expected_grad):\n \"\"\"Test that the reconstruction of equidistant-frequency classical\n functions are differentiable for Torch input variables.\"\"\"\n torch = pytest.importorskip(\"torch\")\n spectrum = torch.tensor(spectrum, dtype=torch.float64)\n # Convert fun to have integer frequencies\n rec = _reconstruct_gen(fun, spectrum, interface=\"torch\")\n grad = lambda x: torch.autograd.functional.jacobian(rec, x)\n assert fun_close(fun, rec, zero=torch.tensor(np.float64(0.0), requires_grad=True))\n assert fun_close(\n expected_grad, grad, zero=torch.tensor(np.float64(0.0), requires_grad=True)\n )\n\n @pytest.mark.parametrize(\"fun, spectrum\", zip(c_funs, spectra))\n def test_with_classical_fun_spectrum_incomplete(self, fun, spectrum, mocker):\n \"\"\"Test that arbitrary-frequency classical functions are reconstructed wrongly\n if spectrum does not contain all frequencies.\"\"\"\n if len(spectrum) <= 1:\n pytest.skip(\"Can't skip a frequency if len(spectrum)<=1.\")\n spectrum = spectrum[:-1]\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n rec = _reconstruct_gen(Fun, spectrum)\n\n assert spy.call_count == len([f for f in spectrum if f > 0.0]) * 2 + 1\n assert not fun_close(fun, rec)\n\n @pytest.mark.parametrize(\"fun, spectrum\", zip(c_funs, spectra))\n def test_with_classical_fun_spectrum_overcomplete(self, fun, spectrum, mocker):\n \"\"\"Test that arbitrary-frequency classical functions are reconstructed correctly\n if spectrum contains additional frequencies.\"\"\"\n spectrum = spectrum + [0.4812759, 1.2281]\n Fun = Lambda(fun)\n spy = mocker.spy(Fun, \"fun\")\n # Convert fun to have integer frequencies\n rec = _reconstruct_gen(Fun, spectrum)\n assert spy.call_count == len([f for f in spectrum if f > 0.0]) * 2 + 1\n assert fun_close(fun, rec)\n\n all_scales = [\n [1.3],\n [1.02, 1.59],\n [0.08, 20.2],\n [1.2, 5.0001],\n [1.0, 2.0, 3.0],\n ]\n\n @pytest.mark.parametrize(\"scales\", all_scales)\n def test_with_qnode(self, scales, mocker):\n \"\"\"Test that arbitrary-frequency qnodes are reconstructed correctly.\"\"\"\n circuit = get_RX_circuit(scales)\n Fun = Lambda(circuit)\n spy = mocker.spy(Fun, \"fun\")\n if len(scales) == 1:\n spectrum = sorted({0.0, scales[0]})\n else:\n _spectra = [{0.0, s} for s in scales]\n spectrum = sorted(reduce(join_spectra, _spectra, {0.0}))\n\n rec = _reconstruct_gen(Fun, spectrum)\n assert spy.call_count == len([f for f in spectrum if f > 0.0]) * 2 + 1\n assert fun_close(circuit, rec)\n\n # Repeat, using precomputed f0\n f0 = circuit(0.0)\n Fun = Lambda(circuit)\n spy = mocker.spy(Fun, \"fun\")\n rec = _reconstruct_gen(Fun, spectrum, f0=f0)\n assert spy.call_count == len([f for f in spectrum if f > 0.0]) * 2\n assert fun_close(circuit, rec)\n\n\nall_ids = [\n None,\n {\"x\": [0, 1], \"y\": {1, 5}},\n {\"z\": (0, 9)},\n [\"z\", \"y\", \"x\", \"x\"],\n {\"x\", \"z\", \"y\"},\n (\"y\",),\n \"x\",\n]\n\nall_spectra = [\n {\n \"x\": {0: [0.0], 1: [4.2, 0.0, 0.2]},\n \"y\": {3: [0.3, 0.0, 0.2], 1: [0.0, 1.1, 5.2], 5: [0.0, 1.2]},\n \"z\": {i: [0.0, i * 8.7] for i in range(20)},\n },\n]\n\nall_shifts = [\n {\n \"x\": {0: [-1.3], 1: [1.0, -0.4, 4.2, 2.3, 
-1.5]},\n \"y\": {\n 3: [0.3 * i + 0.05 for i in range(-2, 3)],\n 1: [-1, -0.5, -0.1, 0.1, 0.9],\n 5: [-1, -0.5, -0.2],\n },\n \"z\": {i: [-np.pi / 2, 0.0, np.pi / 2] for i in range(20)},\n },\n]\n\nall_nums_frequency = [\n {\n \"x\": {0: 1, 1: 4},\n \"y\": {3: 1, 1: 1, 5: 9},\n \"z\": {i: 2 * i for i in range(20)},\n },\n]\n\n\nclass TestPrepareJobs:\n \"\"\"Tests the subroutine that determines the 1D reconstruction\n jobs to be carried out for a call to ``reconstruct`` .\"\"\"\n\n def nested_dict_ids_match(self, ndict, ids):\n if ids.keys() != ndict.keys():\n return False\n for id_, ids_ in ids.items():\n if list(ids_) != list(ndict[id_].keys()):\n return False\n return True\n\n def ids_match(self, ids_in, ids_out):\n if type(ids_in) == dict:\n return ids_in == ids_out\n else:\n return all(id_ in ids_in for id_ in ids_out)\n\n @pytest.mark.parametrize(\"ids\", all_ids)\n @pytest.mark.parametrize(\"shifts\", all_shifts)\n def test_missing_spectra_and_nums_frequency(self, ids, shifts, tol):\n \"\"\"Test that an error is raised if both, spectra\n and nums_frequency are missing.\"\"\"\n with pytest.raises(ValueError, match=\"Either nums_frequency or spectra\"):\n _prepare_jobs(ids, nums_frequency=None, spectra=None, shifts=shifts, atol=tol)\n\n @pytest.mark.parametrize(\"ids\", all_ids)\n @pytest.mark.parametrize(\"spectra\", all_spectra)\n def test_with_spectra(self, ids, spectra, tol):\n \"\"\"Test the prepared jobs when using spectra and shifts.\"\"\"\n ids_, recon_fn, jobs, need_f0 = _prepare_jobs(\n ids,\n nums_frequency=None,\n spectra=spectra,\n shifts=None,\n atol=tol,\n )\n if ids is None:\n assert self.nested_dict_ids_match(spectra, ids_)\n else:\n assert self.ids_match(ids, ids_)\n\n # Check function to use for 1D reconstructions\n assert recon_fn == _reconstruct_gen\n\n # Check reconstruction jobs to be run\n assert self.nested_dict_ids_match(jobs, ids_)\n\n # Check all job details\n for _id, _jobs in jobs.items():\n for idx, job in _jobs.items():\n if len(spectra[_id][idx]) == 1:\n assert job is None\n continue\n assert list(job.keys()) == [\"shifts\", \"spectrum\"]\n assert job[\"shifts\"] is None\n assert job[\"spectrum\"] == spectra[_id][idx]\n assert need_f0\n\n @pytest.mark.parametrize(\"ids\", all_ids)\n @pytest.mark.parametrize(\"spectra\", all_spectra)\n @pytest.mark.parametrize(\"shifts\", all_shifts)\n def test_with_spectra_and_shifts(self, ids, spectra, shifts, tol):\n \"\"\"Test the prepared jobs when using spectra and shifts.\"\"\"\n ids_, recon_fn, jobs, need_f0 = _prepare_jobs(\n ids,\n nums_frequency=None,\n spectra=spectra,\n shifts=shifts,\n atol=tol,\n )\n if ids is None:\n assert self.nested_dict_ids_match(spectra, ids_)\n else:\n assert self.ids_match(ids, ids_)\n\n # Check function to use for 1D reconstructions\n assert recon_fn == _reconstruct_gen\n\n # Check reconstruction jobs to be run\n assert self.nested_dict_ids_match(jobs, ids_)\n\n # Check all job details\n for _id, _jobs in jobs.items():\n for idx, job in _jobs.items():\n if len(spectra[_id][idx]) == 1:\n assert job is None\n continue\n assert list(job.keys()) == [\"shifts\", \"spectrum\"]\n assert job[\"shifts\"] == shifts[_id][idx]\n assert job[\"spectrum\"] == spectra[_id][idx]\n # sometimes need fun at zero if general reconstruction is performed\n _all_shifts = chain.from_iterable(\n [\n sum(\n [\n __shifts\n for par_idx, __shifts in _shifts.items()\n if id_ in ids_ and par_idx in ids_[id_]\n ],\n [],\n )\n for id_, _shifts in shifts.items()\n ],\n )\n assert need_f0 == any(np.isclose(_shift, 
0.0, atol=tol, rtol=0) for _shift in _all_shifts)\n\n @pytest.mark.parametrize(\"ids\", all_ids)\n @pytest.mark.parametrize(\"spectra\", all_spectra)\n @pytest.mark.parametrize(\"nums_frequency\", all_nums_frequency)\n def test_with_nums_frequency(self, ids, spectra, nums_frequency, tol):\n \"\"\"Test the prepared jobs when using nums_frequency.\"\"\"\n \"\"\"Test ``_prepare_jobs`` with a large variety of test cases (cheap).\"\"\"\n\n ids_, recon_fn, jobs, need_f0 = _prepare_jobs(\n ids,\n nums_frequency,\n None,\n None,\n atol=tol,\n )\n\n # Check ids\n if ids is None:\n assert self.nested_dict_ids_match(nums_frequency, ids_)\n else:\n assert self.ids_match(ids, ids_)\n\n # Check function to use for 1D reconstructions\n assert recon_fn == _reconstruct_equ\n\n # Check reconstruction jobs to be run\n assert self.nested_dict_ids_match(jobs, ids_)\n\n for _id, _jobs in jobs.items():\n for idx, job in _jobs.items():\n if nums_frequency[_id][idx] == 0:\n assert job is None\n continue\n assert list(job.keys()) == [\"num_frequency\"]\n assert job[\"num_frequency\"] == nums_frequency[_id][idx]\n # always need fun at zero if equidistant reconstruction is performed\n assert need_f0\n\n\ndev_1 = qml.device(\"default.qubit\", wires=2)\n\n\ndef qnode_0(x):\n qml.RX(x, wires=0)\n return qml.expval(qml.PauliZ(0))\n\n\ndef qnode_1(X):\n qml.RX(X[0], wires=0)\n qml.RX(X[0], wires=1)\n qml.RX(X[1], wires=0)\n qml.RX(X[1], wires=1)\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n\ndef qnode_2(X, y):\n qml.RX(X[0], wires=0)\n qml.RX(X[2], wires=1)\n qml.RY(y, wires=0)\n qml.RX(0.5 * X[0], wires=0)\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n\ndef qnode_3(X, Y):\n for i in range(5):\n qml.RX(X[i], wires=0)\n qml.RY(Y[i], wires=0)\n qml.RX(X[i], wires=0)\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n\ndef qnode_4(x):\n return qml.expval(qml.PauliX(0))\n\n\ndef qnode_5(Z, y):\n qml.Hadamard(wires=0)\n qml.Hadamard(wires=1)\n qml.RZ(Z[0, 1], wires=0)\n qml.RZ(Z[2, 4], wires=1)\n qml.RY(y, wires=0)\n return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))\n\n\nx = 0.1\ny = 2.3\n\nX = pnp.array([i ** 1.2 - 2.0 / i for i in range(1, 6)])\nY = pnp.array([i ** 0.9 - 1.0 / i for i in range(1, 6)])\nZ = pnp.array(\n [\n [0.3, 9.1, -0.2, 0.6, 1.2],\n [0.9, -0.1, 1.6, 2.3, -1.5],\n [0.3, 0.1, -0.9, 0.6, 1.8],\n ]\n)\n\ntest_cases_qnodes = [\n (qnode_0, (x,), \"x\", None, {\"x\": {(): [0.0, 1.0]}}, None, 3),\n (\n qnode_0,\n (x,),\n {\"x\": [()]},\n None,\n {\"x\": {(): [0.0, 1.0]}},\n {\"x\": {(): [-np.pi / 3, 0.0, np.pi / 3]}},\n 3,\n ),\n (qnode_0, (x,), \"x\", {\"x\": {(): 1}}, None, None, 3),\n (qnode_1, (X,), {\"X\"}, None, {\"X\": {(0,): [0.0, 1.0, 2.0], (1,): [0.0, 2.0]}}, None, 7),\n (\n qnode_1,\n (X,),\n \"X\",\n None,\n {\"X\": {(0,): [0.0, 2.0]}},\n {\"X\": {(0,): [-np.pi / 2, -0.1, np.pi / 5]}},\n 3,\n ),\n (qnode_1, (X,), [\"X\"], {\"X\": {(0,): 2, (1,): 2}}, None, None, 9),\n (\n qnode_2,\n (X, y),\n [\"X\", \"y\"],\n None,\n {\"X\": {(0,): [0.0, 0.5, 1.0, 1.5], (2,): [0.0, 1.0]}, \"y\": {(): [0.0, 1.0]}},\n None,\n 11,\n ),\n (\n qnode_3,\n (X, Y),\n {\"X\": [(0,), (3,)], \"Y\": ((4,), (1,))},\n {\"X\": {(i,): 2 for i in range(5)}, \"Y\": {(i,): 1 for i in range(5)}},\n None,\n None,\n 13,\n ),\n (qnode_4, (x,), [\"x\"], {\"x\": {(): 0}}, None, None, 1),\n (qnode_5, (Z, y), [\"Z\"], {\"Z\": {(0, 1): 1, (2, 4): 1, (1, 3): 0}}, None, None, 5),\n]\n\n\nclass TestReconstruct:\n \"\"\"Tests the integration of ``_reconstruct_equ`` and ``_reconstruct_gen`` via\n the full ``reconstruct`` function 
as well as the differentiability of the\n reconstructed function with respect to their single scalar argument.\"\"\"\n\n @pytest.mark.parametrize(\n \"qnode, params, ids, nums_frequency, spectra, shifts, exp_calls\",\n test_cases_qnodes,\n )\n def test_with_qnode(\n self, qnode, params, ids, nums_frequency, spectra, shifts, exp_calls, mocker\n ):\n \"\"\"Run a full reconstruction on a QNode.\"\"\"\n qnode = qml.QNode(qnode, dev_1)\n\n with qml.Tracker(qnode.device) as tracker:\n recons = reconstruct(qnode, ids, nums_frequency, spectra, shifts)(*params)\n assert tracker.totals[\"executions\"] == exp_calls\n arg_names = list(signature(qnode.func).parameters.keys())\n for outer_key in recons:\n outer_key_num = arg_names.index(outer_key)\n for inner_key, rec in recons[outer_key].items():\n x0 = params[outer_key_num]\n if not pnp.isscalar(x0):\n x0 = x0[inner_key]\n shift_vec = qml.math.zeros_like(params[outer_key_num])\n shift_vec[inner_key] = 1.0\n shift_vec = 1.0 if pnp.isscalar(params[outer_key_num]) else shift_vec\n mask = (\n 0.0\n if pnp.isscalar(params[outer_key_num])\n else pnp.ones(qml.math.shape(params[outer_key_num])) - shift_vec\n )\n univariate = lambda x: qnode(\n *params[:outer_key_num],\n params[outer_key_num] * mask + x * shift_vec,\n *params[outer_key_num + 1 :],\n )\n assert np.isclose(rec(x0), qnode(*params))\n assert np.isclose(rec(x0 + 0.1), univariate(x0 + 0.1))\n assert fun_close(rec, univariate, 10)\n\n @pytest.mark.parametrize(\n \"qnode, params, ids, nums_frequency, spectra, shifts, exp_calls\",\n test_cases_qnodes,\n )\n def test_differentiability_autograd(\n self, qnode, params, ids, nums_frequency, spectra, shifts, exp_calls, mocker\n ):\n \"\"\"Tests the reconstruction and differentiability with autograd.\"\"\"\n qnode = qml.QNode(qnode, dev_1, interface=\"autograd\")\n with qml.Tracker(qnode.device) as tracker:\n recons = reconstruct(qnode, ids, nums_frequency, spectra, shifts)(*params)\n assert tracker.totals[\"executions\"] == exp_calls\n arg_names = list(signature(qnode.func).parameters.keys())\n for outer_key in recons:\n outer_key_num = arg_names.index(outer_key)\n for inner_key, rec in recons[outer_key].items():\n x0 = params[outer_key_num]\n if not pnp.isscalar(x0):\n x0 = x0[inner_key]\n shift_vec = qml.math.zeros_like(params[outer_key_num])\n shift_vec[inner_key] = 1.0\n shift_vec = 1.0 if pnp.isscalar(params[outer_key_num]) else shift_vec\n mask = (\n 0.0\n if pnp.isscalar(params[outer_key_num])\n else pnp.ones(qml.math.shape(params[outer_key_num])) - shift_vec\n )\n univariate = lambda x: qnode(\n *params[:outer_key_num],\n params[outer_key_num] * mask + x * shift_vec,\n *params[outer_key_num + 1 :],\n )\n exp_qnode_grad = qml.grad(qnode, argnum=outer_key_num)\n exp_grad = qml.grad(univariate)\n grad = qml.grad(rec)\n if nums_frequency is None:\n # Gradient evaluation at reconstruction point not supported for\n # Dirichlet reconstruction\n assert np.isclose(grad(x0), exp_qnode_grad(*params)[inner_key])\n assert np.isclose(grad(x0 + 0.1), exp_grad(x0 + 0.1))\n assert fun_close(grad, exp_grad, 10)\n\n @pytest.mark.parametrize(\n \"qnode, params, ids, nums_frequency, spectra, shifts, exp_calls\",\n test_cases_qnodes,\n )\n def test_differentiability_jax(\n self, qnode, params, ids, nums_frequency, spectra, shifts, exp_calls, mocker\n ):\n \"\"\"Tests the reconstruction and differentiability with JAX.\"\"\"\n jax = pytest.importorskip(\"jax\")\n from jax.config import config\n\n config.update(\"jax_enable_x64\", True)\n params = tuple(jax.numpy.array(par) 
for par in params)\n qnode = qml.QNode(qnode, dev_1, interface=\"jax\")\n with qml.Tracker(qnode.device) as tracker:\n recons = reconstruct(qnode, ids, nums_frequency, spectra, shifts)(*params)\n assert tracker.totals[\"executions\"] == exp_calls\n arg_names = list(signature(qnode.func).parameters.keys())\n for outer_key in recons:\n outer_key_num = arg_names.index(outer_key)\n for inner_key, rec in recons[outer_key].items():\n x0 = params[outer_key_num]\n if not pnp.isscalar(x0):\n x0 = x0[inner_key]\n shift_vec = qml.math.zeros_like(params[outer_key_num])\n shift_vec = qml.math.scatter_element_add(shift_vec, inner_key, 1.0)\n shift_vec = 1.0 if pnp.isscalar(params[outer_key_num]) else shift_vec\n mask = (\n 0.0\n if pnp.isscalar(params[outer_key_num])\n else pnp.ones(qml.math.shape(params[outer_key_num])) - shift_vec\n )\n univariate = lambda x: qnode(\n *params[:outer_key_num],\n params[outer_key_num] * mask + x * shift_vec,\n *params[outer_key_num + 1 :],\n )\n exp_qnode_grad = jax.grad(qnode, argnums=outer_key_num)\n exp_grad = jax.grad(univariate)\n grad = jax.grad(rec)\n assert np.isclose(grad(x0), exp_qnode_grad(*params)[inner_key])\n assert np.isclose(grad(x0 + 0.1), exp_grad(x0 + 0.1))\n assert fun_close(grad, exp_grad, 10)\n\n @pytest.mark.parametrize(\n \"qnode, params, ids, nums_frequency, spectra, shifts, exp_calls\",\n test_cases_qnodes,\n )\n def test_differentiability_tensorflow(\n self, qnode, params, ids, nums_frequency, spectra, shifts, exp_calls, mocker\n ):\n \"\"\"Tests the reconstruction and differentiability with TensorFlow.\"\"\"\n if qnode == qnode_4:\n pytest.skip(\"Gradients are empty in TensorFlow for independent functions.\")\n tf = pytest.importorskip(\"tensorflow\")\n qnode = qml.QNode(qnode, dev_1, interface=\"tf\")\n params = tuple(tf.Variable(par, dtype=tf.float64) for par in params)\n if spectra is not None:\n spectra = {\n outer_key: {\n inner_key: tf.constant(val, dtype=tf.float64)\n for inner_key, val in outer_val.items()\n }\n for outer_key, outer_val in spectra.items()\n }\n if shifts is not None:\n shifts = {\n outer_key: {\n inner_key: tf.constant(val, dtype=tf.float64)\n for inner_key, val in outer_val.items()\n }\n for outer_key, outer_val in shifts.items()\n }\n with qml.Tracker(qnode.device) as tracker:\n recons = reconstruct(qnode, ids, nums_frequency, spectra, shifts)(*params)\n assert tracker.totals[\"executions\"] == exp_calls\n arg_names = list(signature(qnode.func).parameters.keys())\n for outer_key in recons:\n outer_key_num = arg_names.index(outer_key)\n for inner_key, rec in recons[outer_key].items():\n if outer_key == \"Z\" and inner_key == (1, 3):\n # This is a constant function dependence, which can\n # not be properly resolved by this test.\n continue\n x0 = params[outer_key_num]\n if not len(qml.math.shape(x0)) == 0:\n x0 = x0[inner_key]\n shift_vec = qml.math.zeros_like(params[outer_key_num])\n shift_vec = qml.math.scatter_element_add(shift_vec, inner_key, 1.0)\n mask = pnp.ones(qml.math.shape(params[outer_key_num])) - shift_vec\n else:\n shift_vec = 1.0\n mask = 0.0\n univariate = lambda x: qnode(\n *params[:outer_key_num],\n params[outer_key_num] * mask + x * shift_vec,\n *params[outer_key_num + 1 :],\n )\n with tf.GradientTape() as tape:\n out = qnode(*params)\n exp_qnode_grad = tape.gradient(out, params[outer_key_num])\n\n def exp_grad(x):\n x = tf.Variable(x, dtype=tf.float64)\n with tf.GradientTape() as tape:\n out = univariate(x)\n return tape.gradient(out, x)\n\n def grad(x):\n x = tf.Variable(x, dtype=tf.float64)\n 
with tf.GradientTape() as tape:\n out = rec(x)\n return tape.gradient(out, x)\n\n if nums_frequency is None:\n # Gradient evaluation at reconstruction point not supported for\n # Dirichlet reconstruction\n assert np.isclose(grad(x0), exp_qnode_grad[inner_key])\n assert np.isclose(grad(x0 + 0.1), exp_grad(x0 + 0.1))\n assert fun_close(grad, exp_grad, 10)\n\n @pytest.mark.parametrize(\n \"qnode, params, ids, nums_frequency, spectra, shifts, exp_calls\",\n test_cases_qnodes,\n )\n def test_differentiability_torch(\n self, qnode, params, ids, nums_frequency, spectra, shifts, exp_calls, mocker\n ):\n \"\"\"Tests the reconstruction and differentiability with Torch.\"\"\"\n torch = pytest.importorskip(\"torch\")\n qnode = qml.QNode(qnode, dev_1, interface=\"torch\")\n params = tuple(torch.tensor(par, requires_grad=True, dtype=torch.float64) for par in params)\n if spectra is not None:\n spectra = {\n outer_key: {\n inner_key: torch.tensor(val, dtype=torch.float64)\n for inner_key, val in outer_val.items()\n }\n for outer_key, outer_val in spectra.items()\n }\n if shifts is not None:\n shifts = {\n outer_key: {\n inner_key: torch.tensor(val, dtype=torch.float64)\n for inner_key, val in outer_val.items()\n }\n for outer_key, outer_val in shifts.items()\n }\n with qml.Tracker(qnode.device) as tracker:\n recons = reconstruct(qnode, ids, nums_frequency, spectra, shifts)(*params)\n assert tracker.totals[\"executions\"] == exp_calls\n arg_names = list(signature(qnode.func).parameters.keys())\n for outer_key in recons:\n outer_key_num = arg_names.index(outer_key)\n for inner_key, rec in recons[outer_key].items():\n x0 = params[outer_key_num]\n if not len(qml.math.shape(x0)) == 0:\n x0 = x0[inner_key]\n shift_vec = qml.math.zeros_like(params[outer_key_num])\n shift_vec = qml.math.scatter_element_add(shift_vec, inner_key, 1.0)\n mask = torch.ones(qml.math.shape(params[outer_key_num])) - shift_vec\n else:\n shift_vec = 1.0\n mask = 0.0\n univariate = lambda x: qnode(\n *params[:outer_key_num],\n params[outer_key_num] * mask + x * shift_vec,\n *params[outer_key_num + 1 :],\n )\n exp_qnode_grad = torch.autograd.functional.jacobian(qnode, params)[outer_key_num]\n\n exp_grad = lambda x: torch.autograd.functional.jacobian(univariate, x)\n grad = lambda x: torch.autograd.functional.jacobian(rec, x)\n\n assert np.isclose(grad(x0), exp_qnode_grad[inner_key])\n assert np.isclose(grad(x0 + 0.1), exp_grad(x0 + 0.1))\n assert fun_close(\n grad, exp_grad, zero=torch.tensor(0.0, requires_grad=True), samples=10\n )\n" ]
[ [ "numpy.unique", "numpy.random.choice", "numpy.arange", "numpy.mean", "numpy.prod", "numpy.var", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.empty" ], [ "numpy.dot", "numpy.random.random", "numpy.allclose", "numpy.random.seed", "numpy.abs", "numpy.reshape", "numpy.einsum", "numpy.sqrt", "numpy.linalg.norm", "numpy.array" ], [ "numpy.linspace", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.float64", "numpy.isclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
HappyBall/asr_guided_tacotron
[ "be36f0895b81e338c5c51a7ab6d421fbf3aa055b" ]
[ "synthesize.py" ]
[ "'''\nmodified from\nhttps://www.github.com/kyubyong/tacotron\n'''\n\nfrom hyperparams import Hyperparams as hp\nimport tqdm\nfrom data_load import load_data\nimport tensorflow as tf\nfrom train import Graph\nfrom utils import spectrogram2wav\nfrom scipy.io.wavfile import write\nimport os\nimport numpy as np\n\n\ndef synthesize():\n if not os.path.exists(hp.taco_sampledir): os.mkdir(hp.taco_sampledir)\n\n # Load graph\n g = Graph(mode=\"synthesize\"); print(\"Graph loaded\")\n\n # Load data\n texts = load_data(mode=\"synthesize\")\n _, mel_ref, _ = load_spectrograms(hp.ref_wavfile)\n mel_ref = np.tile(mel_ref, (texts.shape[0], 1, 1))\n\n saver = tf.train.Saver()\n with tf.Session() as sess:\n saver.restore(sess, tf.train.latest_checkpoint(hp.taco_logdir)); print(\"Restored!\")\n\n # Feed Forward\n ## mel\n _y_hat = sess.run(g.diff_mels_taco_hat, {g.random_texts_taco: texts, g.mels_taco: mel_ref})\n y_hat = _y_hat # we can plot spectrogram\n\n mags = sess.run(g.diff_mags_taco_hat, {g.diff_mels_taco_hat: y_hat})\n for i, mag in enumerate(mags):\n print(\"File {}.wav is being generated ...\".format(i+1))\n audio = spectrogram2wav(mag)\n write(os.path.join(hp.taco_sampledir, '{}.wav'.format(i+1)), hp.sr, audio)\n\nif __name__ == '__main__':\n synthesize()\n print(\"Done\")\n\n" ]
[ [ "tensorflow.train.Saver", "tensorflow.train.latest_checkpoint", "numpy.tile", "tensorflow.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
amir-rahim/BookClubSocialNetwork
[ "b69a07cd33592f700214252a64c7c1c53845625d" ]
[ "BookClub/management/commands/importuserstargeted.py" ]
[ "from django.core.management.base import BaseCommand\nimport random\nfrom faker import Faker\nimport pandas as pd\nfrom pandas import DataFrame\nimport time\nfrom BookClub.management.commands.helper import get_top_n_books, get_top_n_users_who_have_rated_xyz_books, get_top_n_books_shifted\n\nfrom BookClub.models.user import User\n\n\nclass Command(BaseCommand):\n \"\"\"The database seeder.\"\"\"\n\n def handle(self, *args, **options):\n tic = time.time()\n model_instances = self.import_users()\n try:\n User.objects.bulk_create(model_instances)\n except Exception as e:\n print(e)\n toc = time.time()\n total = toc-tic\n print('Done in {:.4f} seconds'.format(total))\n print(str(len(model_instances)) + \" Users created\")\n\n def import_users(self):\n file_path = (\"static/dataset/BX-Users.csv\")\n\n data = DataFrame(pd.read_csv(file_path, header=0, encoding= \"ISO-8859-1\", sep=';'))\n\n isbns = get_top_n_books_shifted(300)\n rating_users = get_top_n_users_who_have_rated_xyz_books(1000, isbns)\n faker = Faker()\n\n chosen_users = data[data['User-ID'].isin(rating_users)]\n chosen_users = chosen_users.to_dict('records')\n\n model_instances = []\n i = 0;\n for record in chosen_users:\n i +=1\n Faker.seed(i)\n u = User(\n pk=i,\n username=faker.unique.user_name(),\n email=faker.unique.email(),\n password='pbkdf2_sha256$260000$qw2y9qdBlYmFUZVdkUqlOO$nuzhHvRnVDDOAo70OL14IEqk+bASVNTLjWS1N+c40VU=',\n )\n model_instances.append(u)\n\n return model_instances\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
openghg/openghg
[ "9a05dd6fe3cee6123898b8f390cfaded08dbb408" ]
[ "tests/standardise/surface/test_glasgow_licor.py" ]
[ "from helpers import get_mobile_datapath\nfrom openghg.standardise.surface import parse_glasow_licor\nfrom pandas import Timestamp\nimport pytest\n\n\ndef test_glasgow_licor_read():\n test_data = get_mobile_datapath(filename=\"glasgow_licor_sample.txt\")\n data = parse_glasow_licor(filepath=test_data)\n\n ch4_data = data[\"ch4\"][\"data\"]\n metadata = data[\"ch4\"][\"metadata\"]\n\n assert ch4_data.time[0] == Timestamp(\"2021-08-25T14:35:57\")\n assert ch4_data.longitude[0] == pytest.approx(-4.2321)\n assert ch4_data.latitude[0] == pytest.approx(55.82689833)\n assert ch4_data.ch4[0] == 13.43\n\n assert metadata == {'units': 'ppb', \n 'notes': 'measurement value is methane enhancement over background',\n 'sampling_period': 'NOT_SET'}\n" ]
[ [ "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hwangoh/uq-vae
[ "24a3d26987e2ec807d57601b14c68b22f3652a18", "24a3d26987e2ec807d57601b14c68b22f3652a18", "24a3d26987e2ec807d57601b14c68b22f3652a18", "24a3d26987e2ec807d57601b14c68b22f3652a18", "24a3d26987e2ec807d57601b14c68b22f3652a18" ]
[ "codes/projects/test_continuous_parameter/utils_project/prediction_and_plotting_routine_vaeiaf.py", "codes/projects/test_continuous_parameter/utils_project/training_routine_vae_full_model_aware.py", "codes/projects/advection_diffusion_2d/utils_project/construct_prior_dict.py", "codes/src/optimize/distributed/optimize_distributed_vae_model_aware.py", "codes/src/neural_networks/nn_vae_full.py" ]
[ "'''Prediction and plotting routine\n\nIn preparation for prediction and plotting, this script will:\n 1) Load the obs_dimensions\n 2) Specify the input_dimensions and latent_dimensions\n 3) Instantiate the DataHandler class\n 4) Instantiate the neural network\n 5) Load the trained neural network weights\n 6) Select and prepare an illustrative test example\n 7) Draw from the predicted posterior by utilizing nn.iaf_chain_posterior as\n well as the encoder\n 8) Predict the state using the draw from the posterior either using the\n modelled or learned (decoder) parameter-to-observable map\n 9) Plot the prediction\n\nInputs:\n - hyperp: dictionary storing set hyperparameter values\n - options: dictionary storing the set options\n - filepaths: instance of the FilePaths class storing the default strings for\n importing and exporting required objects.\n\nAuthor: Hwan Goh, Oden Institute, Austin, Texas 2020\n'''\nimport sys\nsys.path.append('../../../../..')\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nplt.ioff() # Turn interactive plotting off\n\n# Import src code\nfrom utils_data.data_handler import DataHandler\nfrom neural_networks.nn_vaeiaf import VAEIAF\nfrom utils_misc.positivity_constraints import positivity_constraint_log_exp\n\n# Import FEM Code\nfrom Finite_Element_Method.src.load_mesh import load_mesh\nfrom utils_project.plot_fem_function import plot_fem_function\n\nimport pdb #Equivalent of keyboard in MATLAB, just add \"pdb.set_trace()\"\n\n###############################################################################\n# Plot Predictions #\n###############################################################################\ndef predict_and_plot(hyperp, options, filepaths):\n\n #=== Load Observation Indices ===#\n if options.obs_type == 'full':\n obs_dimensions = options.parameter_dimensions\n if options.obs_type == 'obs':\n obs_dimensions = options.num_obs_points\n\n #=== Data and Latent Dimensions of Autoencoder ===#\n input_dimensions = obs_dimensions\n latent_dimensions = options.parameter_dimensions\n\n #=== Prepare Data ===#\n data = DataHandler(hyperp, options, filepaths,\n options.parameter_dimensions, obs_dimensions)\n data.load_data_test()\n if options.add_noise == 1:\n data.add_noise_qoi_test()\n parameter_test = data.poi_test\n state_obs_test = data.qoi_test\n\n #=== Load Trained Neural Network ===#\n nn = VAEIAF(hyperp, options,\n input_dimensions, latent_dimensions,\n None, None,\n None, None,\n positivity_constraint_log_exp)\n nn.load_weights(filepaths.trained_nn)\n\n #=== Selecting Samples ===#\n sample_number = 105\n parameter_test_sample = np.expand_dims(parameter_test[sample_number,:], 0)\n state_obs_test_sample = np.expand_dims(state_obs_test[sample_number,:], 0)\n\n #=== Predictions ===#\n parameter_pred_sample, _ = nn.iaf_chain_posterior(\n nn.encoder(state_obs_test_sample))\n state_obs_pred_sample = nn.decoder(parameter_test_sample)\n parameter_pred_sample = parameter_pred_sample.numpy().flatten()\n state_obs_pred_sample = state_obs_pred_sample.numpy().flatten()\n\n #=== Plotting Prediction ===#\n print('================================')\n print(' Plotting Predictions ')\n print('================================')\n #=== Load Mesh ===#\n nodes, elements, _, _, _, _, _, _ = load_mesh(filepaths.project)\n\n #=== Plot FEM Functions ===#\n plot_fem_function(filepaths.figures_savefile_name_parameter_test,\n 'True Parameter', 7.0,\n nodes, elements,\n parameter_test_sample)\n 
plot_fem_function(filepaths.figures_savefile_name_parameter_pred,\n 'Parameter Prediction', 7.0,\n nodes, elements,\n parameter_pred_sample)\n if options.obs_type == 'full':\n plot_fem_function(filepaths.figures_savefile_name_state_test,\n 'True State', 2.6,\n nodes, elements,\n state_obs_test_sample)\n plot_fem_function(filepaths.figures_savefile_name_state_pred,\n 'State Prediction', 2.6,\n nodes, elements,\n state_obs_pred_sample)\n\n print('Predictions plotted')\n\n###############################################################################\n# Plot Metrics #\n###############################################################################\ndef plot_and_save_metrics(hyperp, options, filepaths):\n print('================================')\n print(' Plotting Metrics ')\n print('================================')\n #=== Load Metrics ===#\n print('Loading Metrics')\n df_metrics = pd.read_csv(filepaths.trained_nn + \"_metrics\" + '.csv')\n array_metrics = df_metrics.to_numpy()\n\n ####################\n # Load Metrics #\n ####################\n storage_array_loss_train = array_metrics[:,0]\n storage_array_loss_train_VAE = array_metrics[:,1]\n storage_array_loss_train_encoder = array_metrics[:,2]\n storage_array_relative_error_input_VAE = array_metrics[:,10]\n storage_array_relative_error_latent_encoder = array_metrics[:,11]\n storage_array_relative_error_input_decoder = array_metrics[:,12]\n storage_array_relative_gradient_norm = array_metrics[:,13]\n\n ################\n # Plotting #\n ################\n #=== Loss Train ===#\n fig_loss = plt.figure()\n x_axis = np.linspace(1, hyperp.num_epochs, hyperp.num_epochs, endpoint = True)\n plt.plot(x_axis, np.log(storage_array_loss_train))\n plt.title('Log-Loss for Training Neural Network')\n plt.xlabel('Epochs')\n plt.ylabel('Log-Loss')\n figures_savefile_name = filepaths.directory_figures + '/' +\\\n 'loss.png'\n plt.savefig(figures_savefile_name)\n plt.close(fig_loss)\n\n #=== Loss Autoencoder ===#\n fig_loss = plt.figure()\n x_axis = np.linspace(1, hyperp.num_epochs, hyperp.num_epochs, endpoint = True)\n plt.plot(x_axis, np.log(storage_array_loss_train_VAE))\n plt.title('Log-Loss for VAE')\n plt.xlabel('Epochs')\n plt.ylabel('Log-Loss')\n figures_savefile_name = filepaths.directory_figures + '/' +\\\n 'loss_autoencoder.png'\n plt.savefig(figures_savefile_name)\n plt.close(fig_loss)\n\n #=== Loss Encoder ===#\n fig_loss = plt.figure()\n x_axis = np.linspace(1, hyperp.num_epochs, hyperp.num_epochs, endpoint = True)\n plt.plot(x_axis, np.log(storage_array_loss_train_encoder))\n plt.title('Log-Loss for Encoder')\n plt.xlabel('Epochs')\n plt.ylabel('Log-Loss')\n figures_savefile_name = filepaths.directory_figures + '/' +\\\n 'loss_encoder.png'\n plt.savefig(figures_savefile_name)\n plt.close(fig_loss)\n\n #=== Relative Error Autoencoder ===#\n fig_accuracy = plt.figure()\n x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)\n plt.plot(x_axis, storage_array_relative_error_input_VAE)\n plt.title('Relative Error for Autoencoder')\n plt.xlabel('Epochs')\n plt.ylabel('Relative Error')\n figures_savefile_name = filepaths.directory_figures + '/' +\\\n 'relative_error_autoencoder.png'\n plt.savefig(figures_savefile_name)\n plt.close(fig_accuracy)\n\n #=== Relative Error Encoder ===#\n fig_accuracy = plt.figure()\n x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)\n plt.plot(x_axis, storage_array_relative_error_latent_encoder)\n plt.title('Relative Error for Encoder')\n plt.xlabel('Epochs')\n 
plt.ylabel('Relative Error')\n figures_savefile_name = filepaths.directory_figures + '/' +\\\n 'relative_error_encoder.png'\n plt.savefig(figures_savefile_name)\n plt.close(fig_accuracy)\n\n #=== Relative Error Decoder ===#\n fig_accuracy = plt.figure()\n x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)\n plt.plot(x_axis, storage_array_relative_error_input_decoder)\n plt.title('Relative Error for Decoder')\n plt.xlabel('Epochs')\n plt.ylabel('Relative Error')\n figures_savefile_name = filepaths.directory_figures + '/' +\\\n 'relative_error_decoder.png'\n plt.savefig(figures_savefile_name)\n plt.close(fig_accuracy)\n\n #=== Relative Gradient Norm ===#\n fig_gradient_norm = plt.figure()\n x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)\n plt.plot(x_axis, storage_array_relative_gradient_norm)\n plt.title('Relative Gradient Norm')\n plt.xlabel('Epochs')\n plt.ylabel('Relative Error')\n figures_savefile_name = filepaths.directory_figures + '/' +\\\n 'relative_error_gradient_norm.png'\n plt.savefig(figures_savefile_name)\n plt.close(fig_gradient_norm)\n\n if options.model_augmented == 1:\n #=== Relative Error Decoder ===#\n fig_loss = plt.figure()\n x_axis = np.linspace(1,hyperp.num_epochs, hyperp.num_epochs, endpoint = True)\n plt.plot(x_axis, storage_array_loss_train_forward_model)\n plt.title('Log-loss Forward Model')\n plt.xlabel('Epochs')\n plt.ylabel('Relative Error')\n figures_savefile_name = filepaths.directory_figures + '/' +\\\n 'loss_forward_model.png'\n plt.savefig(figures_savefile_name)\n plt.close(fig_loss)\n\n print('Plotting complete')\n", "'''Training routine for the case where posterior model possesses a full\ncovariance and the parameter-to-observable map is learned\n\nIn preparation for prediction and plotting, this script will:\n 1) Specify which GPU to use for optimization\n 2) Form the batches for the training, validation and testing sets\n 3) Specify the input_dimensions and latent_dimensions\n 4) Specify the probability density for the initial guess of the weights and bias\n 5) Instantiate the neural network\n 6) Specify the optimizer\n 7) Call the optimization routine\n\nInputs:\n - hyperp: dictionary storing set hyperparameter values\n - options: dictionary storing the set options\n - filepaths: instance of the FilePaths class storing the default strings for\n importing and exporting required objects.\n - data_dict: dictionary storing the dataset related objects\n - prior_dict: dictionary storing the prior related objects\n\nAuthor: Hwan Goh, Oden Institute, Austin, Texas 2020\n'''\nimport os\nimport sys\n\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\n\n# Import src code\nfrom utils_training.form_train_val_test import form_train_val_test_tf_batches\nfrom neural_networks.nn_vae_full import VAE\nfrom optimize.single.optimize_vae_full_model_aware import optimize\nfrom optimize.distributed.optimize_distributed_vae_full_model_aware import optimize_distributed\nfrom utils_misc.positivity_constraints import positivity_constraint_exp,\\\n positivity_constraint_log_exp\n\nimport pdb\n\n###############################################################################\n# Training #\n###############################################################################\ndef training(hyperp, options, filepaths,\n data_dict, prior_dict):\n\n #=== GPU Settings ===#\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n if options.distributed_training == 0:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = 
options.which_gpu\n if options.distributed_training == 1:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = options.dist_which_gpus\n gpus = tf.config.experimental.list_physical_devices('GPU')\n\n #=== Construct Validation Set and Batches ===#\n input_and_latent_train, input_and_latent_val, input_and_latent_test,\\\n num_batches_train, num_batches_val, num_batches_test\\\n = form_train_val_test_tf_batches(\n data_dict[\"state_obs_train\"], data_dict[\"parameter_train\"],\n data_dict[\"state_obs_test\"], data_dict[\"parameter_test\"],\n hyperp.batch_size, options.random_seed)\n\n #=== Data and Latent Dimensions of Autoencoder ===#\n input_dimensions = data_dict[\"obs_dimensions\"]\n latent_dimensions = options.parameter_dimensions\n\n #=== Neural Network Regularizers ===#\n kernel_initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05)\n bias_initializer = 'zeros'\n\n #=== Non-distributed Training ===#\n if options.distributed_training == 0:\n #=== Neural Network ===#\n nn = VAE(hyperp, options,\n input_dimensions, latent_dimensions,\n kernel_initializer, bias_initializer,\n tf.identity)\n\n #=== Optimizer ===#\n optimizer = tf.keras.optimizers.Adam()\n\n #=== Training ===#\n optimize(hyperp, options, filepaths,\n nn, optimizer,\n input_and_latent_train, input_and_latent_val, input_and_latent_test,\n input_dimensions, latent_dimensions, num_batches_train,\n data_dict[\"noise_regularization_matrix\"],\n prior_dict[\"prior_mean\"], prior_dict[\"prior_covariance_inverse\"])\n\n #=== Distributed Training ===#\n if options.distributed_training == 1:\n dist_strategy = tf.distribute.MirroredStrategy()\n with dist_strategy.scope():\n #=== Neural Network ===#\n nn = VAE(hyperp, options,\n input_dimensions, latent_dimensions,\n kernel_initializer, bias_initializer,\n tf.identity)\n\n #=== Optimizer ===#\n optimizer = tf.keras.optimizers.Adam()\n\n #=== Training ===#\n optimize_distributed(dist_strategy,\n hyperp, options, filepaths,\n nn, optimizer,\n input_and_latent_train, input_and_latent_val, input_and_latent_test,\n input_dimensions, latent_dimensions, num_batches_train,\n data_dict[\"noise_regularization_matrix\"],\n prior_dict[\"prior_mean\"], prior_dict[\"prior_covariance_inverse\"])\n", "'''Constructs project specific dictionary containing prior model related objects\n\nTo construct the dictionary, the code will create an instance of the PriorHandler\nclass. 
Utilizing the methods of this class then loads the covariance related\nobjects.\n\nInputs:\n - hyperp: dictionary storing set hyperparameter values\n - options: dictionary storing the set options\n - filepaths: class instance storing the filepaths\n - load_covariance_: flag that dictates whether to load variants\n of the covariance\n\nAuthor: Hwan Goh, Oden Institute, Austin, Texas 2020\n'''\nimport numpy as np\nimport pandas as pd\n\nfrom utils_data.prior_handler import PriorHandler\n\nimport pdb #Equivalent of keyboard in MATLAB, just add \"pdb.set_trace()\"\n\ndef construct_prior_dict(hyperp, options, filepaths,\n load_mean = True,\n load_covariance = True,\n load_covariance_inverse = True,\n load_covariance_cholesky = True,\n load_covariance_cholesky_inverse = True):\n\n prior_dict = {}\n prior = PriorHandler(hyperp, options, filepaths,\n options.parameter_dimensions)\n\n #=== Prior Mean ===#\n if load_mean == True:\n prior_mean = prior.load_prior_mean()\n prior_dict[\"prior_mean\"] = np.expand_dims(prior_mean, 0)\n\n #=== Prior Covariance ===#\n if load_covariance == True:\n prior_covariance = prior.load_prior_covariance()\n prior_dict[\"prior_covariance\"] = prior_covariance\n\n #=== Prior Covariance Inverse ===#\n if load_covariance_inverse == True:\n prior_covariance_inverse = prior.load_prior_covariance_inverse()\n prior_dict[\"prior_covariance_inverse\"] = prior_covariance_inverse\n\n #=== Prior Covariance Cholesky ===#\n if load_covariance_cholesky == True:\n prior_covariance_cholesky = prior.load_prior_covariance_cholesky()\n prior_dict[\"prior_covariance_cholesky\"] = prior_covariance_cholesky\n\n #=== Prior Covariance Cholesky Inverse ===#\n if load_covariance_cholesky_inverse == True:\n prior_covariance_cholesky_inverse = prior.load_prior_covariance_cholesky_inverse()\n prior_dict[\"prior_covariance_cholesky_inverse\"] = prior_covariance_cholesky_inverse\n\n return prior_dict\n", "'''Distributed optimization routine for the case where the model\nposterior possesses a full covariance and the parameter-to-observable map is\nlearned\n\nIn preparation for optimization, this script will:\n 1) Constuct any objects necessary to be passed to the loss functionals\n 2) Instantiate the Metrics class\n 3) Instantiate the Tensorboard summary_writer\n 4) Build the neural network and display a summary\n\nThen, per epoch, this script will:\n 1) Using train_step() form the batched gradient using the training set\n 2) Using val_step() evaluate the metrics on the validation set\n 3) Using test_step() evaluate the metrics on the testing set\n 4) Update the Tensorboard metrics\n 5) Update the storage arrays\n 6) Display and reset the current metric values\n 7) Output the metrics, current values of the neural network weights and\n dump the hyperp and options dictionaries into uq-vae/trained_nns/\n\nInputs:\n - dist_strategy: the distribution strategy used for parallelized optimization\n - hyperp: dictionary storing set hyperparameter values\n - options: dictionary storing the set options\n - filepaths: instance of the FilePaths class storing the default strings for\n importing and exporting required objects.\n - nn: the neural network to be trained\n - optimizer: Tensorflow optimizer to be used\n - input_and_latent_: batched train, validation and testing datasets\n - input_dimension: dimension of the input layer of the neural network\n - latent_dimension: dimension of the model posterior mean estimate output by\n the encoder\n - num_batches_train: batch_size\n - noise_regularization_matrix: noise 
covariance matrix for the likelihood term\n - prior_mean: mean of the prior model\n - prior_cov_inv: inverse of the covariance of the prior model\n\nAuthor: Hwan Goh, Oden Institute, Austin, Texas 2020\n'''\nimport sys\nsys.path.append('../..')\n\nimport shutil # for deleting directories\nimport os\nimport time\n\nimport tensorflow as tf\nimport numpy as np\n\n# Import src code\nfrom utils_training.metrics_distributed_vae import Metrics\nfrom utils_io.config_io import dump_attrdict_as_yaml\nfrom utils_training.functionals import\\\n loss_diagonal_weighted_penalized_difference, loss_kld,\\\n relative_error\n\nimport pdb #Equivalent of keyboard in MATLAB, just add \"pdb.set_trace()\"\n\n###############################################################################\n# Training Properties #\n###############################################################################\ndef optimize_distributed(dist_strategy,\n hyperp, options, filepaths,\n nn, optimizer,\n input_and_latent_train, input_and_latent_val, input_and_latent_test,\n input_dimensions, latent_dimension, num_batches_train,\n noise_regularization_matrix,\n prior_mean, prior_cov_inv):\n\n #=== Check Number of Parallel Computations and Set Global Batch Size ===#\n print('Number of Replicas in Sync: %d' %(dist_strategy.num_replicas_in_sync))\n\n #=== Distribute Data ===#\n dist_input_and_latent_train =\\\n dist_strategy.experimental_distribute_dataset(input_and_latent_train)\n dist_input_and_latent_val = dist_strategy.experimental_distribute_dataset(input_and_latent_val)\n dist_input_and_latent_test = dist_strategy.experimental_distribute_dataset(input_and_latent_test)\n\n #=== Metrics ===#\n metrics = Metrics(dist_strategy)\n\n #=== Creating Directory for Trained Neural Network ===#\n if not os.path.exists(filepaths.directory_trained_nn):\n os.makedirs(filepaths.directory_trained_nn)\n\n #=== Tensorboard ===# \"tensorboard --logdir=Tensorboard\"\n if os.path.exists(filepaths.directory_tensorboard):\n shutil.rmtree(filepaths.directory_tensorboard)\n summary_writer = tf.summary.create_file_writer(filepaths.directory_tensorboard)\n\n #=== Display Neural Network Architecture ===#\n with dist_strategy.scope():\n nn.build((hyperp.batch_size, input_dimensions))\n nn.summary()\n\n###############################################################################\n# Training, Validation and Testing Step #\n###############################################################################\n with dist_strategy.scope():\n #=== Training Step ===#\n def train_step(batch_input_train, batch_latent_train):\n with tf.GradientTape() as tape:\n batch_likelihood_train = nn(batch_input_train)\n batch_post_mean_train, batch_log_post_var_train = nn.encoder(batch_input_train)\n\n unscaled_replica_batch_loss_train_vae =\\\n loss_diagonal_weighted_penalized_difference(\n batch_input_train, batch_likelihood_train,\n noise_regularization_matrix,\n 1)\n unscaled_replica_batch_loss_train_kld =\\\n loss_kld(\n batch_post_mean_train, batch_log_post_var_train,\n prior_mean, prior_cov_inv,\n 1)\n unscaled_replica_batch_loss_train_posterior =\\\n (1-hyperp.penalty_js)/hyperp.penalty_js *\\\n tf.reduce_sum(batch_log_post_var_train,axis=1) +\\\n loss_diagonal_weighted_penalized_difference(\n batch_latent_train, batch_post_mean_train,\n 1/tf.math.exp(batch_log_post_var_train/2),\n (1-hyperp.penalty_js)/hyperp.penalty_js)\n\n unscaled_replica_batch_loss_train =\\\n -(-unscaled_replica_batch_loss_train_vae\\\n -unscaled_replica_batch_loss_train_kld\\\n 
-unscaled_replica_batch_loss_train_posterior)\n scaled_replica_batch_loss_train =\\\n tf.reduce_sum(unscaled_replica_batch_loss_train * (1./hyperp.batch_size))\n\n gradients = tape.gradient(scaled_replica_batch_loss_train, nn.trainable_variables)\n optimizer.apply_gradients(zip(gradients, nn.trainable_variables))\n metrics.mean_loss_train_vae(unscaled_replica_batch_loss_train_vae)\n metrics.mean_loss_train_encoder(unscaled_replica_batch_loss_train_kld)\n metrics.mean_loss_train_posterior(unscaled_replica_batch_loss_train_posterior)\n\n return scaled_replica_batch_loss_train\n\n @tf.function\n def dist_train_step(batch_input_train, batch_latent_train):\n per_replica_losses = dist_strategy.experimental_run_v2(\n train_step, args=(batch_input_train, batch_latent_train))\n return dist_strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)\n\n #=== Validation Step ===#\n def val_step(batch_input_val, batch_latent_val):\n batch_likelihood_val = nn(batch_input_val)\n batch_post_mean_val, batch_log_post_var_val = nn.encoder(batch_input_val)\n\n unscaled_replica_batch_loss_val_vae =\\\n loss_diagonal_weighted_penalized_difference(\n batch_input_val, batch_likelihood_val,\n noise_regularization_matrix,\n 1)\n unscaled_replica_batch_loss_val_kld =\\\n loss_kld(\n batch_post_mean_val, batch_log_post_var_val,\n prior_mean, prior_cov_inv,\n 1)\n unscaled_replica_batch_loss_val_posterior =\\\n (1-hyperp.penalty_js)/hyperp.penalty_js *\\\n tf.reduce_sum(batch_log_post_var_val,axis=1) +\\\n loss_diagonal_weighted_penalized_difference(\n batch_latent_val, batch_post_mean_val,\n 1/tf.math.exp(batch_log_post_var_val/2),\n (1-hyperp.penalty_js)/hyperp.penalty_js)\n\n unscaled_replica_batch_loss_val =\\\n -(-unscaled_replica_batch_loss_val_vae\\\n -unscaled_replica_batch_loss_val_kld\\\n -unscaled_replica_batch_loss_val_posterior)\n\n metrics.mean_loss_val(unscaled_replica_batch_loss_val)\n metrics.mean_loss_val_vae(unscaled_replica_batch_loss_val_vae)\n metrics.mean_loss_val_encoder(unscaled_replica_batch_loss_val_kld)\n metrics.mean_loss_val_posterior(unscaled_replica_batch_loss_val_posterior)\n\n # @tf.function\n def dist_val_step(batch_input_val, batch_latent_val):\n return dist_strategy.experimental_run_v2(\n val_step, (batch_input_val, batch_latent_val))\n\n #=== Test Step ===#\n def test_step(batch_input_test, batch_latent_test):\n batch_likelihood_test = nn(batch_input_test)\n batch_post_mean_test, batch_log_post_var_test = nn.encoder(batch_input_test)\n batch_input_pred_test = nn.decoder(batch_latent_test)\n\n unscaled_replica_batch_loss_test_vae =\\\n loss_diagonal_weighted_penalized_difference(\n batch_input_test, batch_likelihood_test,\n noise_regularization_matrix,\n 1)\n unscaled_replica_batch_loss_test_kld =\\\n loss_kld(\n batch_post_mean_test, batch_log_post_var_test,\n prior_mean, prior_cov_inv,\n 1)\n unscaled_replica_batch_loss_test_posterior =\\\n (1-hyperp.penalty_js)/hyperp.penalty_js *\\\n tf.reduce_sum(batch_log_post_var_test,axis=1) +\\\n loss_diagonal_weighted_penalized_difference(\n batch_latent_test, batch_post_mean_test,\n 1/tf.math.exp(batch_log_post_var_test/2),\n (1-hyperp.penalty_js)/hyperp.penalty_js)\n\n unscaled_replica_batch_loss_test =\\\n -(-unscaled_replica_batch_loss_test_vae\\\n -unscaled_replica_batch_loss_test_kld\\\n -unscaled_replica_batch_loss_test_posterior)\n\n metrics.mean_loss_test(unscaled_replica_batch_loss_test)\n metrics.mean_loss_test_vae(unscaled_replica_batch_loss_test_vae)\n 
metrics.mean_loss_test_encoder(unscaled_replica_batch_loss_test_kld)\n metrics.mean_loss_test_posterior(unscaled_replica_batch_loss_test_posterior)\n\n metrics.mean_relative_error_input_vae(relative_error(\n batch_input_test, batch_likelihood_test))\n metrics.mean_relative_error_latent_posterior(relative_error(\n batch_latent_test, batch_post_mean_test))\n metrics.mean_relative_error_input_decoder(relative_error(\n batch_input_test, batch_input_pred_test))\n\n # @tf.function\n def dist_test_step(batch_input_test, batch_latent_test):\n return dist_strategy.experimental_run_v2(\n test_step, (batch_input_test, batch_latent_test))\n\n###############################################################################\n# Train Neural Network #\n###############################################################################\n print('Beginning Training')\n for epoch in range(hyperp.num_epochs):\n print('================================')\n print(' Epoch %d ' %(epoch))\n print('================================')\n print('Project: ' + filepaths.case_name + '\\n' + 'nn: ' + filepaths.nn_name + '\\n')\n print('GPUs: ' + options.dist_which_gpus + '\\n')\n print('Optimizing %d batches of size %d:' %(num_batches_train, hyperp.batch_size))\n start_time_epoch = time.time()\n batch_counter = 0\n total_loss_train = 0\n for batch_input_train, batch_latent_train in dist_input_and_latent_train:\n start_time_batch = time.time()\n #=== Compute Train Step ===#\n batch_loss_train = dist_train_step(\n batch_input_train, batch_latent_train)\n total_loss_train += batch_loss_train\n elapsed_time_batch = time.time() - start_time_batch\n if batch_counter == 0:\n print('Time per Batch: %.4f' %(elapsed_time_batch))\n batch_counter += 1\n metrics.mean_loss_train = total_loss_train/batch_counter\n\n #=== Computing Validation Metrics ===#\n for batch_input_val, batch_latent_val in dist_input_and_latent_val:\n dist_val_step(batch_input_val, batch_latent_val)\n\n #=== Computing Test Metrics ===#\n for batch_input_test, batch_latent_test in dist_input_and_latent_test:\n dist_test_step(batch_input_test, batch_latent_test)\n\n #=== Tensorboard Tracking Training Metrics, Weights and Gradients ===#\n metrics.update_tensorboard(summary_writer, epoch)\n\n #=== Update Storage Arrays ===#\n metrics.update_storage_arrays()\n\n #=== Display Epoch Iteration Information ===#\n elapsed_time_epoch = time.time() - start_time_epoch\n print('Time per Epoch: %.4f\\n' %(elapsed_time_epoch))\n print('Train Loss: Full: %.3e, VAE: %.3e, kld: %.3e, Posterior: %.3e'\\\n %(metrics.mean_loss_train,\n metrics.mean_loss_train_vae.result(),\n metrics.mean_loss_train_encoder.result(),\n metrics.mean_loss_train_posterior.result()))\n print('Val Loss: Full: %.3e, VAE: %.3e, kld: %.3e, Posterior: %.3e'\\\n %(metrics.mean_loss_val.result(),\n metrics.mean_loss_val_vae.result(),\n metrics.mean_loss_val_encoder.result(),\n metrics.mean_loss_val_posterior.result()))\n print('Test Loss: Full: %.3e, VAE: %.3e, kld: %.3e, Posterior: %.3e'\\\n %(metrics.mean_loss_test.result(),\n metrics.mean_loss_test_vae.result(),\n metrics.mean_loss_test_encoder.result(),\n metrics.mean_loss_val_posterior.result()))\n print('Rel Errors: VAE: %.3e, Posterior Mean: %.3e, Decoder: %.3e\\n'\\\n %(metrics.mean_relative_error_input_vae.result(),\n metrics.mean_relative_error_latent_posterior.result(),\n metrics.mean_relative_error_input_decoder.result()))\n start_time_epoch = time.time()\n\n #=== Resetting Metrics ===#\n metrics.reset_metrics()\n\n #=== Save Current Model and Metrics ===#\n 
if epoch % 5 == 0:\n nn.save_weights(filepaths.trained_nn)\n metrics.save_metrics(filepaths)\n dump_attrdict_as_yaml(hyperp, filepaths.directory_trained_nn, 'hyperp')\n dump_attrdict_as_yaml(options, filepaths.directory_trained_nn, 'options')\n print('Current Model and Metrics Saved')\n\n #=== Save Final Model ===#\n nn.save_weights(filepaths.trained_nn)\n metrics.save_metrics(filepaths)\n dump_attrdict_as_yaml(hyperp, filepaths.directory_trained_nn, 'hyperp')\n dump_attrdict_as_yaml(options, filepaths.directory_trained_nn, 'options')\n print('Final Model and Metrics Saved')\n", "'''Class for the UQ-VAE when the posterior model possesses a full covariance\n\nIn preparation for constructing the neural network, this code will:\n 1) Construct the architecture list that stores the number of nodes in\n each layer of the neural network\n 2) Construct the activations list that stores the activation function used at\n each layer\n 3) Instantiate the Encoder class using the architecture and activations lists\n 4) Instantiate the Decoder class using the architecture and activations lists\n\nInputs:\n - hyperp: dictionary storing set hyperparameter values\n - options: dictionary storing the set options\n - input_dimension: dimension of the input layer of the neural network\n - latent_dimension: dimension of the model posterior mean estimate output by\n the encoder\n - kernel_initializer: probability density of the initial guess of the weights\n - bias_initializer: probability density of the initial guess of the biases\n - positivity constraint: function mapping to positive values. Use\n tf.identity if positivity is not required\n\nAuthor: Hwan Goh, Oden Institute, Austin, Texas 2020\n'''\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Dense, Conv2D, Flatten\nfrom tensorflow.keras.initializers import RandomNormal\nimport pdb #Equivalent of keyboard in MATLAB, just add \"pdb.set_trace()\"\n\n###############################################################################\n# Variational Autoencoder #\n###############################################################################\nclass VAE(tf.keras.Model):\n def __init__(self, hyperp, options,\n input_dimensions, latent_dimensions,\n kernel_initializer, bias_initializer,\n positivity_constraint):\n super(VAE, self).__init__()\n\n #=== Define Architecture and Create Layer Storage ===#\n self.architecture = [input_dimensions] +\\\n [hyperp.num_hidden_nodes_encoder]*hyperp.num_hidden_layers_encoder +\\\n [latent_dimensions + latent_dimensions + latent_dimensions**2] +\\\n [hyperp.num_hidden_nodes_decoder]*hyperp.num_hidden_layers_decoder +\\\n [input_dimensions]\n\n #=== Define Other Attributes ===#\n self.options = options\n self.positivity_constraint = positivity_constraint\n self.activations = ['not required'] +\\\n [hyperp.activation]*hyperp.num_hidden_layers_encoder +\\\n ['linear'] +\\\n [hyperp.activation]*hyperp.num_hidden_layers_decoder +\\\n ['linear']\n\n #=== Encoder and Decoder ===#\n self.encoder = Encoder(options, latent_dimensions,\n hyperp.num_hidden_layers_encoder + 1,\n self.architecture, self.activations,\n kernel_initializer, bias_initializer)\n if self.options.model_aware == 1:\n self.decoder = Decoder(options,\n hyperp.num_hidden_layers_encoder + 1,\n self.architecture, self.activations,\n kernel_initializer, bias_initializer,\n len(self.architecture) - 1)\n\n #=== Variational Autoencoder Propagation ===#\n def reparameterize(self, mean, post_cov_chol):\n eps = 
tf.random.normal(shape=(mean.shape[1]**2,mean.shape[1]))\n return self.positivity_constraint(\n mean + tf.matmul(post_cov_chol,eps))\n\n def call(self, X):\n post_mean, log_post_std, post_cov_chol = self.encoder(X)\n if self.options.model_augmented == True:\n return post_mean, log_post_std, post_cov_chol\n if self.options.model_aware == True:\n z = self.reparameterize(post_mean, post_cov_chol)\n likelihood_mean = self.decoder(z)\n return likelihood_mean\n\n###############################################################################\n# Encoder #\n###############################################################################\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self, options,\n latent_dimensions,\n truncation_layer,\n architecture,\n activations,\n kernel_initializer, bias_initializer):\n super(Encoder, self).__init__()\n\n self.options = options\n self.latent_dimensions = latent_dimensions\n self.vec_lowtri_ones = tf.reshape(\n tf.transpose(\n tf.cast(np.tril(np.ones(latent_dimensions), -1), tf.float32)),\n (latent_dimensions**2,1))\n self.ones = tf.ones([latent_dimensions,1], tf.float32)\n self.truncation_layer = truncation_layer\n self.hidden_layers_encoder = [] # This will be a list of layers\n\n for l in range(1, truncation_layer+1):\n hidden_layer_encoder = tf.keras.layers.Dense(units = architecture[l],\n activation = activations[l],\n use_bias = True,\n kernel_initializer = kernel_initializer,\n bias_initializer = bias_initializer,\n name = \"W\" + str(l))\n self.hidden_layers_encoder.append(hidden_layer_encoder)\n\n def call(self, X):\n for hidden_layer in enumerate(self.hidden_layers_encoder):\n if self.options.resnet == True\\\n and 0 < hidden_layer[0] < self.truncation_layer-1:\n X += hidden_layer[1](X)\n else:\n X = hidden_layer[1](X)\n post_mean, log_post_std, post_cov_lowtri =\\\n tf.split(X, [self.latent_dimensions, self.latent_dimensions,\n self.latent_dimensions**2], axis=1)\n post_cov_chol = self.form_post_cov_chol(log_post_std, post_cov_lowtri)\n return post_mean, log_post_std, post_cov_chol\n\n def form_post_cov_chol(self, log_post_std, post_cov_lowtri):\n return tf.multiply(post_cov_lowtri, tf.transpose(self.vec_lowtri_ones)) +\\\n tf.multiply(tf.linalg.LinearOperatorKronecker(\n [tf.linalg.LinearOperatorFullMatrix(tf.transpose(self.ones)),\n tf.linalg.LinearOperatorFullMatrix(tf.math.exp(log_post_std))]).to_dense(),\n tf.transpose(\n tf.reshape(tf.eye(self.latent_dimensions, dtype=tf.float32),\n (self.latent_dimensions**2,1))))\n\n###############################################################################\n# Decoder #\n###############################################################################\nclass Decoder(tf.keras.layers.Layer):\n def __init__(self, options,\n truncation_layer,\n architecture,\n activations,\n kernel_initializer, bias_initializer,\n last_layer_index):\n super(Decoder, self).__init__()\n\n self.options = options\n self.truncation_layer = truncation_layer\n self.last_layer_index = last_layer_index\n self.hidden_layers_decoder = [] # This will be a list of layers\n\n for l in range(truncation_layer+1, last_layer_index+1):\n hidden_layer_decoder = tf.keras.layers.Dense(units = architecture[l],\n activation = activations[l],\n use_bias = True,\n kernel_initializer = kernel_initializer,\n bias_initializer = bias_initializer,\n name = \"W\" + str(l))\n self.hidden_layers_decoder.append(hidden_layer_decoder)\n\n def call(self, X):\n for hidden_layer in enumerate(self.hidden_layers_decoder):\n if self.options.resnet == True\\\n 
and self.truncation_layer < hidden_layer[0]+self.truncation_layer\\\n < self.last_layer_index-1:\n X += hidden_layer[1](X)\n else:\n X = hidden_layer[1](X)\n return X\n" ]
[ [ "numpy.log", "numpy.expand_dims", "pandas.read_csv", "numpy.linspace", "matplotlib.pyplot.title", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ioff", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.figure" ], [ "tensorflow.config.experimental.list_physical_devices", "tensorflow.keras.initializers.RandomNormal", "tensorflow.keras.optimizers.Adam", "tensorflow.distribute.MirroredStrategy" ], [ "numpy.expand_dims" ], [ "tensorflow.reduce_sum", "tensorflow.GradientTape", "tensorflow.math.exp", "tensorflow.summary.create_file_writer" ], [ "tensorflow.matmul", "tensorflow.transpose", "tensorflow.ones", "tensorflow.eye", "numpy.ones", "tensorflow.math.exp", "tensorflow.split", "tensorflow.random.normal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ajspera/pandas
[ "f38020f33052ea9029b410d7fae79bc8f249c0ac", "f38020f33052ea9029b410d7fae79bc8f249c0ac", "f38020f33052ea9029b410d7fae79bc8f249c0ac" ]
[ "pandas/tests/arithmetic/test_object.py", "pandas/tests/groupby/aggregate/test_other.py", "pandas/tests/frame/test_timeseries.py" ]
[ "# Arithmetic tests for DataFrame/Series/Index/Array classes that should\n# behave identically.\n# Specifically for object dtype\nfrom decimal import Decimal\nimport operator\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Series, Timestamp\nfrom pandas.core import ops\nimport pandas.util.testing as tm\n\n# ------------------------------------------------------------------\n# Comparisons\n\n\nclass TestObjectComparisons:\n def test_comparison_object_numeric_nas(self):\n ser = Series(np.random.randn(10), dtype=object)\n shifted = ser.shift(2)\n\n ops = [\"lt\", \"le\", \"gt\", \"ge\", \"eq\", \"ne\"]\n for op in ops:\n func = getattr(operator, op)\n\n result = func(ser, shifted)\n expected = func(ser.astype(float), shifted.astype(float))\n tm.assert_series_equal(result, expected)\n\n def test_object_comparisons(self):\n ser = Series([\"a\", \"b\", np.nan, \"c\", \"a\"])\n\n result = ser == \"a\"\n expected = Series([True, False, False, False, True])\n tm.assert_series_equal(result, expected)\n\n result = ser < \"a\"\n expected = Series([False, False, False, False, False])\n tm.assert_series_equal(result, expected)\n\n result = ser != \"a\"\n expected = -(ser == \"a\")\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_more_na_comparisons(self, dtype):\n left = Series([\"a\", np.nan, \"c\"], dtype=dtype)\n right = Series([\"a\", np.nan, \"d\"], dtype=dtype)\n\n result = left == right\n expected = Series([True, False, False])\n tm.assert_series_equal(result, expected)\n\n result = left != right\n expected = Series([False, True, True])\n tm.assert_series_equal(result, expected)\n\n result = left == np.nan\n expected = Series([False, False, False])\n tm.assert_series_equal(result, expected)\n\n result = left != np.nan\n expected = Series([True, True, True])\n tm.assert_series_equal(result, expected)\n\n\n# ------------------------------------------------------------------\n# Arithmetic\n\n\nclass TestArithmetic:\n\n # TODO: parametrize\n def test_pow_ops_object(self):\n # GH#22922\n # pow is weird with masking & 1, so testing here\n a = Series([1, np.nan, 1, np.nan], dtype=object)\n b = Series([1, np.nan, np.nan, 1], dtype=object)\n result = a ** b\n expected = Series(a.values ** b.values, dtype=object)\n tm.assert_series_equal(result, expected)\n\n result = b ** a\n expected = Series(b.values ** a.values, dtype=object)\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\"op\", [operator.add, ops.radd])\n @pytest.mark.parametrize(\"other\", [\"category\", \"Int64\"])\n def test_add_extension_scalar(self, other, box, op):\n # GH#22378\n # Check that scalars satisfying is_extension_array_dtype(obj)\n # do not incorrectly try to dispatch to an ExtensionArray operation\n\n arr = pd.Series([\"a\", \"b\", \"c\"])\n expected = pd.Series([op(x, other) for x in arr])\n\n arr = tm.box_expected(arr, box)\n expected = tm.box_expected(expected, box)\n\n result = op(arr, other)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"box\",\n [\n pytest.param(\n pd.Index,\n marks=pytest.mark.xfail(reason=\"Does not mask nulls\", raises=TypeError),\n ),\n pd.Series,\n pd.DataFrame,\n ],\n ids=lambda x: x.__name__,\n )\n def test_objarr_add_str(self, box):\n ser = pd.Series([\"x\", np.nan, \"x\"])\n expected = pd.Series([\"xa\", np.nan, \"xa\"])\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = ser + \"a\"\n tm.assert_equal(result, 
expected)\n\n @pytest.mark.parametrize(\n \"box\",\n [\n pytest.param(\n pd.Index,\n marks=pytest.mark.xfail(reason=\"Does not mask nulls\", raises=TypeError),\n ),\n pd.Series,\n pd.DataFrame,\n ],\n ids=lambda x: x.__name__,\n )\n def test_objarr_radd_str(self, box):\n ser = pd.Series([\"x\", np.nan, \"x\"])\n expected = pd.Series([\"ax\", np.nan, \"ax\"])\n\n ser = tm.box_expected(ser, box)\n expected = tm.box_expected(expected, box)\n\n result = \"a\" + ser\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n \"data\",\n [\n [1, 2, 3],\n [1.1, 2.2, 3.3],\n [Timestamp(\"2011-01-01\"), Timestamp(\"2011-01-02\"), pd.NaT],\n [\"x\", \"y\", 1],\n ],\n )\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_objarr_radd_str_invalid(self, dtype, data, box):\n ser = Series(data, dtype=dtype)\n\n ser = tm.box_expected(ser, box)\n with pytest.raises(TypeError):\n \"foo_\" + ser\n\n @pytest.mark.parametrize(\"op\", [operator.add, ops.radd, operator.sub, ops.rsub])\n def test_objarr_add_invalid(self, op, box):\n # invalid ops\n\n obj_ser = tm.makeObjectSeries()\n obj_ser.name = \"objects\"\n\n obj_ser = tm.box_expected(obj_ser, box)\n with pytest.raises(Exception):\n op(obj_ser, 1)\n with pytest.raises(Exception):\n op(obj_ser, np.array(1, dtype=np.int64))\n\n # TODO: Moved from tests.series.test_operators; needs cleanup\n def test_operators_na_handling(self):\n ser = Series([\"foo\", \"bar\", \"baz\", np.nan])\n result = \"prefix_\" + ser\n expected = pd.Series([\"prefix_foo\", \"prefix_bar\", \"prefix_baz\", np.nan])\n tm.assert_series_equal(result, expected)\n\n result = ser + \"_suffix\"\n expected = pd.Series([\"foo_suffix\", \"bar_suffix\", \"baz_suffix\", np.nan])\n tm.assert_series_equal(result, expected)\n\n # TODO: parametrize over box\n @pytest.mark.parametrize(\"dtype\", [None, object])\n def test_series_with_dtype_radd_timedelta(self, dtype):\n # note this test is _not_ aimed at timedelta64-dtyped Series\n ser = pd.Series(\n [pd.Timedelta(\"1 days\"), pd.Timedelta(\"2 days\"), pd.Timedelta(\"3 days\")],\n dtype=dtype,\n )\n expected = pd.Series(\n [pd.Timedelta(\"4 days\"), pd.Timedelta(\"5 days\"), pd.Timedelta(\"6 days\")]\n )\n\n result = pd.Timedelta(\"3 days\") + ser\n tm.assert_series_equal(result, expected)\n\n result = ser + pd.Timedelta(\"3 days\")\n tm.assert_series_equal(result, expected)\n\n # TODO: cleanup & parametrize over box\n def test_mixed_timezone_series_ops_object(self):\n # GH#13043\n ser = pd.Series(\n [\n pd.Timestamp(\"2015-01-01\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2015-01-01\", tz=\"Asia/Tokyo\"),\n ],\n name=\"xxx\",\n )\n assert ser.dtype == object\n\n exp = pd.Series(\n [\n pd.Timestamp(\"2015-01-02\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2015-01-02\", tz=\"Asia/Tokyo\"),\n ],\n name=\"xxx\",\n )\n tm.assert_series_equal(ser + pd.Timedelta(\"1 days\"), exp)\n tm.assert_series_equal(pd.Timedelta(\"1 days\") + ser, exp)\n\n # object series & object series\n ser2 = pd.Series(\n [\n pd.Timestamp(\"2015-01-03\", tz=\"US/Eastern\"),\n pd.Timestamp(\"2015-01-05\", tz=\"Asia/Tokyo\"),\n ],\n name=\"xxx\",\n )\n assert ser2.dtype == object\n exp = pd.Series([pd.Timedelta(\"2 days\"), pd.Timedelta(\"4 days\")], name=\"xxx\")\n tm.assert_series_equal(ser2 - ser, exp)\n tm.assert_series_equal(ser - ser2, -exp)\n\n ser = pd.Series(\n [pd.Timedelta(\"01:00:00\"), pd.Timedelta(\"02:00:00\")],\n name=\"xxx\",\n dtype=object,\n )\n assert ser.dtype == object\n\n exp = pd.Series(\n [pd.Timedelta(\"01:30:00\"), pd.Timedelta(\"02:30:00\")], 
name=\"xxx\"\n )\n tm.assert_series_equal(ser + pd.Timedelta(\"00:30:00\"), exp)\n tm.assert_series_equal(pd.Timedelta(\"00:30:00\") + ser, exp)\n\n # TODO: cleanup & parametrize over box\n def test_iadd_preserves_name(self):\n # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name\n ser = pd.Series([1, 2, 3])\n ser.index.name = \"foo\"\n\n ser.index += 1\n assert ser.index.name == \"foo\"\n\n ser.index -= 1\n assert ser.index.name == \"foo\"\n\n def test_add_string(self):\n # from bug report\n index = pd.Index([\"a\", \"b\", \"c\"])\n index2 = index + \"foo\"\n\n assert \"a\" not in index2\n assert \"afoo\" in index2\n\n def test_iadd_string(self):\n index = pd.Index([\"a\", \"b\", \"c\"])\n # doesn't fail test unless there is a check before `+=`\n assert \"a\" in index\n\n index += \"_x\"\n assert \"a_x\" in index\n\n def test_add(self):\n index = tm.makeStringIndex(100)\n expected = pd.Index(index.values * 2)\n tm.assert_index_equal(index + index, expected)\n tm.assert_index_equal(index + index.tolist(), expected)\n tm.assert_index_equal(index.tolist() + index, expected)\n\n # test add and radd\n index = pd.Index(list(\"abc\"))\n expected = pd.Index([\"a1\", \"b1\", \"c1\"])\n tm.assert_index_equal(index + \"1\", expected)\n expected = pd.Index([\"1a\", \"1b\", \"1c\"])\n tm.assert_index_equal(\"1\" + index, expected)\n\n def test_sub_fail(self):\n index = tm.makeStringIndex(100)\n with pytest.raises(TypeError):\n index - \"a\"\n with pytest.raises(TypeError):\n index - index\n with pytest.raises(TypeError):\n index - index.tolist()\n with pytest.raises(TypeError):\n index.tolist() - index\n\n def test_sub_object(self):\n # GH#19369\n index = pd.Index([Decimal(1), Decimal(2)])\n expected = pd.Index([Decimal(0), Decimal(1)])\n\n result = index - Decimal(1)\n tm.assert_index_equal(result, expected)\n\n result = index - pd.Index([Decimal(1), Decimal(1)])\n tm.assert_index_equal(result, expected)\n\n with pytest.raises(TypeError):\n index - \"foo\"\n\n with pytest.raises(TypeError):\n index - np.array([2, \"foo\"])\n\n def test_rsub_object(self):\n # GH#19369\n index = pd.Index([Decimal(1), Decimal(2)])\n expected = pd.Index([Decimal(1), Decimal(0)])\n\n result = Decimal(2) - index\n tm.assert_index_equal(result, expected)\n\n result = np.array([Decimal(2), Decimal(2)]) - index\n tm.assert_index_equal(result, expected)\n\n with pytest.raises(TypeError):\n \"foo\" - index\n\n with pytest.raises(TypeError):\n np.array([True, pd.Timestamp.now()]) - index\n", "\"\"\"\ntest all other .agg behavior\n\"\"\"\n\nfrom collections import OrderedDict\nimport datetime as dt\nfrom functools import partial\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n PeriodIndex,\n Series,\n date_range,\n period_range,\n)\nfrom pandas.core.groupby.groupby import SpecificationError\nimport pandas.util.testing as tm\n\nfrom pandas.io.formats.printing import pprint_thing\n\n\ndef test_agg_api():\n # GH 6337\n # http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error\n # different api for agg when passed custom function with mixed frame\n\n df = DataFrame(\n {\n \"data1\": np.random.randn(5),\n \"data2\": np.random.randn(5),\n \"key1\": [\"a\", \"a\", \"b\", \"b\", \"a\"],\n \"key2\": [\"one\", \"two\", \"one\", \"two\", \"one\"],\n }\n )\n grouped = df.groupby(\"key1\")\n\n def peak_to_peak(arr):\n return arr.max() - arr.min()\n\n expected = grouped.agg([peak_to_peak])\n expected.columns = [\"data1\", 
\"data2\"]\n result = grouped.agg(peak_to_peak)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_datetimes_mixed():\n data = [[1, \"2012-01-01\", 1.0], [2, \"2012-01-02\", 2.0], [3, None, 3.0]]\n\n df1 = DataFrame(\n {\n \"key\": [x[0] for x in data],\n \"date\": [x[1] for x in data],\n \"value\": [x[2] for x in data],\n }\n )\n\n data = [\n [\n row[0],\n (dt.datetime.strptime(row[1], \"%Y-%m-%d\").date() if row[1] else None),\n row[2],\n ]\n for row in data\n ]\n\n df2 = DataFrame(\n {\n \"key\": [x[0] for x in data],\n \"date\": [x[1] for x in data],\n \"value\": [x[2] for x in data],\n }\n )\n\n df1[\"weights\"] = df1[\"value\"] / df1[\"value\"].sum()\n gb1 = df1.groupby(\"date\").aggregate(np.sum)\n\n df2[\"weights\"] = df1[\"value\"] / df1[\"value\"].sum()\n gb2 = df2.groupby(\"date\").aggregate(np.sum)\n\n assert len(gb1) == len(gb2)\n\n\ndef test_agg_period_index():\n prng = period_range(\"2012-1-1\", freq=\"M\", periods=3)\n df = DataFrame(np.random.randn(3, 2), index=prng)\n rs = df.groupby(level=0).sum()\n assert isinstance(rs.index, PeriodIndex)\n\n # GH 3579\n index = period_range(start=\"1999-01\", periods=5, freq=\"M\")\n s1 = Series(np.random.rand(len(index)), index=index)\n s2 = Series(np.random.rand(len(index)), index=index)\n series = [(\"s1\", s1), (\"s2\", s2)]\n df = DataFrame.from_dict(OrderedDict(series))\n grouped = df.groupby(df.index.month)\n list(grouped)\n\n\ndef test_agg_dict_parameter_cast_result_dtypes():\n # GH 12821\n\n df = DataFrame(\n {\n \"class\": [\"A\", \"A\", \"B\", \"B\", \"C\", \"C\", \"D\", \"D\"],\n \"time\": date_range(\"1/1/2011\", periods=8, freq=\"H\"),\n }\n )\n df.loc[[0, 1, 2, 5], \"time\"] = None\n\n # test for `first` function\n exp = df.loc[[0, 3, 4, 6]].set_index(\"class\")\n grouped = df.groupby(\"class\")\n tm.assert_frame_equal(grouped.first(), exp)\n tm.assert_frame_equal(grouped.agg(\"first\"), exp)\n tm.assert_frame_equal(grouped.agg({\"time\": \"first\"}), exp)\n tm.assert_series_equal(grouped.time.first(), exp[\"time\"])\n tm.assert_series_equal(grouped.time.agg(\"first\"), exp[\"time\"])\n\n # test for `last` function\n exp = df.loc[[0, 3, 4, 7]].set_index(\"class\")\n grouped = df.groupby(\"class\")\n tm.assert_frame_equal(grouped.last(), exp)\n tm.assert_frame_equal(grouped.agg(\"last\"), exp)\n tm.assert_frame_equal(grouped.agg({\"time\": \"last\"}), exp)\n tm.assert_series_equal(grouped.time.last(), exp[\"time\"])\n tm.assert_series_equal(grouped.time.agg(\"last\"), exp[\"time\"])\n\n # count\n exp = pd.Series([2, 2, 2, 2], index=Index(list(\"ABCD\"), name=\"class\"), name=\"time\")\n tm.assert_series_equal(grouped.time.agg(len), exp)\n tm.assert_series_equal(grouped.time.size(), exp)\n\n exp = pd.Series([0, 1, 1, 2], index=Index(list(\"ABCD\"), name=\"class\"), name=\"time\")\n tm.assert_series_equal(grouped.time.count(), exp)\n\n\ndef test_agg_cast_results_dtypes():\n # similar to GH12821\n # xref #11444\n u = [dt.datetime(2015, x + 1, 1) for x in range(12)]\n v = list(\"aaabbbbbbccd\")\n df = pd.DataFrame({\"X\": v, \"Y\": u})\n\n result = df.groupby(\"X\")[\"Y\"].agg(len)\n expected = df.groupby(\"X\")[\"Y\"].count()\n tm.assert_series_equal(result, expected)\n\n\ndef test_aggregate_float64_no_int64():\n # see gh-11199\n df = DataFrame({\"a\": [1, 2, 3, 4, 5], \"b\": [1, 2, 2, 4, 5], \"c\": [1, 2, 3, 4, 5]})\n\n expected = DataFrame({\"a\": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])\n expected.index.name = \"b\"\n\n result = df.groupby(\"b\")[[\"a\"]].mean()\n tm.assert_frame_equal(result, expected)\n\n expected 
= DataFrame({\"a\": [1, 2.5, 4, 5], \"c\": [1, 2.5, 4, 5]}, index=[1, 2, 4, 5])\n expected.index.name = \"b\"\n\n result = df.groupby(\"b\")[[\"a\", \"c\"]].mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_aggregate_api_consistency():\n # GH 9052\n # make sure that the aggregates via dict\n # are consistent\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n grouped = df.groupby([\"A\", \"B\"])\n c_mean = grouped[\"C\"].mean()\n c_sum = grouped[\"C\"].sum()\n d_mean = grouped[\"D\"].mean()\n d_sum = grouped[\"D\"].sum()\n\n result = grouped[\"D\"].agg([\"sum\", \"mean\"])\n expected = pd.concat([d_sum, d_mean], axis=1)\n expected.columns = [\"sum\", \"mean\"]\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg([np.sum, np.mean])\n expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)\n expected.columns = MultiIndex.from_product([[\"C\", \"D\"], [\"sum\", \"mean\"]])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped[[\"D\", \"C\"]].agg([np.sum, np.mean])\n expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)\n expected.columns = MultiIndex.from_product([[\"D\", \"C\"], [\"sum\", \"mean\"]])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg({\"C\": \"mean\", \"D\": \"sum\"})\n expected = pd.concat([d_sum, c_mean], axis=1)\n tm.assert_frame_equal(result, expected, check_like=True)\n\n result = grouped.agg({\"C\": [\"mean\", \"sum\"], \"D\": [\"mean\", \"sum\"]})\n expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)\n expected.columns = MultiIndex.from_product([[\"C\", \"D\"], [\"mean\", \"sum\"]])\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = grouped[[\"D\", \"C\"]].agg({\"r\": np.sum, \"r2\": np.mean})\n expected = pd.concat([d_sum, c_sum, d_mean, c_mean], axis=1)\n expected.columns = MultiIndex.from_product([[\"r\", \"r2\"], [\"D\", \"C\"]])\n tm.assert_frame_equal(result, expected, check_like=True)\n\n\ndef test_agg_dict_renaming_deprecation():\n # 15931\n df = pd.DataFrame({\"A\": [1, 1, 1, 2, 2], \"B\": range(5), \"C\": range(5)})\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False) as w:\n df.groupby(\"A\").agg(\n {\"B\": {\"foo\": [\"sum\", \"max\"]}, \"C\": {\"bar\": [\"count\", \"min\"]}}\n )\n assert \"using a dict with renaming\" in str(w[0].message)\n assert \"named aggregation\" in str(w[0].message)\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n df.groupby(\"A\")[[\"B\", \"C\"]].agg({\"ma\": \"max\"})\n\n with tm.assert_produces_warning(FutureWarning) as w:\n df.groupby(\"A\").B.agg({\"foo\": \"count\"})\n assert \"using a dict on a Series for aggregation\" in str(w[0].message)\n assert \"named aggregation instead.\" in str(w[0].message)\n\n\ndef test_agg_compat():\n # GH 12334\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n g = df.groupby([\"A\", \"B\"])\n\n expected = pd.concat([g[\"D\"].sum(), g[\"D\"].std()], axis=1)\n expected.columns = MultiIndex.from_tuples([(\"C\", \"sum\"), (\"C\", \"std\")])\n with 
tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = g[\"D\"].agg({\"C\": [\"sum\", \"std\"]})\n tm.assert_frame_equal(result, expected, check_like=True)\n\n expected = pd.concat([g[\"D\"].sum(), g[\"D\"].std()], axis=1)\n expected.columns = [\"C\", \"D\"]\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = g[\"D\"].agg({\"C\": \"sum\", \"D\": \"std\"})\n tm.assert_frame_equal(result, expected, check_like=True)\n\n\ndef test_agg_nested_dicts():\n # API change for disallowing these types of nested dicts\n df = DataFrame(\n {\n \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"foo\"],\n \"B\": [\"one\", \"one\", \"two\", \"two\", \"two\", \"two\", \"one\", \"two\"],\n \"C\": np.random.randn(8) + 1.0,\n \"D\": np.arange(8),\n }\n )\n\n g = df.groupby([\"A\", \"B\"])\n\n msg = r\"cannot perform renaming for r[1-2] with a nested dictionary\"\n with pytest.raises(SpecificationError, match=msg):\n g.aggregate({\"r1\": {\"C\": [\"mean\", \"sum\"]}, \"r2\": {\"D\": [\"mean\", \"sum\"]}})\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = g.agg({\"C\": {\"ra\": [\"mean\", \"std\"]}, \"D\": {\"rb\": [\"mean\", \"std\"]}})\n expected = pd.concat(\n [g[\"C\"].mean(), g[\"C\"].std(), g[\"D\"].mean(), g[\"D\"].std()], axis=1\n )\n expected.columns = pd.MultiIndex.from_tuples(\n [(\"ra\", \"mean\"), (\"ra\", \"std\"), (\"rb\", \"mean\"), (\"rb\", \"std\")]\n )\n tm.assert_frame_equal(result, expected, check_like=True)\n\n # same name as the original column\n # GH9052\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n expected = g[\"D\"].agg({\"result1\": np.sum, \"result2\": np.mean})\n expected = expected.rename(columns={\"result1\": \"D\"})\n\n with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):\n result = g[\"D\"].agg({\"D\": np.sum, \"result2\": np.mean})\n tm.assert_frame_equal(result, expected, check_like=True)\n\n\ndef test_agg_item_by_item_raise_typeerror():\n df = DataFrame(np.random.randint(10, size=(20, 10)))\n\n def raiseException(df):\n pprint_thing(\"----------------------------------------\")\n pprint_thing(df.to_string())\n raise TypeError(\"test\")\n\n with pytest.raises(TypeError, match=\"test\"):\n df.groupby(0).agg(raiseException)\n\n\ndef test_series_agg_multikey():\n ts = tm.makeTimeSeries()\n grouped = ts.groupby([lambda x: x.year, lambda x: x.month])\n\n result = grouped.agg(np.sum)\n expected = grouped.sum()\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_agg_multi_pure_python():\n data = DataFrame(\n {\n \"A\": [\n \"foo\",\n \"foo\",\n \"foo\",\n \"foo\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"bar\",\n \"foo\",\n \"foo\",\n \"foo\",\n ],\n \"B\": [\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"one\",\n \"one\",\n \"one\",\n \"two\",\n \"two\",\n \"two\",\n \"one\",\n ],\n \"C\": [\n \"dull\",\n \"dull\",\n \"shiny\",\n \"dull\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"dull\",\n \"shiny\",\n \"shiny\",\n \"shiny\",\n ],\n \"D\": np.random.randn(11),\n \"E\": np.random.randn(11),\n \"F\": np.random.randn(11),\n }\n )\n\n def bad(x):\n assert len(x.values.base) > 0\n return \"foo\"\n\n result = data.groupby([\"A\", \"B\"]).agg(bad)\n expected = data.groupby([\"A\", \"B\"]).agg(lambda x: \"foo\")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_consistency():\n # agg with ([]) and () not consistent\n # GH 6715\n def P1(a):\n try:\n return np.percentile(a.dropna(), q=1)\n except Exception:\n 
return np.nan\n\n df = DataFrame(\n {\n \"col1\": [1, 2, 3, 4],\n \"col2\": [10, 25, 26, 31],\n \"date\": [\n dt.date(2013, 2, 10),\n dt.date(2013, 2, 10),\n dt.date(2013, 2, 11),\n dt.date(2013, 2, 11),\n ],\n }\n )\n\n g = df.groupby(\"date\")\n\n expected = g.agg([P1])\n expected.columns = expected.columns.levels[0]\n\n result = g.agg(P1)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_callables():\n # GH 7929\n df = DataFrame({\"foo\": [1, 2], \"bar\": [3, 4]}).astype(np.int64)\n\n class fn_class:\n def __call__(self, x):\n return sum(x)\n\n equiv_callables = [\n sum,\n np.sum,\n lambda x: sum(x),\n lambda x: x.sum(),\n partial(sum),\n fn_class(),\n ]\n\n expected = df.groupby(\"foo\").agg(sum)\n for ecall in equiv_callables:\n result = df.groupby(\"foo\").agg(ecall)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_over_numpy_arrays():\n # GH 3788\n df = pd.DataFrame(\n [\n [1, np.array([10, 20, 30])],\n [1, np.array([40, 50, 60])],\n [2, np.array([20, 30, 40])],\n ],\n columns=[\"category\", \"arraydata\"],\n )\n result = df.groupby(\"category\").agg(sum)\n\n expected_data = [[np.array([50, 70, 90])], [np.array([20, 30, 40])]]\n expected_index = pd.Index([1, 2], name=\"category\")\n expected_column = [\"arraydata\"]\n expected = pd.DataFrame(\n expected_data, index=expected_index, columns=expected_column\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_timezone_round_trip():\n # GH 15426\n ts = pd.Timestamp(\"2016-01-01 12:00:00\", tz=\"US/Pacific\")\n df = pd.DataFrame(\n {\"a\": 1, \"b\": [ts + dt.timedelta(minutes=nn) for nn in range(10)]}\n )\n\n result1 = df.groupby(\"a\")[\"b\"].agg(np.min).iloc[0]\n result2 = df.groupby(\"a\")[\"b\"].agg(lambda x: np.min(x)).iloc[0]\n result3 = df.groupby(\"a\")[\"b\"].min().iloc[0]\n\n assert result1 == ts\n assert result2 == ts\n assert result3 == ts\n\n dates = [\n pd.Timestamp(\"2016-01-0%d 12:00:00\" % i, tz=\"US/Pacific\") for i in range(1, 5)\n ]\n df = pd.DataFrame({\"A\": [\"a\", \"b\"] * 2, \"B\": dates})\n grouped = df.groupby(\"A\")\n\n ts = df[\"B\"].iloc[0]\n assert ts == grouped.nth(0)[\"B\"].iloc[0]\n assert ts == grouped.head(1)[\"B\"].iloc[0]\n assert ts == grouped.first()[\"B\"].iloc[0]\n\n # GH#27110 applying iloc should return a DataFrame\n assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 0]\n\n ts = df[\"B\"].iloc[2]\n assert ts == grouped.last()[\"B\"].iloc[0]\n\n # GH#27110 applying iloc should return a DataFrame\n assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 0]\n\n\ndef test_sum_uint64_overflow():\n # see gh-14758\n # Convert to uint64 and don't overflow\n df = pd.DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)\n df = df + 9223372036854775807\n\n index = pd.Index(\n [9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64\n )\n expected = pd.DataFrame(\n {1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},\n index=index,\n )\n\n expected.index.name = 0\n result = df.groupby(0).sum()\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"structure, expected\",\n [\n (tuple, pd.DataFrame({\"C\": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),\n (list, pd.DataFrame({\"C\": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),\n (\n lambda x: tuple(x),\n pd.DataFrame({\"C\": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),\n ),\n (\n lambda x: list(x),\n pd.DataFrame({\"C\": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),\n ),\n ],\n)\ndef test_agg_structs_dataframe(structure, expected):\n df = pd.DataFrame(\n {\"A\": [1, 1, 1, 3, 3, 3], 
\"B\": [1, 1, 1, 4, 4, 4], \"C\": [1, 1, 1, 3, 4, 4]}\n )\n\n result = df.groupby([\"A\", \"B\"]).aggregate(structure)\n expected.index.names = [\"A\", \"B\"]\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n \"structure, expected\",\n [\n (tuple, pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name=\"C\")),\n (list, pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name=\"C\")),\n (lambda x: tuple(x), pd.Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name=\"C\")),\n (lambda x: list(x), pd.Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name=\"C\")),\n ],\n)\ndef test_agg_structs_series(structure, expected):\n # Issue #18079\n df = pd.DataFrame(\n {\"A\": [1, 1, 1, 3, 3, 3], \"B\": [1, 1, 1, 4, 4, 4], \"C\": [1, 1, 1, 3, 4, 4]}\n )\n\n result = df.groupby(\"A\")[\"C\"].aggregate(structure)\n expected.index.name = \"A\"\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_category_nansum(observed):\n categories = [\"a\", \"b\", \"c\"]\n df = pd.DataFrame(\n {\"A\": pd.Categorical([\"a\", \"a\", \"b\"], categories=categories), \"B\": [1, 2, 3]}\n )\n result = df.groupby(\"A\", observed=observed).B.agg(np.nansum)\n expected = pd.Series(\n [3, 3, 0],\n index=pd.CategoricalIndex([\"a\", \"b\", \"c\"], categories=categories, name=\"A\"),\n name=\"B\",\n )\n if observed:\n expected = expected[expected != 0]\n tm.assert_series_equal(result, expected)\n\n\ndef test_agg_list_like_func():\n # GH 18473\n df = pd.DataFrame(\n {\"A\": [str(x) for x in range(3)], \"B\": [str(x) for x in range(3)]}\n )\n grouped = df.groupby(\"A\", as_index=False, sort=False)\n result = grouped.agg({\"B\": lambda x: list(x)})\n expected = pd.DataFrame(\n {\"A\": [str(x) for x in range(3)], \"B\": [[str(x)] for x in range(3)]}\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_agg_lambda_with_timezone():\n # GH 23683\n df = pd.DataFrame(\n {\n \"tag\": [1, 1],\n \"date\": [\n pd.Timestamp(\"2018-01-01\", tz=\"UTC\"),\n pd.Timestamp(\"2018-01-02\", tz=\"UTC\"),\n ],\n }\n )\n result = df.groupby(\"tag\").agg({\"date\": lambda e: e.head(1)})\n expected = pd.DataFrame(\n [pd.Timestamp(\"2018-01-01\", tz=\"UTC\")],\n index=pd.Index([1], name=\"tag\"),\n columns=[\"date\"],\n )\n tm.assert_frame_equal(result, expected)\n", "from datetime import datetime, time\nfrom itertools import product\n\nimport numpy as np\nimport pytest\nimport pytz\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n period_range,\n to_datetime,\n)\nfrom pandas.tests.frame.common import TestData\nimport pandas.util.testing as tm\nfrom pandas.util.testing import (\n assert_frame_equal,\n assert_index_equal,\n assert_series_equal,\n)\n\nimport pandas.tseries.offsets as offsets\n\n\[email protected](params=product([True, False], [True, False]))\ndef close_open_fixture(request):\n return request.param\n\n\nclass TestDataFrameTimeSeriesMethods(TestData):\n def test_diff(self):\n the_diff = self.tsframe.diff(1)\n\n assert_series_equal(\n the_diff[\"A\"], self.tsframe[\"A\"] - self.tsframe[\"A\"].shift(1)\n )\n\n # int dtype\n a = 10000000000000000\n b = a + 1\n s = Series([a, b])\n\n rs = DataFrame({\"s\": s}).diff()\n assert rs.s[1] == 1\n\n # mixed numeric\n tf = self.tsframe.astype(\"float32\")\n the_diff = tf.diff(1)\n assert_series_equal(the_diff[\"A\"], tf[\"A\"] - tf[\"A\"].shift(1))\n\n # issue 10907\n df = pd.DataFrame({\"y\": pd.Series([2]), \"z\": pd.Series([3])})\n df.insert(0, \"x\", 1)\n result = df.diff(axis=1)\n expected = 
pd.DataFrame(\n {\"x\": np.nan, \"y\": pd.Series(1), \"z\": pd.Series(1)}\n ).astype(\"float64\")\n assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n def test_diff_datetime_axis0(self, tz):\n # GH 18578\n df = DataFrame(\n {\n 0: date_range(\"2010\", freq=\"D\", periods=2, tz=tz),\n 1: date_range(\"2010\", freq=\"D\", periods=2, tz=tz),\n }\n )\n\n result = df.diff(axis=0)\n expected = DataFrame(\n {\n 0: pd.TimedeltaIndex([\"NaT\", \"1 days\"]),\n 1: pd.TimedeltaIndex([\"NaT\", \"1 days\"]),\n }\n )\n assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\"tz\", [None, \"UTC\"])\n def test_diff_datetime_axis1(self, tz):\n # GH 18578\n df = DataFrame(\n {\n 0: date_range(\"2010\", freq=\"D\", periods=2, tz=tz),\n 1: date_range(\"2010\", freq=\"D\", periods=2, tz=tz),\n }\n )\n if tz is None:\n result = df.diff(axis=1)\n expected = DataFrame(\n {\n 0: pd.TimedeltaIndex([\"NaT\", \"NaT\"]),\n 1: pd.TimedeltaIndex([\"0 days\", \"0 days\"]),\n }\n )\n assert_frame_equal(result, expected)\n else:\n with pytest.raises(NotImplementedError):\n result = df.diff(axis=1)\n\n def test_diff_timedelta(self):\n # GH 4533\n df = DataFrame(\n dict(\n time=[Timestamp(\"20130101 9:01\"), Timestamp(\"20130101 9:02\")],\n value=[1.0, 2.0],\n )\n )\n\n res = df.diff()\n exp = DataFrame(\n [[pd.NaT, np.nan], [pd.Timedelta(\"00:01:00\"), 1]], columns=[\"time\", \"value\"]\n )\n assert_frame_equal(res, exp)\n\n def test_diff_mixed_dtype(self):\n df = DataFrame(np.random.randn(5, 3))\n df[\"A\"] = np.array([1, 2, 3, 4, 5], dtype=object)\n\n result = df.diff()\n assert result[0].dtype == np.float64\n\n def test_diff_neg_n(self):\n rs = self.tsframe.diff(-1)\n xp = self.tsframe - self.tsframe.shift(-1)\n assert_frame_equal(rs, xp)\n\n def test_diff_float_n(self):\n rs = self.tsframe.diff(1.0)\n xp = self.tsframe.diff(1)\n assert_frame_equal(rs, xp)\n\n def test_diff_axis(self):\n # GH 9727\n df = DataFrame([[1.0, 2.0], [3.0, 4.0]])\n assert_frame_equal(df.diff(axis=1), DataFrame([[np.nan, 1.0], [np.nan, 1.0]]))\n assert_frame_equal(df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]]))\n\n def test_pct_change(self):\n rs = self.tsframe.pct_change(fill_method=None)\n assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)\n\n rs = self.tsframe.pct_change(2)\n filled = self.tsframe.fillna(method=\"pad\")\n assert_frame_equal(rs, filled / filled.shift(2) - 1)\n\n rs = self.tsframe.pct_change(fill_method=\"bfill\", limit=1)\n filled = self.tsframe.fillna(method=\"bfill\", limit=1)\n assert_frame_equal(rs, filled / filled.shift(1) - 1)\n\n rs = self.tsframe.pct_change(freq=\"5D\")\n filled = self.tsframe.fillna(method=\"pad\")\n assert_frame_equal(\n rs, (filled / filled.shift(freq=\"5D\") - 1).reindex_like(filled)\n )\n\n def test_pct_change_shift_over_nas(self):\n s = Series([1.0, 1.5, np.nan, 2.5, 3.0])\n\n df = DataFrame({\"a\": s, \"b\": s})\n\n chg = df.pct_change()\n expected = Series([np.nan, 0.5, 0.0, 2.5 / 1.5 - 1, 0.2])\n edf = DataFrame({\"a\": expected, \"b\": expected})\n assert_frame_equal(chg, edf)\n\n @pytest.mark.parametrize(\n \"freq, periods, fill_method, limit\",\n [\n (\"5B\", 5, None, None),\n (\"3B\", 3, None, None),\n (\"3B\", 3, \"bfill\", None),\n (\"7B\", 7, \"pad\", 1),\n (\"7B\", 7, \"bfill\", 3),\n (\"14B\", 14, None, None),\n ],\n )\n def test_pct_change_periods_freq(self, freq, periods, fill_method, limit):\n # GH 7292\n rs_freq = self.tsframe.pct_change(\n freq=freq, fill_method=fill_method, limit=limit\n )\n rs_periods 
= self.tsframe.pct_change(\n periods, fill_method=fill_method, limit=limit\n )\n assert_frame_equal(rs_freq, rs_periods)\n\n empty_ts = DataFrame(index=self.tsframe.index, columns=self.tsframe.columns)\n rs_freq = empty_ts.pct_change(freq=freq, fill_method=fill_method, limit=limit)\n rs_periods = empty_ts.pct_change(periods, fill_method=fill_method, limit=limit)\n assert_frame_equal(rs_freq, rs_periods)\n\n def test_frame_ctor_datetime64_column(self):\n rng = date_range(\"1/1/2000 00:00:00\", \"1/1/2000 1:59:50\", freq=\"10s\")\n dates = np.asarray(rng)\n\n df = DataFrame({\"A\": np.random.randn(len(rng)), \"B\": dates})\n assert np.issubdtype(df[\"B\"].dtype, np.dtype(\"M8[ns]\"))\n\n def test_frame_append_datetime64_column(self):\n rng = date_range(\"1/1/2000 00:00:00\", \"1/1/2000 1:59:50\", freq=\"10s\")\n df = DataFrame(index=np.arange(len(rng)))\n\n df[\"A\"] = rng\n assert np.issubdtype(df[\"A\"].dtype, np.dtype(\"M8[ns]\"))\n\n def test_frame_datetime64_pre1900_repr(self):\n df = DataFrame({\"year\": date_range(\"1/1/1700\", periods=50, freq=\"A-DEC\")})\n # it works!\n repr(df)\n\n def test_frame_append_datetime64_col_other_units(self):\n n = 100\n\n units = [\"h\", \"m\", \"s\", \"ms\", \"D\", \"M\", \"Y\"]\n\n ns_dtype = np.dtype(\"M8[ns]\")\n\n for unit in units:\n dtype = np.dtype(\"M8[%s]\" % unit)\n vals = np.arange(n, dtype=np.int64).view(dtype)\n\n df = DataFrame({\"ints\": np.arange(n)}, index=np.arange(n))\n df[unit] = vals\n\n ex_vals = to_datetime(vals.astype(\"O\")).values\n\n assert df[unit].dtype == ns_dtype\n assert (df[unit].values == ex_vals).all()\n\n # Test insertion into existing datetime64 column\n df = DataFrame({\"ints\": np.arange(n)}, index=np.arange(n))\n df[\"dates\"] = np.arange(n, dtype=np.int64).view(ns_dtype)\n\n for unit in units:\n dtype = np.dtype(\"M8[%s]\" % unit)\n vals = np.arange(n, dtype=np.int64).view(dtype)\n\n tmp = df.copy()\n\n tmp[\"dates\"] = vals\n ex_vals = to_datetime(vals.astype(\"O\")).values\n\n assert (tmp[\"dates\"].values == ex_vals).all()\n\n def test_shift(self):\n # naive shift\n shiftedFrame = self.tsframe.shift(5)\n tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)\n\n shiftedSeries = self.tsframe[\"A\"].shift(5)\n assert_series_equal(shiftedFrame[\"A\"], shiftedSeries)\n\n shiftedFrame = self.tsframe.shift(-5)\n tm.assert_index_equal(shiftedFrame.index, self.tsframe.index)\n\n shiftedSeries = self.tsframe[\"A\"].shift(-5)\n assert_series_equal(shiftedFrame[\"A\"], shiftedSeries)\n\n # shift by 0\n unshifted = self.tsframe.shift(0)\n assert_frame_equal(unshifted, self.tsframe)\n\n # shift by DateOffset\n shiftedFrame = self.tsframe.shift(5, freq=offsets.BDay())\n assert len(shiftedFrame) == len(self.tsframe)\n\n shiftedFrame2 = self.tsframe.shift(5, freq=\"B\")\n assert_frame_equal(shiftedFrame, shiftedFrame2)\n\n d = self.tsframe.index[0]\n shifted_d = d + offsets.BDay(5)\n assert_series_equal(\n self.tsframe.xs(d), shiftedFrame.xs(shifted_d), check_names=False\n )\n\n # shift int frame\n int_shifted = self.intframe.shift(1) # noqa\n\n # Shifting with PeriodIndex\n ps = tm.makePeriodFrame()\n shifted = ps.shift(1)\n unshifted = shifted.shift(-1)\n tm.assert_index_equal(shifted.index, ps.index)\n tm.assert_index_equal(unshifted.index, ps.index)\n tm.assert_numpy_array_equal(\n unshifted.iloc[:, 0].dropna().values, ps.iloc[:-1, 0].values\n )\n\n shifted2 = ps.shift(1, \"B\")\n shifted3 = ps.shift(1, offsets.BDay())\n assert_frame_equal(shifted2, shifted3)\n assert_frame_equal(ps, shifted2.shift(-1, 
\"B\"))\n\n msg = \"does not match PeriodIndex freq\"\n with pytest.raises(ValueError, match=msg):\n ps.shift(freq=\"D\")\n\n # shift other axis\n # GH 6371\n df = DataFrame(np.random.rand(10, 5))\n expected = pd.concat(\n [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],\n ignore_index=True,\n axis=1,\n )\n result = df.shift(1, axis=1)\n assert_frame_equal(result, expected)\n\n # shift named axis\n df = DataFrame(np.random.rand(10, 5))\n expected = pd.concat(\n [DataFrame(np.nan, index=df.index, columns=[0]), df.iloc[:, 0:-1]],\n ignore_index=True,\n axis=1,\n )\n result = df.shift(1, axis=\"columns\")\n assert_frame_equal(result, expected)\n\n def test_shift_bool(self):\n df = DataFrame({\"high\": [True, False], \"low\": [False, False]})\n rs = df.shift(1)\n xp = DataFrame(\n np.array([[np.nan, np.nan], [True, False]], dtype=object),\n columns=[\"high\", \"low\"],\n )\n assert_frame_equal(rs, xp)\n\n def test_shift_categorical(self):\n # GH 9416\n s1 = pd.Series([\"a\", \"b\", \"c\"], dtype=\"category\")\n s2 = pd.Series([\"A\", \"B\", \"C\"], dtype=\"category\")\n df = DataFrame({\"one\": s1, \"two\": s2})\n rs = df.shift(1)\n xp = DataFrame({\"one\": s1.shift(1), \"two\": s2.shift(1)})\n assert_frame_equal(rs, xp)\n\n def test_shift_fill_value(self):\n # GH #24128\n df = DataFrame(\n [1, 2, 3, 4, 5], index=date_range(\"1/1/2000\", periods=5, freq=\"H\")\n )\n exp = DataFrame(\n [0, 1, 2, 3, 4], index=date_range(\"1/1/2000\", periods=5, freq=\"H\")\n )\n result = df.shift(1, fill_value=0)\n assert_frame_equal(result, exp)\n\n exp = DataFrame(\n [0, 0, 1, 2, 3], index=date_range(\"1/1/2000\", periods=5, freq=\"H\")\n )\n result = df.shift(2, fill_value=0)\n assert_frame_equal(result, exp)\n\n def test_shift_empty(self):\n # Regression test for #8019\n df = DataFrame({\"foo\": []})\n rs = df.shift(-1)\n\n assert_frame_equal(df, rs)\n\n def test_shift_duplicate_columns(self):\n # GH 9092; verify that position-based shifting works\n # in the presence of duplicate columns\n column_lists = [list(range(5)), [1] * 5, [1, 1, 2, 2, 1]]\n data = np.random.randn(20, 5)\n\n shifted = []\n for columns in column_lists:\n df = pd.DataFrame(data.copy(), columns=columns)\n for s in range(5):\n df.iloc[:, s] = df.iloc[:, s].shift(s + 1)\n df.columns = range(5)\n shifted.append(df)\n\n # sanity check the base case\n nulls = shifted[0].isna().sum()\n assert_series_equal(nulls, Series(range(1, 6), dtype=\"int64\"))\n\n # check all answers are the same\n assert_frame_equal(shifted[0], shifted[1])\n assert_frame_equal(shifted[0], shifted[2])\n\n def test_tshift(self):\n # PeriodIndex\n ps = tm.makePeriodFrame()\n shifted = ps.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_frame_equal(unshifted, ps)\n\n shifted2 = ps.tshift(freq=\"B\")\n assert_frame_equal(shifted, shifted2)\n\n shifted3 = ps.tshift(freq=offsets.BDay())\n assert_frame_equal(shifted, shifted3)\n\n with pytest.raises(ValueError, match=\"does not match\"):\n ps.tshift(freq=\"M\")\n\n # DatetimeIndex\n shifted = self.tsframe.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_frame_equal(self.tsframe, unshifted)\n\n shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)\n assert_frame_equal(shifted, shifted2)\n\n inferred_ts = DataFrame(\n self.tsframe.values,\n Index(np.asarray(self.tsframe.index)),\n columns=self.tsframe.columns,\n )\n shifted = inferred_ts.tshift(1)\n unshifted = shifted.tshift(-1)\n assert_frame_equal(shifted, self.tsframe.tshift(1))\n assert_frame_equal(unshifted, inferred_ts)\n\n no_freq = 
self.tsframe.iloc[[0, 5, 7], :]\n msg = \"Freq was not given and was not set in the index\"\n with pytest.raises(ValueError, match=msg):\n no_freq.tshift()\n\n def test_truncate(self):\n ts = self.tsframe[::3]\n\n start, end = self.tsframe.index[3], self.tsframe.index[6]\n\n start_missing = self.tsframe.index[2]\n end_missing = self.tsframe.index[7]\n\n # neither specified\n truncated = ts.truncate()\n assert_frame_equal(truncated, ts)\n\n # both specified\n expected = ts[1:3]\n\n truncated = ts.truncate(start, end)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(start_missing, end_missing)\n assert_frame_equal(truncated, expected)\n\n # start specified\n expected = ts[1:]\n\n truncated = ts.truncate(before=start)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(before=start_missing)\n assert_frame_equal(truncated, expected)\n\n # end specified\n expected = ts[:3]\n\n truncated = ts.truncate(after=end)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(after=end_missing)\n assert_frame_equal(truncated, expected)\n\n msg = \"Truncate: 2000-01-06 00:00:00 must be after 2000-02-04 00:00:00\"\n with pytest.raises(ValueError, match=msg):\n ts.truncate(\n before=ts.index[-1] - ts.index.freq, after=ts.index[0] + ts.index.freq\n )\n\n def test_truncate_copy(self):\n index = self.tsframe.index\n truncated = self.tsframe.truncate(index[5], index[10])\n truncated.values[:] = 5.0\n assert not (self.tsframe.values[5:11] == 5).any()\n\n def test_truncate_nonsortedindex(self):\n # GH 17935\n\n df = pd.DataFrame({\"A\": [\"a\", \"b\", \"c\", \"d\", \"e\"]}, index=[5, 3, 2, 9, 0])\n msg = \"truncate requires a sorted index\"\n with pytest.raises(ValueError, match=msg):\n df.truncate(before=3, after=9)\n\n rng = pd.date_range(\"2011-01-01\", \"2012-01-01\", freq=\"W\")\n ts = pd.DataFrame(\n {\"A\": np.random.randn(len(rng)), \"B\": np.random.randn(len(rng))}, index=rng\n )\n msg = \"truncate requires a sorted index\"\n with pytest.raises(ValueError, match=msg):\n ts.sort_values(\"A\", ascending=False).truncate(\n before=\"2011-11\", after=\"2011-12\"\n )\n\n df = pd.DataFrame(\n {\n 3: np.random.randn(5),\n 20: np.random.randn(5),\n 2: np.random.randn(5),\n 0: np.random.randn(5),\n },\n columns=[3, 20, 2, 0],\n )\n msg = \"truncate requires a sorted index\"\n with pytest.raises(ValueError, match=msg):\n df.truncate(before=2, after=20, axis=1)\n\n def test_asfreq(self):\n offset_monthly = self.tsframe.asfreq(offsets.BMonthEnd())\n rule_monthly = self.tsframe.asfreq(\"BM\")\n\n tm.assert_almost_equal(offset_monthly[\"A\"], rule_monthly[\"A\"])\n\n filled = rule_monthly.asfreq(\"B\", method=\"pad\") # noqa\n # TODO: actually check that this worked.\n\n # don't forget!\n filled_dep = rule_monthly.asfreq(\"B\", method=\"pad\") # noqa\n\n # test does not blow up on length-0 DataFrame\n zero_length = self.tsframe.reindex([])\n result = zero_length.asfreq(\"BM\")\n assert result is not zero_length\n\n def test_asfreq_datetimeindex(self):\n df = DataFrame(\n {\"A\": [1, 2, 3]},\n index=[datetime(2011, 11, 1), datetime(2011, 11, 2), datetime(2011, 11, 3)],\n )\n df = df.asfreq(\"B\")\n assert isinstance(df.index, DatetimeIndex)\n\n ts = df[\"A\"].asfreq(\"B\")\n assert isinstance(ts.index, DatetimeIndex)\n\n def test_asfreq_fillvalue(self):\n # test for fill value during upsampling, related to issue 3715\n\n # setup\n rng = pd.date_range(\"1/1/2016\", periods=10, freq=\"2S\")\n ts = pd.Series(np.arange(len(rng)), index=rng)\n df = pd.DataFrame({\"one\": 
ts})\n\n # insert pre-existing missing value\n df.loc[\"2016-01-01 00:00:08\", \"one\"] = None\n\n actual_df = df.asfreq(freq=\"1S\", fill_value=9.0)\n expected_df = df.asfreq(freq=\"1S\").fillna(9.0)\n expected_df.loc[\"2016-01-01 00:00:08\", \"one\"] = None\n assert_frame_equal(expected_df, actual_df)\n\n expected_series = ts.asfreq(freq=\"1S\").fillna(9.0)\n actual_series = ts.asfreq(freq=\"1S\", fill_value=9.0)\n assert_series_equal(expected_series, actual_series)\n\n @pytest.mark.parametrize(\n \"data,idx,expected_first,expected_last\",\n [\n ({\"A\": [1, 2, 3]}, [1, 1, 2], 1, 2),\n ({\"A\": [1, 2, 3]}, [1, 2, 2], 1, 2),\n ({\"A\": [1, 2, 3, 4]}, [\"d\", \"d\", \"d\", \"d\"], \"d\", \"d\"),\n ({\"A\": [1, np.nan, 3]}, [1, 1, 2], 1, 2),\n ({\"A\": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),\n ({\"A\": [1, np.nan, 3]}, [1, 2, 2], 1, 2),\n ],\n )\n def test_first_last_valid(self, data, idx, expected_first, expected_last):\n N = len(self.frame.index)\n mat = np.random.randn(N)\n mat[:5] = np.nan\n mat[-5:] = np.nan\n\n frame = DataFrame({\"foo\": mat}, index=self.frame.index)\n index = frame.first_valid_index()\n\n assert index == frame.index[5]\n\n index = frame.last_valid_index()\n assert index == frame.index[-6]\n\n # GH12800\n empty = DataFrame()\n assert empty.last_valid_index() is None\n assert empty.first_valid_index() is None\n\n # GH17400: no valid entries\n frame[:] = np.nan\n assert frame.last_valid_index() is None\n assert frame.first_valid_index() is None\n\n # GH20499: its preserves freq with holes\n frame.index = date_range(\"20110101\", periods=N, freq=\"B\")\n frame.iloc[1] = 1\n frame.iloc[-2] = 1\n assert frame.first_valid_index() == frame.index[1]\n assert frame.last_valid_index() == frame.index[-2]\n assert frame.first_valid_index().freq == frame.index.freq\n assert frame.last_valid_index().freq == frame.index.freq\n\n # GH 21441\n df = DataFrame(data, index=idx)\n assert expected_first == df.first_valid_index()\n assert expected_last == df.last_valid_index()\n\n def test_first_subset(self):\n ts = tm.makeTimeDataFrame(freq=\"12h\")\n result = ts.first(\"10d\")\n assert len(result) == 20\n\n ts = tm.makeTimeDataFrame(freq=\"D\")\n result = ts.first(\"10d\")\n assert len(result) == 10\n\n result = ts.first(\"3M\")\n expected = ts[:\"3/31/2000\"]\n assert_frame_equal(result, expected)\n\n result = ts.first(\"21D\")\n expected = ts[:21]\n assert_frame_equal(result, expected)\n\n result = ts[:0].first(\"3M\")\n assert_frame_equal(result, ts[:0])\n\n def test_first_raises(self):\n # GH20725\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])\n with pytest.raises(TypeError): # index is not a DatetimeIndex\n df.first(\"1D\")\n\n def test_last_subset(self):\n ts = tm.makeTimeDataFrame(freq=\"12h\")\n result = ts.last(\"10d\")\n assert len(result) == 20\n\n ts = tm.makeTimeDataFrame(nper=30, freq=\"D\")\n result = ts.last(\"10d\")\n assert len(result) == 10\n\n result = ts.last(\"21D\")\n expected = ts[\"2000-01-10\":]\n assert_frame_equal(result, expected)\n\n result = ts.last(\"21D\")\n expected = ts[-21:]\n assert_frame_equal(result, expected)\n\n result = ts[:0].last(\"3M\")\n assert_frame_equal(result, ts[:0])\n\n def test_last_raises(self):\n # GH20725\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])\n with pytest.raises(TypeError): # index is not a DatetimeIndex\n df.last(\"1D\")\n\n def test_at_time(self):\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\n ts = DataFrame(np.random.randn(len(rng), 2), index=rng)\n rs = ts.at_time(rng[1])\n assert (rs.index.hour == 
rng[1].hour).all()\n assert (rs.index.minute == rng[1].minute).all()\n assert (rs.index.second == rng[1].second).all()\n\n result = ts.at_time(\"9:30\")\n expected = ts.at_time(time(9, 30))\n assert_frame_equal(result, expected)\n\n result = ts.loc[time(9, 30)]\n expected = ts.loc[(rng.hour == 9) & (rng.minute == 30)]\n\n assert_frame_equal(result, expected)\n\n # midnight, everything\n rng = date_range(\"1/1/2000\", \"1/31/2000\")\n ts = DataFrame(np.random.randn(len(rng), 3), index=rng)\n\n result = ts.at_time(time(0, 0))\n assert_frame_equal(result, ts)\n\n # time doesn't exist\n rng = date_range(\"1/1/2012\", freq=\"23Min\", periods=384)\n ts = DataFrame(np.random.randn(len(rng), 2), rng)\n rs = ts.at_time(\"16:00\")\n assert len(rs) == 0\n\n @pytest.mark.parametrize(\n \"hour\", [\"1:00\", \"1:00AM\", time(1), time(1, tzinfo=pytz.UTC)]\n )\n def test_at_time_errors(self, hour):\n # GH 24043\n dti = pd.date_range(\"2018\", periods=3, freq=\"H\")\n df = pd.DataFrame(list(range(len(dti))), index=dti)\n if getattr(hour, \"tzinfo\", None) is None:\n result = df.at_time(hour)\n expected = df.iloc[1:2]\n tm.assert_frame_equal(result, expected)\n else:\n with pytest.raises(ValueError, match=\"Index must be timezone\"):\n df.at_time(hour)\n\n def test_at_time_tz(self):\n # GH 24043\n dti = pd.date_range(\"2018\", periods=3, freq=\"H\", tz=\"US/Pacific\")\n df = pd.DataFrame(list(range(len(dti))), index=dti)\n result = df.at_time(time(4, tzinfo=pytz.timezone(\"US/Eastern\")))\n expected = df.iloc[1:2]\n tm.assert_frame_equal(result, expected)\n\n def test_at_time_raises(self):\n # GH20725\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])\n with pytest.raises(TypeError): # index is not a DatetimeIndex\n df.at_time(\"00:00\")\n\n @pytest.mark.parametrize(\"axis\", [\"index\", \"columns\", 0, 1])\n def test_at_time_axis(self, axis):\n # issue 8839\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\n ts = DataFrame(np.random.randn(len(rng), len(rng)))\n ts.index, ts.columns = rng, rng\n\n indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]\n\n if axis in [\"index\", 0]:\n expected = ts.loc[indices, :]\n elif axis in [\"columns\", 1]:\n expected = ts.loc[:, indices]\n\n result = ts.at_time(\"9:30\", axis=axis)\n assert_frame_equal(result, expected)\n\n def test_between_time(self, close_open_fixture):\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\n ts = DataFrame(np.random.randn(len(rng), 2), index=rng)\n stime = time(0, 0)\n etime = time(1, 0)\n inc_start, inc_end = close_open_fixture\n\n filtered = ts.between_time(stime, etime, inc_start, inc_end)\n exp_len = 13 * 4 + 1\n if not inc_start:\n exp_len -= 5\n if not inc_end:\n exp_len -= 4\n\n assert len(filtered) == exp_len\n for rs in filtered.index:\n t = rs.time()\n if inc_start:\n assert t >= stime\n else:\n assert t > stime\n\n if inc_end:\n assert t <= etime\n else:\n assert t < etime\n\n result = ts.between_time(\"00:00\", \"01:00\")\n expected = ts.between_time(stime, etime)\n assert_frame_equal(result, expected)\n\n # across midnight\n rng = date_range(\"1/1/2000\", \"1/5/2000\", freq=\"5min\")\n ts = DataFrame(np.random.randn(len(rng), 2), index=rng)\n stime = time(22, 0)\n etime = time(9, 0)\n\n filtered = ts.between_time(stime, etime, inc_start, inc_end)\n exp_len = (12 * 11 + 1) * 4 + 1\n if not inc_start:\n exp_len -= 4\n if not inc_end:\n exp_len -= 4\n\n assert len(filtered) == exp_len\n for rs in filtered.index:\n t = rs.time()\n if inc_start:\n assert (t >= stime) or (t <= etime)\n else:\n 
assert (t > stime) or (t <= etime)\n\n if inc_end:\n assert (t <= etime) or (t >= stime)\n else:\n assert (t < etime) or (t >= stime)\n\n def test_between_time_raises(self):\n # GH20725\n df = pd.DataFrame([[1, 2, 3], [4, 5, 6]])\n with pytest.raises(TypeError): # index is not a DatetimeIndex\n df.between_time(start_time=\"00:00\", end_time=\"12:00\")\n\n def test_between_time_axis(self, axis):\n # issue 8839\n rng = date_range(\"1/1/2000\", periods=100, freq=\"10min\")\n ts = DataFrame(np.random.randn(len(rng), len(rng)))\n stime, etime = (\"08:00:00\", \"09:00:00\")\n exp_len = 7\n\n if axis in [\"index\", 0]:\n ts.index = rng\n assert len(ts.between_time(stime, etime)) == exp_len\n assert len(ts.between_time(stime, etime, axis=0)) == exp_len\n\n if axis in [\"columns\", 1]:\n ts.columns = rng\n selected = ts.between_time(stime, etime, axis=1).columns\n assert len(selected) == exp_len\n\n def test_between_time_axis_raises(self, axis):\n # issue 8839\n rng = date_range(\"1/1/2000\", periods=100, freq=\"10min\")\n mask = np.arange(0, len(rng))\n rand_data = np.random.randn(len(rng), len(rng))\n ts = DataFrame(rand_data, index=rng, columns=rng)\n stime, etime = (\"08:00:00\", \"09:00:00\")\n\n msg = \"Index must be DatetimeIndex\"\n if axis in [\"columns\", 1]:\n ts.index = mask\n with pytest.raises(TypeError, match=msg):\n ts.between_time(stime, etime)\n with pytest.raises(TypeError, match=msg):\n ts.between_time(stime, etime, axis=0)\n\n if axis in [\"index\", 0]:\n ts.columns = mask\n with pytest.raises(TypeError, match=msg):\n ts.between_time(stime, etime, axis=1)\n\n def test_operation_on_NaT(self):\n # Both NaT and Timestamp are in DataFrame.\n df = pd.DataFrame({\"foo\": [pd.NaT, pd.NaT, pd.Timestamp(\"2012-05-01\")]})\n\n res = df.min()\n exp = pd.Series([pd.Timestamp(\"2012-05-01\")], index=[\"foo\"])\n tm.assert_series_equal(res, exp)\n\n res = df.max()\n exp = pd.Series([pd.Timestamp(\"2012-05-01\")], index=[\"foo\"])\n tm.assert_series_equal(res, exp)\n\n # GH12941, only NaTs are in DataFrame.\n df = pd.DataFrame({\"foo\": [pd.NaT, pd.NaT]})\n\n res = df.min()\n exp = pd.Series([pd.NaT], index=[\"foo\"])\n tm.assert_series_equal(res, exp)\n\n res = df.max()\n exp = pd.Series([pd.NaT], index=[\"foo\"])\n tm.assert_series_equal(res, exp)\n\n def test_datetime_assignment_with_NaT_and_diff_time_units(self):\n # GH 7492\n data_ns = np.array([1, \"nat\"], dtype=\"datetime64[ns]\")\n result = pd.Series(data_ns).to_frame()\n result[\"new\"] = data_ns\n expected = pd.DataFrame(\n {0: [1, None], \"new\": [1, None]}, dtype=\"datetime64[ns]\"\n )\n tm.assert_frame_equal(result, expected)\n # OutOfBoundsDatetime error shouldn't occur\n data_s = np.array([1, \"nat\"], dtype=\"datetime64[s]\")\n result[\"new\"] = data_s\n expected = pd.DataFrame(\n {0: [1, None], \"new\": [1e9, None]}, dtype=\"datetime64[ns]\"\n )\n tm.assert_frame_equal(result, expected)\n\n def test_frame_to_period(self):\n K = 5\n\n dr = date_range(\"1/1/2000\", \"1/1/2001\")\n pr = period_range(\"1/1/2000\", \"1/1/2001\")\n df = DataFrame(np.random.randn(len(dr), K), index=dr)\n df[\"mix\"] = \"a\"\n\n pts = df.to_period()\n exp = df.copy()\n exp.index = pr\n assert_frame_equal(pts, exp)\n\n pts = df.to_period(\"M\")\n tm.assert_index_equal(pts.index, exp.index.asfreq(\"M\"))\n\n df = df.T\n pts = df.to_period(axis=1)\n exp = df.copy()\n exp.columns = pr\n assert_frame_equal(pts, exp)\n\n pts = df.to_period(\"M\", axis=1)\n tm.assert_index_equal(pts.columns, exp.columns.asfreq(\"M\"))\n\n msg = \"No axis named 2 for object 
type <class 'pandas.core.frame.DataFrame'>\"\n with pytest.raises(ValueError, match=msg):\n df.to_period(axis=2)\n\n @pytest.mark.parametrize(\"fn\", [\"tz_localize\", \"tz_convert\"])\n def test_tz_convert_and_localize(self, fn):\n l0 = date_range(\"20140701\", periods=5, freq=\"D\")\n l1 = date_range(\"20140701\", periods=5, freq=\"D\")\n\n int_idx = Index(range(5))\n\n if fn == \"tz_convert\":\n l0 = l0.tz_localize(\"UTC\")\n l1 = l1.tz_localize(\"UTC\")\n\n for idx in [l0, l1]:\n\n l0_expected = getattr(idx, fn)(\"US/Pacific\")\n l1_expected = getattr(idx, fn)(\"US/Pacific\")\n\n df1 = DataFrame(np.ones(5), index=l0)\n df1 = getattr(df1, fn)(\"US/Pacific\")\n assert_index_equal(df1.index, l0_expected)\n\n # MultiIndex\n # GH7846\n df2 = DataFrame(np.ones(5), MultiIndex.from_arrays([l0, l1]))\n\n df3 = getattr(df2, fn)(\"US/Pacific\", level=0)\n assert not df3.index.levels[0].equals(l0)\n assert_index_equal(df3.index.levels[0], l0_expected)\n assert_index_equal(df3.index.levels[1], l1)\n assert not df3.index.levels[1].equals(l1_expected)\n\n df3 = getattr(df2, fn)(\"US/Pacific\", level=1)\n assert_index_equal(df3.index.levels[0], l0)\n assert not df3.index.levels[0].equals(l0_expected)\n assert_index_equal(df3.index.levels[1], l1_expected)\n assert not df3.index.levels[1].equals(l1)\n\n df4 = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))\n\n # TODO: untested\n df5 = getattr(df4, fn)(\"US/Pacific\", level=1) # noqa\n\n assert_index_equal(df3.index.levels[0], l0)\n assert not df3.index.levels[0].equals(l0_expected)\n assert_index_equal(df3.index.levels[1], l1_expected)\n assert not df3.index.levels[1].equals(l1)\n\n # Bad Inputs\n\n # Not DatetimeIndex / PeriodIndex\n with pytest.raises(TypeError, match=\"DatetimeIndex\"):\n df = DataFrame(index=int_idx)\n df = getattr(df, fn)(\"US/Pacific\")\n\n # Not DatetimeIndex / PeriodIndex\n with pytest.raises(TypeError, match=\"DatetimeIndex\"):\n df = DataFrame(np.ones(5), MultiIndex.from_arrays([int_idx, l0]))\n df = getattr(df, fn)(\"US/Pacific\", level=0)\n\n # Invalid level\n with pytest.raises(ValueError, match=\"not valid\"):\n df = DataFrame(index=l0)\n df = getattr(df, fn)(\"US/Pacific\", level=1)\n" ]
[ [ "pandas.util.testing.box_expected", "pandas.Series", "pandas.util.testing.makeObjectSeries", "pandas.util.testing.assert_series_equal", "pandas.Index", "pandas.util.testing.makeStringIndex", "pandas.Timedelta", "pandas.util.testing.assert_index_equal", "numpy.random.randn", "pandas.Timestamp.now", "pandas.util.testing.assert_equal", "numpy.array", "pandas.Timestamp" ], [ "pandas.Series", "pandas.util.testing.assert_produces_warning", "pandas.MultiIndex.from_tuples", "pandas.DataFrame", "pandas.util.testing.assert_frame_equal", "numpy.random.randn", "numpy.random.randint", "numpy.arange", "pandas.util.testing.assert_series_equal", "pandas.Index", "pandas.concat", "numpy.min", "pandas.Categorical", "pandas.MultiIndex.from_product", "pandas.date_range", "numpy.array", "pandas.CategoricalIndex", "pandas.util.testing.makeTimeSeries", "pandas.period_range", "pandas.Timestamp", "pandas.io.formats.printing.pprint_thing" ], [ "pandas.Series", "numpy.asarray", "pandas.DataFrame", "numpy.dtype", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.assert_index_equal", "numpy.random.randn", "pandas.tseries.offsets.BDay", "pandas.util.testing.makeTimeDataFrame", "numpy.arange", "pandas.util.testing.assert_series_equal", "pandas.util.testing.assert_almost_equal", "pandas.Timedelta", "numpy.random.rand", "pandas.date_range", "numpy.array", "pandas.TimedeltaIndex", "pandas.period_range", "pandas.MultiIndex.from_arrays", "numpy.ones", "pandas.tseries.offsets.BMonthEnd", "pandas.util.testing.makePeriodFrame", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AndrewWangJZ/pyfem
[ "8e7df6aa69c1c761bb8ec67302847e30a83190b4" ]
[ "1D_example.py" ]
[ "import numpy as np\nimport scipy.integrate.quadrature as integrator\n\n\"\"\"\n An 1-dimensional linear problem is used to describe the FEM process\n \n reference:\n [1] https://www.youtube.com/watch?v=rdaZuKFK-4k\n \n\"\"\"\n\n\nclass OneDimensionalProblem:\n def __init__(self):\n self.NodeNum = 5\n self.elementNum = 4\n self.nodeCoordinate = np.linspace(0, 1, 5)\n self.element = [[i, i+1] for i in range(self.NodeNum-1)]\n self.gaussionNorm = np.array([-1/np.sqrt(3), 1/np.sqrt(3)])\n self.gaussionGlobal = [self.mapGaussian2Local(self.nodeCoordinate[i[0]], self.nodeCoordinate[i[1]]) for i in self.element]\n print()\n\n def shapeFunction(self, x, x1, x2):\n w1 = (x2-x)/(x2-x1)\n w2 = (x-x1)/(x2-x1)\n return np.array([w1, w2])\n\n def shapeFunctionDx(self, x1, x2):\n dx1 = -1/(x2-x1)\n dx2 = 1/(x2-x1)\n return np.array([dx1, dx2])\n\n def mapGaussian2Local(self, x1, x2):\n gaussionLocal = np.zeros_like(self.gaussionNorm)\n for i in range(len(self.gaussionNorm)):\n gaussionLocal[i] = (self.gaussionNorm[i]+1)/2*(x2-x1)+x1\n return gaussionLocal\n\n\nif __name__ == '__main__':\n oneDimProblem = OneDimensionalProblem()" ]
[ [ "numpy.array", "numpy.zeros_like", "numpy.sqrt", "numpy.linspace" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
cookish/deeptile
[ "159275b4d63286dd6ad08010a4e457ba9af9ae2d", "159275b4d63286dd6ad08010a4e457ba9af9ae2d" ]
[ "python/check_docker.py", "python/draw_manual_data.py" ]
[ "import requests\nimport numpy as np\nimport json\n\nimport include.board_utilities as board_utilities\n\nhost = 'localhost'\nport = '8503'\n\ntest_boards = np.empty(shape=1000, dtype=\"uint64\")\nfor i in range(1000):\n test_boards[i] = 0xfedcba9876543210 + i\n\ntest_board_arr = board_utilities.int_to_arr(test_boards)\nboard_utilities.print_board_hex(test_board_arr[0])\ntest_board_shaped = board_utilities.arr_to_board(test_board_arr)\n\nprint()\nprint(\"===============================\")\nprint(\"Converting board to list\")\nx = test_board_shaped.tolist()\n\n# print(\"x:\")\n# print(x)\n\nprint(\"Converting list to json string\")\ndata = json.dumps({\"instances\": x})\n\n# headers = {\"content-type\": \"application/json\"}\nprint(\"Posting string to Docker\")\njson_response = requests.post(\"http://%s:%s/v1/models/serving_default:predict\" % (host, port), data=data)\n\nprint(\"Received response from Docker\")\n\nresponse = json.loads(json_response.text)\nif 'predictions' in response:\n print(\"================= Predictions: =======================\")\n predictions = json.loads(json_response.text)['predictions']\n print('Found', len(predictions), 'predictions')\n for i in range(500, 510):\n print(predictions[i])\nelse:\n print(json_response)\n", "import math\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport matplotlib.ticker as ticker\n# matplotlib.rcParams.update({'errorbar.capsize': 3})\ny_log = False\ny_err = [0, 0, 0, 0]\n\n# ============================== Settings =============================== #\nx = [1, 2, 3, 4]\n\n# title = \"Evaluations / move\"\n# y = [3774.854000000003 / 148.75400000000005,\n# 137842.8080000001 / 198.18399999999983,\n# 5099854.276666666 / 211.04666666666662,\n# 299216277.5450001 / 226.69500000000014,\n# ]\n# x_title = \"Generation\"\n# y_title = \"Evals / move\"\n# y_log = True\n\n# title = \"Fraction of total evals that came from cache\"\n# y = [1 - 0.996676, 1 - 0.504268, 1 - 0.168454, 1 - 0.0290961]\n# x_title = \"Generation\"\n# y_title = \"Fraction cache evals\"\n\n# title = \"Fraction passing critical point (8192..512 -> 16384)\"\n# y = [0.083, 0.184, 0.273333, 0.38]\n# num_y = [1000, 500, 300, 200]\n# x_title = \"Generation\"\n# y_title = \"Fraction passing\"\n# y_err = []\n# for i in range(len(y)):\n# y_err.append(math.sqrt(y[i] / num_y[i]))\n\ntitle = \"Moves per sec\"\ny = [812.647, 82.1121, 7.40047, 0.657738]\nx_title = \"Generation\"\ny_title = \"Moves / sec\"\ny_log = True\n# ======================================================================= #\n\n\n# plt.figure()\nfig, ax = plt.subplots(1,1)\n\nplt.errorbar(x, y, yerr=y_err, fmt='--o', capsize=3, linewidth=1)\n\n\ntick_spacing = 1\nax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))\nplt.title(title)\nplt.xlabel(x_title)\nplt.ylabel(y_title)\nif y_log:\n plt.yscale('log')\nplt.show()\n\n\n" ]
[ [ "numpy.empty" ], [ "matplotlib.ticker.MultipleLocator", "matplotlib.pyplot.title", "matplotlib.pyplot.yscale", "matplotlib.pyplot.subplots", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
shanaxel42/napari
[ "d182b3694deb185afcf8b6ae2e87cccb78d7f82b", "d182b3694deb185afcf8b6ae2e87cccb78d7f82b" ]
[ "napari/layers/image/image.py", "napari/layers/_tests/test_transforms.py" ]
[ "import types\nimport warnings\nfrom base64 import b64encode\nfrom xml.etree.ElementTree import Element\n\nimport numpy as np\nfrom imageio import imwrite\nfrom scipy import ndimage as ndi\n\nfrom ...utils.colormaps import AVAILABLE_COLORMAPS\nfrom ...utils.event import Event\nfrom ...utils.status_messages import format_float\nfrom ..base import Layer\nfrom ..layer_utils import calc_data_range\nfrom ..intensity_mixin import IntensityVisualizationMixin\nfrom ._constants import Interpolation, Rendering\nfrom .image_utils import get_pyramid_and_rgb\n\n\n# Mixin must come before Layer\nclass Image(IntensityVisualizationMixin, Layer):\n \"\"\"Image layer.\n\n Parameters\n ----------\n data : array or list of array\n Image data. Can be N dimensional. If the last dimension has length\n 3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a\n list and arrays are decreasing in shape then the data is treated as\n an image pyramid.\n rgb : bool\n Whether the image is rgb RGB or RGBA. If not specified by user and\n the last dimension of the data has length 3 or 4 it will be set as\n `True`. If `False` the image is interpreted as a luminance image.\n is_pyramid : bool\n Whether the data is an image pyramid or not. Pyramid data is\n represented by a list of array like image data. If not specified by\n the user and if the data is a list of arrays that decrease in shape\n then it will be taken to be a pyramid. The first image in the list\n should be the largest.\n colormap : str, vispy.Color.Colormap, tuple, dict\n Colormap to use for luminance images. If a string must be the name\n of a supported colormap from vispy or matplotlib. If a tuple the\n first value must be a string to assign as a name to a colormap and\n the second item must be a Colormap. If a dict the key must be a\n string to assign as a name to a colormap and the value must be a\n Colormap.\n contrast_limits : list (2,)\n Color limits to be used for determining the colormap bounds for\n luminance images. If not passed is calculated as the min and max of\n the image.\n gamma : float\n Gamma correction for determining colormap linearity. Defaults to 1.\n interpolation : str\n Interpolation mode used by vispy. Must be one of our supported\n modes.\n rendering : str\n Rendering mode used by vispy. Must be one of our supported\n modes.\n iso_threshold : float\n Threshold for isosurface.\n attenuation : float\n Attenuation rate for attenuated maximum intensity projection.\n name : str\n Name of the layer.\n metadata : dict\n Layer metadata.\n scale : tuple of float\n Scale factors for the layer.\n translate : tuple of float\n Translation values for the layer.\n opacity : float\n Opacity of the layer visual, between 0.0 and 1.0.\n blending : str\n One of a list of preset blending modes that determines how RGB and\n alpha values of the layer visual get mixed. Allowed values are\n {'opaque', 'translucent', and 'additive'}.\n visible : bool\n Whether the layer visual is currently being displayed.\n\n\n Attributes\n ----------\n data : array\n Image data. Can be N dimensional. If the last dimension has length\n 3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a list\n and arrays are decreaing in shape then the data is treated as an\n image pyramid.\n metadata : dict\n Image metadata.\n rgb : bool\n Whether the image is rgb RGB or RGBA if rgb. If not\n specified by user and the last dimension of the data has length 3 or 4\n it will be set as `True`. 
If `False` the image is interpreted as a\n luminance image.\n is_pyramid : bool\n Whether the data is an image pyramid or not. Pyramid data is\n represented by a list of array like image data. The first image in the\n list should be the largest.\n colormap : 2-tuple of str, vispy.color.Colormap\n The first is the name of the current colormap, and the second value is\n the colormap. Colormaps are used for luminance images, if the image is\n rgb the colormap is ignored.\n colormaps : tuple of str\n Names of the available colormaps.\n contrast_limits : list (2,) of float\n Color limits to be used for determining the colormap bounds for\n luminance images. If the image is rgb the contrast_limits is ignored.\n contrast_limits_range : list (2,) of float\n Range for the color limits for luminace images. If the image is\n rgb the contrast_limits_range is ignored.\n gamma : float\n Gamma correction for determining colormap linearity.\n interpolation : str\n Interpolation mode used by vispy. Must be one of our supported\n modes.\n rendering : str\n Rendering mode used by vispy. Must be one of our supported\n modes.\n iso_threshold : float\n Threshold for isosurface.\n attenuation : float\n Attenuation rate for attenuated maximum intensity projection.\n\n Extended Summary\n ----------\n _data_view : array (N, M), (N, M, 3), or (N, M, 4)\n Image data for the currently viewed slice. Must be 2D image data, but\n can be multidimensional for RGB or RGBA images if multidimensional is\n `True`.\n _colorbar : array\n Colorbar for current colormap.\n \"\"\"\n\n _colormaps = AVAILABLE_COLORMAPS\n _max_tile_shape = 1600\n\n def __init__(\n self,\n data,\n *,\n rgb=None,\n is_pyramid=None,\n colormap='gray',\n contrast_limits=None,\n gamma=1,\n interpolation='nearest',\n rendering='mip',\n iso_threshold=0.5,\n attenuation=0.5,\n name=None,\n metadata=None,\n scale=None,\n translate=None,\n opacity=1,\n blending='translucent',\n visible=True,\n ):\n if isinstance(data, types.GeneratorType):\n data = list(data)\n\n ndim, rgb, is_pyramid, data_pyramid = get_pyramid_and_rgb(\n data, pyramid=is_pyramid, rgb=rgb\n )\n\n super().__init__(\n data,\n ndim,\n name=name,\n metadata=metadata,\n scale=scale,\n translate=translate,\n opacity=opacity,\n blending=blending,\n visible=visible,\n )\n\n self.events.add(\n interpolation=Event,\n rendering=Event,\n iso_threshold=Event,\n attenuation=Event,\n )\n\n # Set data\n self.is_pyramid = is_pyramid\n self.rgb = rgb\n self._data = data\n self._data_pyramid = data_pyramid\n self._top_left = np.zeros(ndim, dtype=int)\n if self.is_pyramid:\n self._data_level = len(data_pyramid) - 1\n else:\n self._data_level = 0\n\n # Intitialize image views and thumbnails with zeros\n if self.rgb:\n self._data_view = np.zeros(\n (1,) * self.dims.ndisplay + (self.shape[-1],)\n )\n else:\n self._data_view = np.zeros((1,) * self.dims.ndisplay)\n self._data_raw = self._data_view\n self._data_thumbnail = self._data_view\n\n # Set contrast_limits and colormaps\n self._gamma = gamma\n self._iso_threshold = iso_threshold\n self._attenuation = attenuation\n if contrast_limits is None:\n self.contrast_limits_range = self._calc_data_range()\n else:\n self.contrast_limits_range = contrast_limits\n self._contrast_limits = tuple(self.contrast_limits_range)\n self.colormap = colormap\n self.contrast_limits = self._contrast_limits\n self.interpolation = interpolation\n self.rendering = rendering\n\n # Trigger generation of view slice and thumbnail\n self._update_dims()\n\n def _calc_data_range(self):\n if 
self.is_pyramid:\n input_data = self._data_pyramid[-1]\n else:\n input_data = self.data\n return calc_data_range(input_data)\n\n @property\n def dtype(self):\n return self.data[0].dtype if self.is_pyramid else self.data.dtype\n\n @property\n def data(self):\n \"\"\"array: Image data.\"\"\"\n return self._data\n\n @data.setter\n def data(self, data):\n ndim, rgb, is_pyramid, data_pyramid = get_pyramid_and_rgb(\n data, pyramid=self.is_pyramid, rgb=self.rgb\n )\n self.is_pyramid = is_pyramid\n self.rgb = rgb\n self._data = data\n self._data_pyramid = data_pyramid\n\n self._update_dims()\n self.events.data()\n\n def _get_ndim(self):\n \"\"\"Determine number of dimensions of the layer.\"\"\"\n return len(self.level_shapes[0])\n\n def _get_extent(self):\n return tuple((0, m) for m in self.level_shapes[0])\n\n @property\n def data_level(self):\n \"\"\"int: Current level of pyramid, or 0 if image.\"\"\"\n return self._data_level\n\n @data_level.setter\n def data_level(self, level):\n if self._data_level == level:\n return\n self._data_level = level\n self.refresh()\n\n @property\n def level_shapes(self):\n \"\"\"array: Shapes of each level of the pyramid or just of image.\"\"\"\n if self.is_pyramid:\n if self.rgb:\n shapes = [im.shape[:-1] for im in self._data_pyramid]\n else:\n shapes = [im.shape for im in self._data_pyramid]\n else:\n if self.rgb:\n shapes = [self.data.shape[:-1]]\n else:\n shapes = [self.data.shape]\n return np.array(shapes)\n\n @property\n def level_downsamples(self):\n \"\"\"list: Downsample factors for each level of the pyramid.\"\"\"\n return np.divide(self.level_shapes[0], self.level_shapes)\n\n @property\n def top_left(self):\n \"\"\"tuple: Location of top left canvas pixel in image.\"\"\"\n return self._top_left\n\n @top_left.setter\n def top_left(self, top_left):\n if np.all(self._top_left == top_left):\n return\n self._top_left = top_left.astype(int)\n self.refresh()\n\n @property\n def iso_threshold(self):\n \"\"\"float: threshold for isosurface.\"\"\"\n return self._iso_threshold\n\n @iso_threshold.setter\n def iso_threshold(self, value):\n self.status = format_float(value)\n self._iso_threshold = value\n self._update_thumbnail()\n self.events.iso_threshold()\n\n @property\n def attenuation(self):\n \"\"\"float: attenuation rate for attenuated_mip rendering.\"\"\"\n return self._attenuation\n\n @attenuation.setter\n def attenuation(self, value):\n self.status = format_float(value)\n self._attenuation = value\n self._update_thumbnail()\n self.events.attenuation()\n\n @property\n def interpolation(self):\n \"\"\"{\n 'bessel', 'bicubic', 'bilinear', 'blackman', 'catrom', 'gaussian',\n 'hamming', 'hanning', 'hermite', 'kaiser', 'lanczos', 'mitchell',\n 'nearest', 'spline16', 'spline36'\n }: Equipped interpolation method's name.\n \"\"\"\n return str(self._interpolation)\n\n @interpolation.setter\n def interpolation(self, interpolation):\n self._interpolation = Interpolation(interpolation)\n self.events.interpolation()\n\n @property\n def rendering(self):\n \"\"\"Rendering: Rendering mode.\n Selects a preset rendering mode in vispy that determines how\n volume is displayed\n * translucent: voxel colors are blended along the view ray until\n the result is opaque.\n * mip: maxiumum intensity projection. Cast a ray and display the\n maximum value that was encountered.\n * additive: voxel colors are added along the view ray until\n the result is saturated.\n * iso: isosurface. Cast a ray until a certain threshold is\n encountered. 
At that location, lighning calculations are\n performed to give the visual appearance of a surface.\n * attenuated_mip: attenuated maxiumum intensity projection. Cast a\n ray and attenuate values based on integral of encountered values,\n display the maximum value that was encountered after attenuation.\n This will make nearer objects appear more prominent.\n \"\"\"\n return str(self._rendering)\n\n @rendering.setter\n def rendering(self, rendering):\n self._rendering = Rendering(rendering)\n self.events.rendering()\n\n def _get_state(self):\n \"\"\"Get dictionary of layer state.\n\n Returns\n -------\n state : dict\n Dictionary of layer state.\n \"\"\"\n state = self._get_base_state()\n state.update(\n {\n 'rgb': self.rgb,\n 'is_pyramid': self.is_pyramid,\n 'colormap': self.colormap[0],\n 'contrast_limits': self.contrast_limits,\n 'interpolation': self.interpolation,\n 'rendering': self.rendering,\n 'iso_threshold': self.iso_threshold,\n 'attenuation': self.attenuation,\n 'gamma': self.gamma,\n 'data': self.data,\n }\n )\n return state\n\n def _raw_to_displayed(self, raw):\n \"\"\"Determine displayed image from raw image.\n\n For normal image layers, just return the actual image.\n\n Parameters\n -------\n raw : array\n Raw array.\n\n Returns\n -------\n image : array\n Displayed array.\n \"\"\"\n image = raw\n return image\n\n def _set_view_slice(self):\n \"\"\"Set the view given the indices to slice with.\"\"\"\n not_disp = self.dims.not_displayed\n\n if self.rgb:\n # if rgb need to keep the final axis fixed during the\n # transpose. The index of the final axis depends on how many\n # axes are displayed.\n order = self.dims.displayed_order + (\n max(self.dims.displayed_order) + 1,\n )\n else:\n order = self.dims.displayed_order\n\n if self.is_pyramid:\n # If 3d redering just show lowest level of pyramid\n if self.dims.ndisplay == 3:\n self.data_level = len(self._data_pyramid) - 1\n\n # Slice currently viewed level\n level = self.data_level\n indices = np.array(self.dims.indices)\n downsampled_indices = (\n indices[not_disp] / self.level_downsamples[level, not_disp]\n )\n downsampled_indices = np.round(\n downsampled_indices.astype(float)\n ).astype(int)\n downsampled_indices = np.clip(\n downsampled_indices, 0, self.level_shapes[level, not_disp] - 1\n )\n indices[not_disp] = downsampled_indices\n\n disp_shape = self.level_shapes[level, self.dims.displayed]\n scale = np.ones(self.ndim)\n for d in self.dims.displayed:\n scale[d] = self.level_downsamples[self.data_level][d]\n self._transform_view.scale = scale\n\n if np.any(disp_shape > self._max_tile_shape):\n for d in self.dims.displayed:\n indices[d] = slice(\n self._top_left[d],\n self._top_left[d] + self._max_tile_shape,\n 1,\n )\n self._transform_view.translate = (\n self._top_left * self.scale * self._transform_view.scale\n )\n else:\n self._transform_view.translate = [0] * self.ndim\n\n image = np.asarray(\n self._data_pyramid[level][tuple(indices)]\n ).transpose(order)\n\n if level == len(self._data_pyramid) - 1:\n thumbnail = image\n else:\n # Slice thumbnail\n indices = np.array(self.dims.indices)\n downsampled_indices = (\n indices[not_disp] / self.level_downsamples[-1, not_disp]\n )\n downsampled_indices = np.round(\n downsampled_indices.astype(float)\n ).astype(int)\n downsampled_indices = np.clip(\n downsampled_indices, 0, self.level_shapes[-1, not_disp] - 1\n )\n indices[not_disp] = downsampled_indices\n thumbnail = np.asarray(\n self._data_pyramid[-1][tuple(indices)]\n ).transpose(order)\n else:\n 
self._transform_view.scale = np.ones(self.dims.ndim)\n image = np.asarray(self.data[self.dims.indices]).transpose(order)\n thumbnail = image\n\n if self.rgb and image.dtype.kind == 'f':\n self._data_raw = np.clip(image, 0, 1)\n self._data_view = self._raw_to_displayed(self._data_raw)\n self._data_thumbnail = self._raw_to_displayed(\n np.clip(thumbnail, 0, 1)\n )\n\n else:\n self._data_raw = image\n self._data_view = self._raw_to_displayed(self._data_raw)\n self._data_thumbnail = self._raw_to_displayed(thumbnail)\n\n if self.is_pyramid:\n self.events.scale()\n self.events.translate()\n\n def _update_thumbnail(self):\n \"\"\"Update thumbnail with current image data and colormap.\"\"\"\n if self.dims.ndisplay == 3 and self.dims.ndim > 2:\n image = np.max(self._data_thumbnail, axis=0)\n else:\n image = self._data_thumbnail\n\n # float16 not supported by ndi.zoom\n dtype = np.dtype(image.dtype)\n if dtype in [np.dtype(np.float16)]:\n image = image.astype(np.float32)\n\n raw_zoom_factor = np.divide(\n self._thumbnail_shape[:2], image.shape[:2]\n ).min()\n new_shape = np.clip(\n raw_zoom_factor * np.array(image.shape[:2]),\n 1, # smallest side should be 1 pixel wide\n self._thumbnail_shape[:2],\n )\n zoom_factor = tuple(new_shape / image.shape[:2])\n if self.rgb:\n # warning filter can be removed with scipy 1.4\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n downsampled = ndi.zoom(\n image, zoom_factor + (1,), prefilter=False, order=0\n )\n if image.shape[2] == 4: # image is RGBA\n colormapped = np.copy(downsampled)\n colormapped[..., 3] = downsampled[..., 3] * self.opacity\n if downsampled.dtype == np.uint8:\n colormapped = colormapped.astype(np.uint8)\n else: # image is RGB\n if downsampled.dtype == np.uint8:\n alpha = np.full(\n downsampled.shape[:2] + (1,),\n int(255 * self.opacity),\n dtype=np.uint8,\n )\n else:\n alpha = np.full(downsampled.shape[:2] + (1,), self.opacity)\n colormapped = np.concatenate([downsampled, alpha], axis=2)\n else:\n # warning filter can be removed with scipy 1.4\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n downsampled = ndi.zoom(\n image, zoom_factor, prefilter=False, order=0\n )\n low, high = self.contrast_limits\n downsampled = np.clip(downsampled, low, high)\n color_range = high - low\n if color_range != 0:\n downsampled = (downsampled - low) / color_range\n downsampled = downsampled ** self.gamma\n color_array = self.colormap[1][downsampled.ravel()]\n colormapped = color_array.rgba.reshape(downsampled.shape + (4,))\n colormapped[..., 3] *= self.opacity\n self.thumbnail = colormapped\n\n def _get_value(self):\n \"\"\"Returns coordinates, values, and a string for a given mouse position\n and set of indices.\n\n Returns\n ----------\n value : tuple\n Value of the data at the coord.\n \"\"\"\n coord = np.round(self.coordinates).astype(int)\n if self.rgb:\n shape = self._data_raw.shape[:-1]\n else:\n shape = self._data_raw.shape\n\n if all(0 <= c < s for c, s in zip(coord[self.dims.displayed], shape)):\n value = self._data_raw[tuple(coord[self.dims.displayed])]\n else:\n value = None\n\n if self.is_pyramid:\n value = (self.data_level, value)\n\n return value\n\n def to_xml_list(self):\n \"\"\"Generates a list with a single xml element that defines the\n currently viewed image as a png according to the svg specification.\n\n Returns\n ----------\n xml : list of xml.etree.ElementTree.Element\n List of a single xml element specifying the currently viewed image\n as a png according to the svg specification.\n \"\"\"\n 
if self.dims.ndisplay == 3:\n image = np.max(self._data_thumbnail, axis=0)\n else:\n image = self._data_thumbnail\n image = np.clip(\n image, self.contrast_limits[0], self.contrast_limits[1]\n )\n image = image - self.contrast_limits[0]\n color_range = self.contrast_limits[1] - self.contrast_limits[0]\n if color_range != 0:\n image = image / color_range\n mapped_image = self.colormap[1][image.ravel()]\n mapped_image = mapped_image.RGBA.reshape(image.shape + (4,))\n image_str = imwrite('<bytes>', mapped_image, format='png')\n image_str = \"data:image/png;base64,\" + str(b64encode(image_str))[2:-1]\n props = {'xlink:href': image_str}\n width = str(self.shape[self.dims.displayed[1]])\n height = str(self.shape[self.dims.displayed[0]])\n opacity = str(self.opacity)\n xml = Element(\n 'image', width=width, height=height, opacity=opacity, **props\n )\n return [xml]\n", "import numpy.testing as npt\nfrom napari.layers.transforms import ScaleTranslate\n\n\ndef test_scale_translate():\n coord = [10, 13]\n transform = ScaleTranslate(scale=[2, 3], translate=[8, -5])\n new_coord = transform(coord)\n target_coord = [2 * 10 + 8, 3 * 13 - 5]\n npt.assert_allclose(new_coord, target_coord)\n\n\ndef test_scale_translate_inverse():\n coord = [10, 13]\n transform = ScaleTranslate(scale=[2, 3], translate=[8, -5])\n new_coord = transform(coord)\n target_coord = [2 * 10 + 8, 3 * 13 - 5]\n npt.assert_allclose(new_coord, target_coord)\n\n inverted_new_coord = transform.inverse(new_coord)\n npt.assert_allclose(inverted_new_coord, coord)\n\n\ndef test_scale_translate_compose():\n coord = [10, 13]\n transform_a = ScaleTranslate(scale=[2, 3], translate=[8, -5])\n transform_b = ScaleTranslate(scale=[0.3, 1.4], translate=[-2.2, 3])\n transform_c = transform_b.compose(transform_a)\n\n new_coord_1 = transform_c(coord)\n new_coord_2 = transform_b(transform_a(coord))\n npt.assert_allclose(new_coord_1, new_coord_2)\n\n\ndef test_scale_translate_slice():\n transform_a = ScaleTranslate(scale=[2, 3], translate=[8, -5])\n transform_b = ScaleTranslate(scale=[2, 1, 3], translate=[8, 3, -5])\n npt.assert_allclose(transform_b.set_slice([0, 2]).scale, transform_a.scale)\n npt.assert_allclose(\n transform_b.set_slice([0, 2]).translate, transform_a.translate\n )\n\n\ndef test_scale_translate_pad():\n transform_a = ScaleTranslate(scale=[2, 3], translate=[8, -5])\n transform_b = ScaleTranslate(scale=[2, 1, 3], translate=[8, 0, -5])\n npt.assert_allclose(transform_a.set_pad([1]).scale, transform_b.scale)\n npt.assert_allclose(\n transform_a.set_pad([1]).translate, transform_b.translate\n )\n\n\ndef test_scale_translate_identity_default():\n coord = [10, 13]\n transform = ScaleTranslate()\n new_coord = transform(coord)\n npt.assert_allclose(new_coord, coord)\n" ]
[ [ "numpy.clip", "numpy.asarray", "scipy.ndimage.zoom", "numpy.dtype", "numpy.ones", "numpy.all", "numpy.max", "numpy.copy", "numpy.concatenate", "numpy.round", "numpy.any", "numpy.full", "numpy.array", "numpy.zeros", "numpy.divide" ], [ "numpy.testing.assert_allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Strabes/h2o-prod
[ "2bfd4c87302c2ca3219b0bc313f13c9e787d84ad" ]
[ "_build/jupyter_execute/Score_LendingClub.py" ]
[ "#!/usr/bin/env python\n# coding: utf-8\n\n# # MOJO Scoring: Two Approaches\n# \n# Now we will use the model we built on the Lending Club data to score the test cases we pickled. To mimick the scoring performance we would experience if the model were implemented in a real-time environment, we will score the records one at a time. We will use the MOJO we downloaded from H2O to score these records in two different ways:\n# \n# 1. Use the `mojo_predict_pandas` method from the `h2o.utils.shared_utils` to score one record at a time\n# \n# 2. Use the java application we just built to score one record at a time. To do so, we will first initialize a java virtual machine using python's `subprocess` package. This JVM will instantiate an instance of our scoring class, which loads the model just once at initialization. As we will see, loading the model once is far more efficient than repeatedly calling `mojo_predict_pandas`, which reloads the model for each call. We will then establish a gateway to our JVM using `JavaGateway` from `py4j` and score our test cases one at a time.\n# \n# Timing of these two approaches will show that the second approach is far faster than the first approach. On my machine, the first approach takes more than 300 *milliseconds* per record whereas the second approach takes less than 100 *microseconds* per record. For many real-time production applications, the difference between the second approach and the first approach is the difference between easily hitting an SLA and almost always failing to hit the SLA.\n\n# ### Imports\n\n# In[1]:\n\n\nimport os, sys, json, pickle\nimport pandas as pd\nimport subprocess\nfrom ast import literal_eval\nfrom py4j.java_gateway import JavaGateway\nfrom h2o.utils import shared_utils as su\n\n\n# ### Read in our pickled test cases and feature engineering pipeline\n\n# In[2]:\n\n\ntest_data = pd.read_pickle('test_cases.pkl')\n\n\n# In[3]:\n\n\nwith open('pipeline.pkl','rb') as f:\n p = pickle.load(f)\n\n\n# In[4]:\n\n\ntest_data.head()\n\n\n# ### Apply feature engineering\n# \n# In real-time production scoring, these transformations would constribute to the end-to-end runtime of the application and therefore influence whether scoring achieves its SLA. Here we are primarily interested in the time it takes to score with the MOJO itself under the two approaches outlined above. Therefore, we do not include this in the timing. 
\n\n# In[5]:\n\n\ntest_data_prepped = (\n p.transform(test_data)\n .reset_index(drop=True)\n .drop(labels = 'loan_status',axis=1))\n\n\n# In[6]:\n\n\ntest_data_prepped.head()\n\n\n# In[7]:\n\n\npredictors = test_data_prepped.columns.to_list()\n\n\n# ### Scoring Approach 1: `h2o`'s `mojo_predict_pandas` method\n\n# In[8]:\n\n\nmojo_zip_path = 'lendingclub-app/src/main/resources/final_gbm.zip'\ngenmodel_jar_path = 'h2o-genmodel.jar'\n\nrecords = [test_data_prepped.iloc[[i]] for i in range(test_data_prepped.shape[0])]\n\n\n# In[9]:\n\n\nget_ipython().run_cell_magic('timeit', '', '\\nresults = []\\n\\nfor record in records:\\n pred = su.mojo_predict_pandas(\\n record,\\n mojo_zip_path,\\n genmodel_jar_path)\\n results.append(pred)')\n\n\n# In[10]:\n\n\nresults = []\n\nfor record in records:\n pred = su.mojo_predict_pandas(\n record,\n mojo_zip_path,\n genmodel_jar_path)\n results.append(pred)\n\n\n# In[11]:\n\n\n# Predictions:\npd.concat(results)\n\n\n# ### Scoring Approach 2: Our Java Application\n\n# In[12]:\n\n\n## Start JVM using subprocess\n\ncmd = \"java -cp \" + \"lendingclub-app/target/\" + \"lendingclub-app-1.0-SNAPSHOT-jar-with-dependencies.jar \" + \"com.lendingclub.app.MojoScoringEntryPoint\"\njvm = subprocess.Popen(cmd)\n\n\n# In[13]:\n\n\n## Establish gateway with the JVM\n\ngateway = JavaGateway()\nmojoscorer = gateway.entry_point.getScorer()\n\n\n# In[14]:\n\n\n## Construct cases as list of JSON objects\n\ncases = test_data_prepped[predictors].to_dict(orient='records')\ncases = [json.dumps(case) for case in cases]\n\n\n# In[15]:\n\n\nget_ipython().run_cell_magic('timeit', '', 'results = []\\n\\nfor case in cases:\\n results.append(literal_eval(mojoscorer.predict(case)))')\n\n\n# In[16]:\n\n\nresults = []\n\nfor case in cases:\n results.append(literal_eval(mojoscorer.predict(case)))\n\npd.DataFrame(results)\n\n\n# In[17]:\n\n\n## Kill JVM\n\njvm.kill()\n\n" ]
[ [ "pandas.concat", "pandas.read_pickle", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
radiasoft/rs_synergia
[ "b43509de7f4a938354dc127762d8e723463e0e95", "b43509de7f4a938354dc127762d8e723463e0e95" ]
[ "rssynergia/base_diagnostics/write_bunch.py", "rssynergia/base_diagnostics/step_diagnostic.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"?\n\n:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport synergia\nimport numpy as np\nimport h5py\n\ndef write_bunch(bunch, file_name = 'particles.h5' , reference_particle = None, txt = False):\n '''\n Write a Synergia bunch to file, mimicing the style of the bunch diagnostics. Defaults to .h5 output.\n\n Assumes that the main processor has access to all particles.\n\n Unlike the standard bunch_diagnostics, this method outputs the beam attributes as h5 attributes rather than as datasets.\n\n Arguments:\n - bunch (synergia.bunch.bunch.Bunch): A Synergia bunch object to be written to file\n - fn (Optional[String]): File name for saving the bunch - defaults to 'particles.h5'\n - txt (Optional[Bool]): Whether to write to a .txt file - defaults to False\n\n '''\n\n particles = bunch.get_local_particles()\n num_particles = particles.shape[0]\n\n #Define attributes for the bunch - this requires a reference particle or other context\n #check to see if reference particle is passed\n if reference_particle is not None:\n charge = reference_particle.get_charge()\n mass = reference_particle.get_total_energy()/reference_particle.get_gamma()\n pz = reference_particle.get_momentum()\n\n else:\n #pass defaults\n charge = 1\n mass = 0.9382723128\n pz = 10.\n\n #specify these as 0 because particle distribution is uncoupled from simulation\n rep = 0\n s_n = 0\n tlen = 0\n\n\n if txt:\n #txt file write is straightforward numpy save\n np.savetxt(file_name,particles)\n\n else:\n #write to HDF-5 by default\n\n #Create h5 file\n dump_file = h5py.File(file_name, 'w')\n\n #specify attributes\n dump_file.attrs['charge'] = charge #Particle charge in units of proton charge\n dump_file.attrs['mass'] = mass #Particle equivalent mass in GeV\n dump_file.attrs['pz'] = pz #Reference particle momentum in GeV/c\n\n dump_file.attrs['s_n'] = s_n #Current s value (between 0 and lattice length), defaults to 0\n dump_file.attrs['tlen'] = tlen #Total length traversed during simulation, defaults to 0\n dump_file.attrs['rep'] = rep #Current repetition, defaults to 0\n\n #write particle phase space array as a single dataset\n ptcl_data = dump_file.create_dataset('particles', (num_particles,7))\n ptcl_data[:] = particles\n\n dump_file.close()\n", "# -*- coding: utf-8 -*-\n\"\"\"?\n\n:copyright: Copyright (c) 2020 RadiaSoft LLC. 
All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nimport synergia\nimport os\nimport numpy as np\nimport h5py as h5\nfrom mpi4py import MPI\n\ncomm_world = MPI.COMM_WORLD\n\ntry:\n import __builtin__\nexcept ImportError:\n # Python 3\n import builtins as __builtin__\n\nmy_rank = comm_world.rank\n\ndef print(*args, **kwargs):\n \"\"\"Overload print to prevent all ranks from printing\"\"\"\n if my_rank == 0:\n return __builtin__.print(*args, **kwargs)\n\nclass CustomDiagnostic(synergia.simulation.Propagate_actions):\n def __init__(self, stepper, element_names=[], step_numbers=[], positions=[]):\n \"\"\"\n Create a step_end_action that will called by Synergia to output bunch data at arbitrary points in the lattice.\n These points can be chosen by element name, positions in the lattice, or direct input of step number.\n\n Each chosen diagnostic point should be accompanied by a callable function that will operate on the bunch object\n and return diagnostic data.\n\n Data is accumulated in Datum objects with information on where and when it was collected.\n\n :param stepper (synergia stepper object): Stepper being used in propagator.\n :param element_names (list of (str, func)): Optional list of tuples comprising element names and corresponding\n diagnostic function to call\n :param step_numbers (list of (int, func)): Optional list of tuples comprising step number and corresponding\n diagnostic function to call\n :param positions (list of (float, func)): Optional list of tuples comprising position (in meters) and\n corresponding diagnostic function to call\n \"\"\"\n synergia.simulation.Propagate_actions.__init__(self)\n\n self.stepper = stepper\n self.steps = []\n self.diagnostics = []\n self.data = []\n\n for elem in element_names:\n elem_steps = self.find_element_steps(self.stepper, elem[0])\n if len(elem_steps) > 0:\n self.steps.append(elem_steps[len(elem_steps) // 2]) # output in middle of element\n self.diagnostics.append(elem[1])\n elif len(elem_steps) == 1:\n self.steps.append(elem_steps[0]) # output in middle of element\n self.diagnostics.append(elem[1])\n else:\n print(\"Could not find element: {}\".format(elem[0]))\n for step in step_numbers:\n if step[0] not in self.steps:\n self.steps.append(step[0])\n self.diagnostics.append(step[1])\n for pos in positions:\n pos_step = self.find_step_position(self.stepper, pos[0])\n print(\"For position: {}, the closest step is {} m away\".format(pos[0], pos_step[1]))\n if pos_step[0] not in self.steps:\n self.steps.append(pos_step[0])\n self.diagnostics.append(pos[1])\n\n for step, diag in zip(self.steps, self.diagnostics):\n assert callable(diag), \"Diagnostic {} is not callable\".format(diag)\n\n x = Datum(*self.find_step_information(self.stepper, step))\n x.diagnostic_name = diag.__name__\n self.data.append(x)\n\n\n @staticmethod\n def find_step_information(stepper, step_number):\n for i, step in enumerate(stepper.get_steps()):\n if i == step_number:\n oper = step.get_operators()[-1]\n slc = oper.get_slices()[-1]\n slice_element = slc.get_lattice_element()\n position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length\n\n return i, slice_element.get_name(), position\n\n @staticmethod\n def find_element_steps(stepper, element_name):\n # TODO: Need to check element memory address to split out multiple element instances\n steps = []\n for step_number, step in enumerate(stepper.get_steps()):\n oper = 
step.get_operators()[-1]\n slc = oper.get_slices()[-1]\n slice_element = slc.get_lattice_element()\n if slice_element.get_name() == element_name:\n position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length\n steps.append(step_number)\n print(\"Step: {}: Slice {}, Element {}, position {}\".format(step_number, slc,\n slice_element.get_name(),\n position))\n\n return steps\n\n @staticmethod\n def find_step_position(stepper, target_position):\n closest_step = (0, 1e130)\n for step_number, step in enumerate(stepper.get_steps()):\n oper = step.get_operators()[-1]\n slc = oper.get_slices()[-1]\n position = stepper.get_lattice_simulator().get_lattice_functions(slc).arc_length\n if abs(target_position - position) < closest_step[1]:\n closest_step = (step_number, abs(target_position - position))\n\n return closest_step\n\n def write_datafiles(self, directory, filesnames=None):\n if filesnames:\n assert len(filesnames) == len(self.data), 'Number of supplied filenames not equal to number of datasets'\n else:\n filesnames = [None] * len(self.data)\n\n for data, name in zip(self.data, filesnames):\n data.write_data(directory, filename=name)\n\n def step_end_action(self, stepper, step, bunch, turn_num, step_num):\n \"\"\"\n Overloads Synergia's default synergia.simulation.Propagate_actions.step_end_action method.\n Must maintain parameter input order (stepper, step, bunch, turn_num, step_num) to function.\n :param stepper: Synergia stepper object\n :param step: Individual step object\n :param bunch: Bunch object\n :param turn_num: (int) Current turn number\n :param step_num: (int) Current step on this turn\n :return:\n \"\"\"\n # TODO: Add option for particular turn intervals\n # TODO: Collect particle data from other processors automatically if statistical data?\n if step_num in self.steps:\n indices = np.where(np.array(self.steps) == step_num)[0]\n for index in indices:\n datum = self.diagnostics[index](bunch)\n self.data[index].turn_num.append(turn_num)\n self.data[index].data.append(datum)\n\n\nclass Datum:\n def __init__(self, step_num, element_name, position):\n self.data = []\n self.turn_num = []\n self.step_num = step_num\n self.element_name = element_name\n self.position = position\n self.diagnostic_name = None\n\n def write_data(self, directory, filename=None):\n # TODO: allow for data caching on an interval\n if my_rank == 0:\n if not os.path.exists(directory):\n os.makedirs(directory)\n if not filename:\n filename = 'data_{}_step_{}.h5'.format(self.diagnostic_name, self.step_num)\n\n filepath = os.path.join(directory, filename)\n\n datafile = h5.File(filepath, 'w')\n for name, attr in [('step_num', self.step_num), ('element_name', self.element_name),\n ('position', self.position)]:\n datafile.attrs[name] = attr\n\n datafile.create_dataset('turns', data=self.turn_num)\n if type(self.data[0]) == np.ndarray:\n for i, d in enumerate(self.data):\n datafile.create_dataset('data{}'.format(i), data=d)\n else:\n datafile.create_dataset('data', data=np.array(self.data))\n\n datafile.close()\n\ndef xdistribution(bunch):\n all_particles = comm_world.gather(bunch.get_local_particles(), root=0)\n if comm_world.rank == 0:\n all_particles = np.vstack(all_particles)[:, 0]\n\n minx, maxx = all_particles.min(), all_particles.max()\n hist, bins = np.histogram(all_particles, range=(minx, maxx), bins='fd')\n centers = []\n for i in range(1, bins.size):\n centers.append((bins[i] + bins[i - 1]) / 2.)\n return np.array([hist, centers])\n\ndef xydistribution(bunch):\n all_particles = 
comm_world.gather(bunch.get_local_particles(), root=0)\n if comm_world.rank == 0:\n all_particles = np.vstack(all_particles)\n\n minx, maxx = all_particles[:, 0].min(), all_particles[:, 0].max()\n miny, maxy = all_particles[:, 1].min(), all_particles[:, 1].max()\n hist, binsx, binsy = np.histogram2d(all_particles[:, 0],\n all_particles[:, 1],\n range=[[minx, maxx], [miny, maxy]], bins=64)\n hist = np.append(hist, [-1])\n return np.array([hist, binsx, binsy])\n" ]
[ [ "numpy.savetxt" ], [ "numpy.append", "numpy.array", "numpy.histogram", "numpy.vstack", "numpy.histogram2d" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
marcovarrone/squidpy
[ "fb68a913db0e0daaabeab69df308461ecaba1268" ]
[ "squidpy/im/_process.py" ]
[ "from types import MappingProxyType\nfrom typing import Any, Union, Mapping, Callable, Optional, Sequence\n\nfrom scanpy import logging as logg\n\nfrom dask import delayed\nfrom scipy.ndimage.filters import gaussian_filter as scipy_gf\nimport numpy as np\nimport dask.array as da\n\nfrom skimage.color import rgb2gray\nfrom skimage.util.dtype import img_as_float32\nfrom dask_image.ndfilters import gaussian_filter as dask_gf\n\nfrom squidpy._docs import d, inject_docs\nfrom squidpy.im._container import ImageContainer\nfrom squidpy._constants._constants import Processing\nfrom squidpy._constants._pkg_constants import Key\n\n__all__ = [\"process\"]\n\n\ndef to_grayscale(img: Union[np.ndarray, da.Array]) -> Union[np.ndarray, da.Array]:\n if img.shape[-1] != 3:\n raise ValueError(f\"Expected channel dimension to be `3`, found `{img.shape[-1]}`.\")\n\n if isinstance(img, da.Array):\n img = da.from_delayed(delayed(img_as_float32)(img), shape=img.shape, dtype=np.float32)\n coeffs = np.array([0.2125, 0.7154, 0.0721], dtype=img.dtype)\n\n return img @ coeffs\n\n return rgb2gray(img)\n\n\[email protected]\n@inject_docs(p=Processing)\ndef process(\n img: ImageContainer,\n layer: Optional[str] = None,\n library_id: Optional[Union[str, Sequence[str]]] = None,\n method: Union[str, Callable[..., np.ndarray]] = \"smooth\",\n chunks: Optional[int] = None,\n lazy: bool = False,\n layer_added: Optional[str] = None,\n channel_dim: Optional[str] = None,\n copy: bool = False,\n apply_kwargs: Mapping[str, Any] = MappingProxyType({}),\n **kwargs: Any,\n) -> Optional[ImageContainer]:\n \"\"\"\n Process an image by applying a transformation.\n\n Parameters\n ----------\n %(img_container)s\n %(img_layer)s\n %(library_id)s\n If `None`, all Z-dimensions are processed at once, treating the image as a 3D volume.\n method\n Processing method to use. Valid options are:\n\n - `{p.SMOOTH.s!r}` - :func:`skimage.filters.gaussian`.\n - `{p.GRAY.s!r}` - :func:`skimage.color.rgb2gray`.\n\n %(custom_fn)s\n %(chunks_lazy)s\n %(layer_added)s\n If `None`, use ``'{{layer}}_{{method}}'``.\n channel_dim\n Name of the channel dimension of the new image layer. 
Default is the same as the original, if the\n processing function does not change the number of channels, and ``'{{channel}}_{{processing}}'`` otherwise.\n %(copy_cont)s\n apply_kwargs\n Keyword arguments for :meth:`squidpy.im.ImageContainer.apply`.\n kwargs\n Keyword arguments for ``method``.\n\n Returns\n -------\n If ``copy = True``, returns a new container with the processed image in ``'{{layer_added}}'``.\n\n Otherwise, modifies the ``img`` with the following key:\n\n - :class:`squidpy.im.ImageContainer` ``['{{layer_added}}']`` - the processed image.\n\n Raises\n ------\n NotImplementedError\n If ``method`` has not been implemented.\n \"\"\"\n layer = img._get_layer(layer)\n method = Processing(method) if isinstance(method, (str, Processing)) else method # type: ignore[assignment]\n apply_kwargs = dict(apply_kwargs)\n apply_kwargs[\"lazy\"] = lazy\n\n if channel_dim is None:\n channel_dim = img[layer].dims[-1]\n layer_new = Key.img.process(method, layer, layer_added=layer_added)\n\n if callable(method):\n callback = method\n elif method == Processing.SMOOTH: # type: ignore[comparison-overlap]\n if library_id is None:\n expected_ndim = 4\n kwargs.setdefault(\"sigma\", [1, 1, 0, 0]) # y, x, z, c\n else:\n expected_ndim = 3\n kwargs.setdefault(\"sigma\", [1, 1, 0]) # y, x, c\n\n sigma = kwargs[\"sigma\"]\n if isinstance(sigma, int):\n kwargs[\"sigma\"] = sigma = [sigma, sigma] + [0] * (expected_ndim - 2)\n if len(sigma) != expected_ndim:\n raise ValueError(f\"Expected `sigma` to be of length `{expected_ndim}`, found `{len(sigma)}`.\")\n\n if chunks is not None:\n # dask_image already handles map_overlap\n chunks_, chunks = chunks, None\n callback = lambda arr, **kwargs: dask_gf(da.asarray(arr).rechunk(chunks_), **kwargs) # noqa: E731\n else:\n callback = scipy_gf\n elif method == Processing.GRAY: # type: ignore[comparison-overlap]\n apply_kwargs[\"drop_axis\"] = 3\n callback = to_grayscale\n else:\n raise NotImplementedError(f\"Method `{method}` is not yet implemented.\")\n\n # to which library_ids should this function be applied?\n if library_id is not None:\n callback = {lid: callback for lid in img._get_library_ids(library_id)} # type: ignore[assignment]\n\n start = logg.info(f\"Processing image using `{method}` method\")\n res: ImageContainer = img.apply(\n callback, layer=layer, copy=True, drop=copy, chunks=chunks, fn_kwargs=kwargs, **apply_kwargs\n )\n\n # if the method changes the number of channels\n if res[layer].shape[-1] != img[layer].shape[-1]:\n modifier = \"_\".join(layer_new.split(\"_\")[1:]) if layer_added is None else layer_added\n channel_dim = f\"{channel_dim}_{modifier}\"\n\n res._data = res.data.rename({res[layer].dims[-1]: channel_dim})\n logg.info(\"Finish\", time=start)\n\n if copy:\n return res.rename(layer, layer_new)\n\n img.add_img(\n img=res[layer],\n layer=layer_new,\n copy=False,\n lazy=lazy,\n dims=res[layer].dims,\n library_id=img[layer].coords[\"z\"].values,\n )\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Jovioluiz/IA
[ "35247c782747a972e73a723608e71faa70cb6916" ]
[ "Trabalho LSTM/trabalho_LSTM.py" ]
[ "import pandas as pd\r\nfrom tensorflow import keras\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Dropout, LSTM\r\nimport plotly.graph_objects as go\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\n# %matplotlib inline\r\n\r\nDataSet=pd.read_csv('cotacao_ouro_teste.csv')\r\n\r\nfig = go.Figure(data=[go.Candlestick(x=DataSet['Date'],\r\n open=DataSet['Open'], high=DataSet['High'],\r\n low=DataSet['Low'], close=DataSet['Close'])\r\n ])\r\n\r\nfig.update_layout(xaxis_rangeslider_visible=False)\r\n# fig.show()\r\n\r\nDataSet=pd.read_csv('cotacao_ouro_treino2.csv')\r\nDataSet=DataSet.dropna()\r\nDataSet.head()\r\n\r\n\r\nDataSet.describe()\r\n\r\nplt.scatter(DataSet['Date'], DataSet['Open'],)\r\nplt.show()\r\n\r\nbase_treinamento = DataSet.iloc[:, 1:2].values\r\n\r\n#DataSet.drop(['Date','Close','High','Low', 'Volume'],axis=1,inplace=True)\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nscaler=MinMaxScaler(feature_range=(0, 1))\r\nDataScaled = scaler.fit_transform(base_treinamento)\r\n\r\nprint(DataScaled)\r\n\r\nprevisores = []\r\npreco_real = []\r\nNRecursao = 90\r\nDataSetLen = len(DataScaled)\r\nprint(DataSetLen)\r\n\r\nfor i in range(NRecursao, DataSetLen):\r\n previsores.append(DataScaled[i-NRecursao:i,0])\r\n preco_real.append(DataScaled[i,0])\r\n\r\nprevisores, preco_real = np.array(previsores), np.array(preco_real)\r\n\r\nprevisores.shape\r\n\r\nprevisores = np.reshape(previsores, (previsores.shape[0], previsores.shape[1], 1))\r\nprevisores.shape\r\n\r\nprint(previsores)\r\n\r\n# Camada de entrada\r\nregressor = Sequential()\r\nregressor.add(LSTM(units = 100, return_sequences = True, input_shape = (previsores.shape[1], 1)))\r\nregressor.add(Dropout(0.3))\r\n\r\n# Cada Oculta 1\r\nregressor.add(LSTM(units = 50, return_sequences = True))\r\nregressor.add(Dropout(0.3))\r\n\r\n# Cada Oculta 2\r\nregressor.add(LSTM(units = 50, return_sequences = True))\r\nregressor.add(Dropout(0.3))\r\n\r\n# Cada Oculta 3\r\nregressor.add(LSTM(units = 50))\r\nregressor.add(Dropout(0.3))\r\n\r\n# Camada de Saída\r\nregressor.add(Dense(units = 1, activation = 'linear'))\r\n\r\nregressor.compile(optimizer = 'rmsprop', loss = 'mean_squared_error',\r\n metrics = ['mean_absolute_error'])\r\nregressor.fit(previsores, preco_real, epochs = 500, batch_size = 32)\r\n\r\nDataSet_teste=pd.read_csv('cotacao_ouro_teste.csv')\r\n\r\npreco_real_teste = DataSet_teste.iloc[:, 1:2].values\r\n\r\nbase_completa = pd.concat((DataSet['Open'], DataSet_teste['Open']), axis = 0)\r\nentradas = base_completa[len(base_completa) - len(DataSet_teste) - NRecursao:].values\r\n\r\nentradas = entradas.reshape(-1, 1)\r\nentradas = scaler.transform(entradas)\r\n\r\nDataSetTestLen = len(DataSet_teste)\r\nNPredictions = 90\r\n\r\nX_teste = []\r\nfor i in range(NRecursao, DataSetTestLen + NRecursao):\r\n X_teste.append(entradas[i - NRecursao:i, 0])\r\n\r\nX_teste = np.array(X_teste)\r\nX_teste = np.reshape(X_teste, (X_teste.shape[0], X_teste.shape[1], 1))\r\n\r\nprevisoes = regressor.predict(X_teste)\r\nprevisoes = scaler.inverse_transform(previsoes)\r\n\r\nRNN=[]\r\npredictions_teste=X_teste[0].T\r\npredictions_teste=np.reshape(predictions_teste, (predictions_teste.shape[0], predictions_teste.shape[1], 1))\r\n\r\npredictions_teste[0][NRecursao-1][0]=regressor.predict(predictions_teste)[0][0]\r\nRNN.append(regressor.predict(predictions_teste)[0])\r\n\r\nfor i in range(NPredictions-1):\r\n predictions_teste=np.roll(predictions_teste,-1)\r\n 
predictions_teste[0][NRecursao-1][0]=regressor.predict(predictions_teste)[0][0]\r\n RNN.append(regressor.predict(predictions_teste)[0])\r\nRNN = scaler.inverse_transform(RNN)\r\n\r\nprint(RNN.mean())\r\nprint(previsoes.mean())\r\nprint(preco_real_teste.mean())\r\n\r\nplt.plot(preco_real_teste, color = 'red', label = 'Preço real')\r\nplt.plot(previsoes, color = 'blue', label = 'Previsões')\r\n# plt.plot(RNN, color = 'green', label = 'RNN')\r\n\r\nplt.title('Cotação Ouro')\r\nplt.xlabel('Tempo')\r\nplt.ylabel('Valor')\r\nplt.legend()\r\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "pandas.concat", "pandas.read_csv", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "numpy.reshape", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "numpy.array", "matplotlib.pyplot.show", "numpy.roll", "sklearn.preprocessing.MinMaxScaler", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
donproc/Paddle
[ "75dadd586996c71cf5b088b6141b94705561773f" ]
[ "python/paddle/fluid/tests/unittests/test_multiply.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport paddle\nimport paddle.tensor as tensor\nimport paddle.fluid as fluid\nfrom paddle.fluid import Program, program_guard\nimport numpy as np\nimport unittest\n\n\nclass TestMultiplyAPI(unittest.TestCase):\n \"\"\"TestMultiplyAPI.\"\"\"\n\n def __run_static_graph_case(self, x_data, y_data, axis=-1):\n with program_guard(Program(), Program()):\n x = paddle.nn.data(name='x', shape=x_data.shape, dtype=x_data.dtype)\n y = paddle.nn.data(name='y', shape=y_data.shape, dtype=y_data.dtype)\n res = tensor.multiply(x, y, axis=axis)\n\n place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(\n ) else fluid.CPUPlace()\n exe = fluid.Executor(place)\n outs = exe.run(fluid.default_main_program(),\n feed={'x': x_data,\n 'y': y_data},\n fetch_list=[res])\n res = outs[0]\n return res\n\n def __run_dynamic_graph_case(self, x_data, y_data, axis=-1):\n paddle.disable_static()\n x = paddle.to_variable(x_data)\n y = paddle.to_variable(y_data)\n res = paddle.multiply(x, y, axis=axis)\n return res.numpy()\n\n def test_multiply(self):\n \"\"\"test_multiply.\"\"\"\n np.random.seed(7)\n # test static computation graph: 1-d array\n x_data = np.random.rand(200)\n y_data = np.random.rand(200)\n res = self.__run_static_graph_case(x_data, y_data)\n self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))\n\n # test static computation graph: 2-d array\n x_data = np.random.rand(2, 500)\n y_data = np.random.rand(2, 500)\n res = self.__run_static_graph_case(x_data, y_data)\n self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))\n\n # test static computation graph: broadcast\n x_data = np.random.rand(2, 500)\n y_data = np.random.rand(500)\n res = self.__run_static_graph_case(x_data, y_data)\n self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))\n\n # test static computation graph: broadcast with axis\n x_data = np.random.rand(2, 300, 40)\n y_data = np.random.rand(300)\n res = self.__run_static_graph_case(x_data, y_data, axis=1)\n expected = np.multiply(x_data, y_data[..., np.newaxis])\n self.assertTrue(np.allclose(res, expected))\n\n # test dynamic computation graph: 1-d array\n x_data = np.random.rand(200)\n y_data = np.random.rand(200)\n res = self.__run_dynamic_graph_case(x_data, y_data)\n self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))\n\n # test dynamic computation graph: 2-d array\n x_data = np.random.rand(20, 50)\n y_data = np.random.rand(20, 50)\n res = self.__run_dynamic_graph_case(x_data, y_data)\n self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))\n\n # test dynamic computation graph: broadcast\n x_data = np.random.rand(2, 500)\n y_data = np.random.rand(500)\n res = self.__run_dynamic_graph_case(x_data, y_data)\n self.assertTrue(np.allclose(res, np.multiply(x_data, y_data)))\n\n # test dynamic computation graph: broadcast with axis\n x_data = np.random.rand(2, 300, 40)\n y_data = 
np.random.rand(300)\n res = self.__run_dynamic_graph_case(x_data, y_data, axis=1)\n expected = np.multiply(x_data, y_data[..., np.newaxis])\n self.assertTrue(np.allclose(res, expected))\n\n\nclass TestMultiplyError(unittest.TestCase):\n \"\"\"TestMultiplyError.\"\"\"\n\n def test_errors(self):\n \"\"\"test_errors.\"\"\"\n # test static computation graph: dtype can not be int8\n paddle.enable_static()\n with program_guard(Program(), Program()):\n x = paddle.nn.data(name='x', shape=[100], dtype=np.int8)\n y = paddle.nn.data(name='y', shape=[100], dtype=np.int8)\n self.assertRaises(TypeError, tensor.multiply, x, y)\n\n # test static computation graph: inputs must be broadcastable \n with program_guard(Program(), Program()):\n x = paddle.nn.data(name='x', shape=[20, 50], dtype=np.float64)\n y = paddle.nn.data(name='y', shape=[20], dtype=np.float64)\n self.assertRaises(fluid.core.EnforceNotMet, tensor.multiply, x, y)\n\n np.random.seed(7)\n # test dynamic computation graph: dtype can not be int8\n paddle.disable_static()\n x_data = np.random.randn(200).astype(np.int8)\n y_data = np.random.randn(200).astype(np.int8)\n x = paddle.to_variable(x_data)\n y = paddle.to_variable(y_data)\n self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)\n\n # test dynamic computation graph: inputs must be broadcastable\n x_data = np.random.rand(200, 5)\n y_data = np.random.rand(200)\n x = paddle.to_variable(x_data)\n y = paddle.to_variable(y_data)\n self.assertRaises(fluid.core.EnforceNotMet, paddle.multiply, x, y)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.allclose", "numpy.multiply", "numpy.random.seed", "numpy.random.randn", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Derek-Wds/MAD-VAE
[ "267ce6ca98f1b1ecc8ebec22ddeca32e2c502d5b", "267ce6ca98f1b1ecc8ebec22ddeca32e2c502d5b" ]
[ "experiments/gen_white_table.py", "experiments/test/train_test_models.py" ]
[ "import json\nimport numpy as np\n\nif __name__ == \"__main__\":\n models = {'vanilla': 0, 'classification': 0, 'proxi_dist': 0, 'combined': 0}\n models_list = ['vanilla', 'classification', 'proxi_dist', 'combined'] # for consistency in older versions\n\n for flavor in models_list:\n with open(f'./accuracy_{flavor}.txt', 'r') as f:\n models[flavor] = json.load(f)\n\n # Models initialized with their base accuracy\n acc = {'fgsm': [0.1316], 'r-fgsm': [0.1521], 'cw': [0.0075], 'mi-fgsm': [0.0074], 'pgd': [0.0073], 'single': [0.9977]}\n acc_name = {'fgsm': 'FGSM', 'r-fgsm': 'Rand-FGSM', 'cw': 'CW', 'mi-fgsm': 'MI-FGSM', 'pgd': 'PGD', 'single': 'Single Pixel'}\n acc_list = list(acc.keys())\n\n for model in acc_list:\n for flavor in models_list:\n acc[model].append(models[flavor][model])\n \n argmax = np.argmax(acc[model][1:]) + 1\n acc[model][argmax] = f'\\\\textbf{{{acc[model][argmax]}}}'\n \n with open('./whitebox_table.tex', 'w') as f:\n c = ['c'] * (len(models_list) + 3)\n f.write(\"\\\\begin{table}[H]\\n\\centering\\n\\\\begin{tabular}{\")\n f.write('|'.join(c))\n f.write(\"}\\nAttack & No Attack & No Defense & Vanilla & Classification & Proximity and Distance & Combined \\\\\\\\ \\\\hline\\n\")\n for model in acc_list:\n acc[model].insert(0, 0.9931)\n acc[model].insert(0, acc_name[model])\n f.write(' & '.join(str(x) for x in acc[model]))\n f.write('\\\\\\\\\\n')\n f.write('\\\\end{tabular}\\n')\n f.write('\\\\caption{Classification accuracy of different models based on the FGSM, Rand-FGSM, CW, Momentum Iterative FGSM, PGD, and Single Pixel White-Box attack on the classifier with the default parameters. The models are trained on the data generated using the first three attack methods while the other three attacks are not included in the training dataset.}\\n')\n f.write('\\\\label{table:whitebox-result}\\n')\n f.write('\\\\end{table}\\n')", "import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torchvision.transforms as transforms\nfrom torch.optim.lr_scheduler import ExponentialLR\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets.mnist import MNIST, FashionMNIST\nfrom test_models import *\nfrom utils.scheduler import MinExponentialLR\n\n# init dataset\ntransform = transforms.Compose([transforms.CenterCrop(28), transforms.ToTensor()])\ntrain_data = FashionMNIST('../data', train=True, download=True, transform=transform)\ntest_data = FashionMNIST('../data', train=False, download=True, transform=transform)\n\n# init dataloader\ntrain_data_loader = DataLoader(train_data, batch_size=256, shuffle=True, num_workers=8)\ntest_data_loader = DataLoader(test_data, batch_size=1024, num_workers=8)\n\n# init models and optimizers\nmodels = [model_a(), model_b(), model_c(), model_d(), model_e()]\ncriterion = nn.CrossEntropyLoss()\noptimizers = [optim.Adam(model.parameters(), lr=1e-3) for model in models]\nschedulers = [MinExponentialLR(optimizer, gamma=0.95, minimum=1e-5) for optimizer in optimizers]\n\n# train function\ndef train(epoch):\n for model in models:\n model.train()\n if torch.cuda.is_available():\n model.cuda()\n loss_list = [[] for i in range(len(models))]\n for i in range(len(models)):\n for j, (images, labels) in enumerate(train_data_loader):\n if torch.cuda.is_available():\n images = images.cuda()\n labels = labels.cuda()\n optimizers[i].zero_grad()\n output = models[i](images)\n loss = criterion(output, labels)\n loss_list[i].append(loss.detach().cpu().item())\n if j % 10 == 0:\n 
print(models[i].name)\n print('Train - Epoch %d, Batch: %d, Loss: %f' % (epoch, j, loss.detach().cpu().item()))\n loss.backward()\n optimizers[i].step()\n schedulers[i].step()\n\n# test function\ndef test():\n for model in models:\n model.eval()\n total_corrects = [0 for i in range(len(models))]\n avg_losses = [0.0 for i in range(len(models))]\n for i in range(len(models)):\n for j, (images, labels) in enumerate(test_data_loader):\n output = models[i].cpu()(images)\n avg_losses[i] += criterion(output, labels).sum()\n pred = output.detach().max(1)[1]\n total_corrects[i] += pred.eq(labels.view_as(pred)).sum()\n avg_losses[i] /= len(test_data)\n print('%s test Avg. Loss: %f, Accuracy: %f' % (models[i].name, avg_losses[i].detach().cpu().item(), float(total_corrects[i]) / len(test_data)))\n\n# main function\ndef main():\n for e in range(1, 30):\n train(e)\n test()\n # save pretrained models\n for i in range(len(models)):\n torch.save(models[i].state_dict(), \"pretrained/%s_fmnist_params.pt\" % models[i].name)\n\nif __name__ == '__main__':\n main()" ]
[ [ "numpy.argmax" ], [ "torch.nn.CrossEntropyLoss", "torch.utils.data.DataLoader", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zx-sdu/NodeFinder
[ "edaeeba8fb5a1ca28222313f6de7a6dfa8253093" ]
[ "tests/search/test_nodal_surface.py" ]
[ "# -*- coding: utf-8 -*-\n\n# © 2017-2019, ETH Zurich, Institut für Theoretische Physik\n# Author: Dominik Gresch <[email protected]>\n\"\"\"\nTests with a nodal line.\n\"\"\"\n\nimport numpy as np\nimport pytest\n\nfrom nodefinder.search import run\nfrom nodefinder.search.refinement_stencil import get_mesh_stencil\n\n\[email protected]\ndef nodal_surface_properties():\n \"\"\"\n Fixture which defines the helper functions describing the nodal surface.\n \"\"\"\n\n def dist_fct(pos):\n _, _, dz = (np.array(pos) % 1) - 0.5\n return abs(dz)\n\n def gap_fct(pos):\n dx, _, dz = (np.array(pos) % 1) - 0.5\n return dz**2 * (0.1 + 10 * dx**2)\n\n def parametrization(s, t):\n return [s, t, 0.5]\n\n return dist_fct, gap_fct, parametrization\n\n\ndef test_nodal_surface(nodal_surface_properties, score_nodal_surface): # pylint: disable=redefined-outer-name\n \"\"\"\n Test that a nodal surface is found.\n \"\"\"\n dist_fct, gap_fct, parametrization = nodal_surface_properties\n\n result = run(\n gap_fct=gap_fct,\n gap_threshold=1e-4,\n feature_size=1e-1,\n refinement_stencil=get_mesh_stencil(mesh_size=(2, 2, 2)),\n initial_mesh_size=(3, 3, 3),\n use_fake_potential=False,\n )\n score_nodal_surface(\n result=result,\n dist_func=dist_fct,\n surface_parametrization=parametrization,\n cutoff_accuracy=2e-3,\n cutoff_coverage=1e-1,\n )\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
gicsaw/ARAE_SMILES
[ "05cd508be0450ad0e8944e6280b8fa2863cc8dd0" ]
[ "gen_CARAE_con_logP_SAS_TPSA.py" ]
[ "from model.CARAE import ARAE\n#from utils.utils import *\nimport numpy as np\nimport os, sys\nimport time\nimport tensorflow as tf\nimport collections\nimport copy\nfrom six.moves import cPickle\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\ndef convert_to_smiles(vector, char):\n smiles=\"\"\n for i in vector:\n smiles+=char[i]\n return smiles\n\ndef cal_accuracy(S1, S2, length):\n count = 0\n for i in range(len(S1)):\n if np.array_equal(S1[i][1:length[i]+1],S2[i][:length[i]]):\n count+=1\n return count\n\nchar_list= [\"H\",\"C\",\"N\",\"O\",\"F\",\"P\",\"S\",\"Cl\",\"Br\",\"I\",\n\"n\",\"c\",\"o\",\"s\",\n\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\n\"(\",\")\",\"[\",\"]\",\n\"-\",\"=\",\"#\",\"/\",\"\\\\\",\"+\",\"@\",\"X\",\"Y\"]\n\nchar_dict={'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4, 'P': 5, \n'S': 6, 'Cl': 7, 'Br': 8, 'I': 9, \n'n': 10, 'c': 11, 'o': 12, 's': 13, \n'1': 14, '2': 15, '3': 16, '4': 17, '5': 18, '6': 19, '7': 20, '8': 21, \n'(': 22, ')': 23, '[': 24, ']': 25, '-': 26, '=': 27, '#': 28, \n'/': 29, '\\\\': 30, '+': 31, '@': 32, 'X': 33, 'Y': 34}\n\nvocab_size = len(char_list)\nlatent_size = 300\nbatch_size = 100\nsample_size = 100\nseq_length = 110\ndev = 0.0\n\n#input properties, [logP,SAS,TPSA]\n#task_val=np.array([1.5,2,30])\nif len(sys.argv)<=3:\n print(\"python gen_CARAE_con_logP_SAS_TPSA logP SAS TPSA \")\n sys.exit()\n\nlogP_set=float(sys.argv[1])\nSAS_set=float(sys.argv[2])\nTPSA_set=float(sys.argv[3])\ntask_val=np.array([logP_set,SAS_set,TPSA_set])\n\nprint(task_val)\n\nmodel_name=\"CARAE_logP_SAS_TPSA\"\nsave_dir=\"./save/\"+model_name\n\nout_dir0=\"out_\"+model_name+\"G_%d_%d_%d\" %(int(logP_set*10),int(SAS_set),int(TPSA_set))\nif not os.path.exists(out_dir0):\n os.makedirs(out_dir0)\n\nproperty_task=3\ntask_nor=np.array([10.0,10.0,150.0])\ntask_low=np.array([-1.0,1.0,0.0])\ntask_high=np.array([5.0,8.0,150.0])\ntask_low=task_low/task_nor\ntask_high=task_high/task_nor\n\ntask_val=task_val/task_nor\n\nNtest=10000\nnum_test_batches = int(Ntest/batch_size)\n\n\nmodel = ARAE(vocab_size = vocab_size,\n batch_size = batch_size,\n latent_size = latent_size,\n sample_size = sample_size,\n property_task = property_task\n )\n\n\ntotal_st=time.time()\n\nepochs=[39]\n\nfor epoch in epochs:\n out_dir=out_dir0+\"/%d\" %epoch\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n output_file=out_dir+\"/result_\"+model_name+\"_%d.txt\" %epoch\n fp0=open(output_file,\"w\")\n model.restore(save_dir+\"/model.ckpt-%d\" %epoch)\n\n latent_vector_fake=[]\n Y_fake=[]\n P_fake=[]\n smiles_fake=[]\n\n for itest in range(num_test_batches):\n\n# fp0.write('**********************************************\\n')\n decoder_state = model.get_decoder_state()\n s = np.random.normal(0.0, 0.25, [batch_size, sample_size]).clip(-1.0,1.0)\n\n# p = p_batches2[itest]\n# cp = np.random.uniform(task_low,task_high, [batch_size, property_task])\n p=np.empty([batch_size,property_task])\n p[:,0].fill(task_val[0])\n p[:,1].fill(task_val[1])\n p[:,2].fill(task_val[2])\n\n P_fake.append(p)\n\n latent_vector = model.generate_latent_vector(s)\n latent_vector_fake.append(latent_vector)\n\n start_token = np.array([char_list.index('X') for i in range(batch_size)])\n start_token = np.reshape(start_token, [batch_size, 1])\n length = np.array([1 for i in range(batch_size)])\n smiles = ['' for i in range(batch_size)]\n Y=[]\n for i in range(seq_length):\n m, state = model.generate_molecule(start_token, latent_vector, length, p, decoder_state)\n decoder_state = state\n start_token = np.argmax(m,2)\n 
Y.append(start_token[:,0])\n smiles = [s + str(char_list[start_token[j][0]]) for j,s in enumerate(smiles)]\n Y=list(map(list,zip(*Y)))\n Y_fake.append(Y)\n smiles_fake+=smiles\n\n\n latent_vector_fake=np.array(latent_vector_fake,dtype=\"float32\").reshape(-1,latent_size)\n P_fake=np.array(P_fake,dtype=\"float32\").reshape(-1,property_task)\n Y_fake=np.array(Y_fake,dtype=\"int32\").reshape(-1,seq_length)\n outfile=out_dir+\"/Zfake.npy\"\n np.save(outfile,latent_vector_fake)\n outfile=out_dir+\"/Pfake.npy\"\n np.save(outfile,P_fake)\n outfile=out_dir+\"/Yfake.npy\"\n np.save(outfile,Y_fake)\n\n outfile=out_dir+\"/smiles_fake.txt\"\n fp_out=open(outfile,'w')\n for line in smiles_fake:\n line_out=line+\"\\n\"\n fp_out.write(line_out)\n fp_out.close()\n\ntotal_et=time.time()\n\nprint (\"total_time : \", total_et-total_st)\n\n\n\n\n" ]
[ [ "numpy.array_equal", "numpy.reshape", "numpy.save", "numpy.random.normal", "numpy.argmax", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BolachasDaAvo/PyTorch-StudioGAN
[ "21d0f1d976d0c5c3d240295e9efa83c105e40ac7", "21d0f1d976d0c5c3d240295e9efa83c105e40ac7" ]
[ "src/utils/resize.py", "src/utils/style_ops/filtered_lrelu.py" ]
[ "\"\"\"\nMIT License\n\nCopyright (c) 2021 Gaurav Parmar\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n### On Buggy Resizing Libraries and Surprising Subtleties in FID Calculation\n### (https://www.cs.cmu.edu/~clean-fid/)\n### Gaurav Parmar, Richard Zhang, Jun-Yan Zhu\n### https://github.com/GaParmar/clean-fid/blob/main/cleanfid/resize.py\n\n\nimport os\n\nfrom PIL import Image\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\n\ndict_name_to_filter = {\n \"PIL\": {\n \"bicubic\": Image.BICUBIC,\n \"bilinear\": Image.BILINEAR,\n \"nearest\": Image.NEAREST,\n \"lanczos\": Image.LANCZOS,\n \"box\": Image.BOX\n }\n}\n\n\ndef build_resizer(mode, size):\n if mode == \"clean\":\n return make_resizer(\"PIL\", \"bilinear\", (size, size))\n elif mode == \"legacy\":\n return make_resizer(\"PyTorch\", \"bilinear\", (size, size))\n else:\n raise ValueError(f\"Invalid mode {mode} specified\")\n\n\ndef make_resizer(library, filter, output_size):\n if library == \"PIL\":\n s1, s2 = output_size\n def resize_single_channel(x_np):\n img = Image.fromarray(x_np.astype(np.float32), mode='F')\n img = img.resize(output_size, resample=dict_name_to_filter[library][filter])\n return np.asarray(img).reshape(s1, s2, 1)\n def func(x):\n x = [resize_single_channel(x[:, :, idx]) for idx in range(3)]\n x = np.concatenate(x, axis=2).astype(np.float32)\n return x\n elif library == \"PyTorch\":\n import warnings\n # ignore the numpy warnings\n warnings.filterwarnings(\"ignore\")\n def func(x):\n x = torch.Tensor(x.transpose((2, 0, 1)))[None, ...]\n x = F.interpolate(x, size=output_size, mode=filter, align_corners=False)\n x = x[0, ...].cpu().data.numpy().transpose((1, 2, 0)).clip(0, 255)\n return x\n else:\n raise NotImplementedError('library [%s] is not include' % library)\n return func\n", "# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# NVIDIA CORPORATION and its licensors retain all intellectual property\n# and proprietary rights in and to this software, related documentation\n# and any modifications thereto. Any use, reproduction, disclosure or\n# distribution of this software and related documentation without an express\n# license agreement from NVIDIA CORPORATION is strictly prohibited.\n\nimport os\nimport numpy as np\nimport torch\nimport warnings\n\nfrom .. import custom_ops\nfrom .. import style_misc as misc\nfrom . import upfirdn2d\nfrom . 
import bias_act\n\n#----------------------------------------------------------------------------\n\n_plugin = None\n\ndef _init():\n global _plugin\n if _plugin is None:\n _plugin = custom_ops.get_plugin(\n module_name='filtered_lrelu_plugin',\n sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],\n headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],\n source_dir=os.path.dirname(__file__),\n extra_cuda_cflags=['--use_fast_math'],\n )\n return True\n\ndef _get_filter_size(f):\n if f is None:\n return 1, 1\n assert isinstance(f, torch.Tensor)\n assert 1 <= f.ndim <= 2\n return f.shape[-1], f.shape[0] # width, height\n\ndef _parse_padding(padding):\n if isinstance(padding, int):\n padding = [padding, padding]\n assert isinstance(padding, (list, tuple))\n assert all(isinstance(x, (int, np.integer)) for x in padding)\n padding = [int(x) for x in padding]\n if len(padding) == 2:\n px, py = padding\n padding = [px, px, py, py]\n px0, px1, py0, py1 = padding\n return px0, px1, py0, py1\n\n#----------------------------------------------------------------------------\n\ndef filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):\n r\"\"\"Filtered leaky ReLU for a batch of 2D images.\n\n Performs the following sequence of operations for each channel:\n\n 1. Add channel-specific bias if provided (`b`).\n\n 2. Upsample the image by inserting N-1 zeros after each pixel (`up`).\n\n 3. Pad the image with the specified number of zeros on each side (`padding`).\n Negative padding corresponds to cropping the image.\n\n 4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it\n so that the footprint of all output pixels lies within the input image.\n\n 5. Multiply each value by the provided gain factor (`gain`).\n\n 6. Apply leaky ReLU activation function to each value.\n\n 7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided.\n\n 8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking\n it so that the footprint of all output pixels lies within the input image.\n\n 9. Downsample the image by keeping every Nth pixel (`down`).\n\n The fused op is considerably more efficient than performing the same calculation\n using standard PyTorch ops. It supports gradients of arbitrary order.\n\n Args:\n x: Float32/float16/float64 input tensor of the shape\n `[batch_size, num_channels, in_height, in_width]`.\n fu: Float32 upsampling FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n fd: Float32 downsampling FIR filter of the shape\n `[filter_height, filter_width]` (non-separable),\n `[filter_taps]` (separable), or\n `None` (identity).\n b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type\n as `x`. The length of vector must must match the channel dimension of `x`.\n up: Integer upsampling factor (default: 1).\n down: Integer downsampling factor. (default: 1).\n padding: Padding with respect to the upsampled image. 
Can be a single number\n or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`\n (default: 0).\n gain: Overall scaling factor for signal magnitude (default: sqrt(2)).\n slope: Slope on the negative side of leaky ReLU (default: 0.2).\n clamp: Maximum magnitude for leaky ReLU output (default: None).\n flip_filter: False = convolution, True = correlation (default: False).\n impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`).\n\n Returns:\n Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.\n \"\"\"\n assert isinstance(x, torch.Tensor)\n assert impl in ['ref', 'cuda']\n if impl == 'cuda' and x.device.type == 'cuda' and _init():\n return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0)\n return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)\n\n#----------------------------------------------------------------------------\n\ndef _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):\n \"\"\"Slow and memory-inefficient reference implementation of `filtered_lrelu()` using\n existing `upfirdn2n()` and `bias_act()` ops.\n \"\"\"\n assert isinstance(x, torch.Tensor) and x.ndim == 4\n fu_w, fu_h = _get_filter_size(fu)\n fd_w, fd_h = _get_filter_size(fd)\n if b is not None:\n assert isinstance(b, torch.Tensor) and b.dtype == x.dtype\n misc.assert_shape(b, [x.shape[1]])\n assert isinstance(up, int) and up >= 1\n assert isinstance(down, int) and down >= 1\n px0, px1, py0, py1 = _parse_padding(padding)\n assert gain == float(gain) and gain > 0\n assert slope == float(slope) and slope >= 0\n assert clamp is None or (clamp == float(clamp) and clamp >= 0)\n\n # Calculate output size.\n batch_size, channels, in_h, in_w = x.shape\n in_dtype = x.dtype\n out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down\n out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down\n\n # Compute using existing ops.\n x = bias_act.bias_act(x=x, b=b) # Apply bias.\n x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.\n x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp.\n x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample.\n\n # Check output shape & dtype.\n misc.assert_shape(x, [batch_size, channels, out_h, out_w])\n assert x.dtype == in_dtype\n return x\n\n#----------------------------------------------------------------------------\n\n_filtered_lrelu_cuda_cache = dict()\n\ndef _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):\n \"\"\"Fast CUDA implementation of `filtered_lrelu()` using custom ops.\n \"\"\"\n assert isinstance(up, int) and up >= 1\n assert isinstance(down, int) and down >= 1\n px0, px1, py0, py1 = _parse_padding(padding)\n assert gain == float(gain) and gain > 0\n gain = float(gain)\n assert slope == float(slope) and slope >= 0\n slope = float(slope)\n assert clamp is None or (clamp == float(clamp) and clamp >= 0)\n clamp = float(clamp if clamp is not None else 'inf')\n\n # Lookup from cache.\n key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)\n if key in _filtered_lrelu_cuda_cache:\n return 
_filtered_lrelu_cuda_cache[key]\n\n # Forward op.\n class FilteredLReluCuda(torch.autograd.Function):\n @staticmethod\n def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ\n assert isinstance(x, torch.Tensor) and x.ndim == 4\n\n # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable).\n if fu is None:\n fu = torch.ones([1, 1], dtype=torch.float32, device=x.device)\n if fd is None:\n fd = torch.ones([1, 1], dtype=torch.float32, device=x.device)\n assert 1 <= fu.ndim <= 2\n assert 1 <= fd.ndim <= 2\n\n # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1.\n if up == 1 and fu.ndim == 1 and fu.shape[0] == 1:\n fu = fu.square()[None]\n if down == 1 and fd.ndim == 1 and fd.shape[0] == 1:\n fd = fd.square()[None]\n\n # Missing sign input tensor.\n if si is None:\n si = torch.empty([0])\n\n # Missing bias tensor.\n if b is None:\n b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device)\n\n # Construct internal sign tensor only if gradients are needed.\n write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad)\n\n # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout.\n strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1]\n if any(a < b for a, b in zip(strides[:-1], strides[1:])):\n warnings.warn(\"low-performance memory layout detected in filtered_lrelu input\", RuntimeWarning)\n\n # Call C++/Cuda plugin if datatype is supported.\n if x.dtype in [torch.float16, torch.float32]:\n if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device):\n warnings.warn(\"filtered_lrelu called with non-default cuda stream but concurrent execution is not supported\", RuntimeWarning)\n y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs)\n else:\n return_code = -1\n\n # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because\n # only the bit-packed sign tensor is retained for gradient computation.\n if return_code < 0:\n warnings.warn(\"filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback\", RuntimeWarning)\n\n y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias.\n y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.\n so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. 
Modifies y in-place.\n y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample.\n\n # Prepare for gradient computation.\n ctx.save_for_backward(fu, fd, (si if si.numel() else so))\n ctx.x_shape = x.shape\n ctx.y_shape = y.shape\n ctx.s_ofs = sx, sy\n return y\n\n @staticmethod\n def backward(ctx, dy): # pylint: disable=arguments-differ\n fu, fd, si = ctx.saved_tensors\n _, _, xh, xw = ctx.x_shape\n _, _, yh, yw = ctx.y_shape\n sx, sy = ctx.s_ofs\n dx = None # 0\n dfu = None; assert not ctx.needs_input_grad[1]\n dfd = None; assert not ctx.needs_input_grad[2]\n db = None # 3\n dsi = None; assert not ctx.needs_input_grad[4]\n dsx = None; assert not ctx.needs_input_grad[5]\n dsy = None; assert not ctx.needs_input_grad[6]\n\n if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]:\n pp = [\n (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0,\n xw * up - yw * down + px0 - (up - 1),\n (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0,\n xh * up - yh * down + py0 - (up - 1),\n ]\n gg = gain * (up ** 2) / (down ** 2)\n ff = (not flip_filter)\n sx = sx - (fu.shape[-1] - 1) + px0\n sy = sy - (fu.shape[0] - 1) + py0\n dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy)\n\n if ctx.needs_input_grad[3]:\n db = dx.sum([0, 2, 3])\n\n return dx, dfu, dfd, db, dsi, dsx, dsy\n\n # Add to cache.\n _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda\n return FilteredLReluCuda\n\n#----------------------------------------------------------------------------\n" ]
[ [ "numpy.asarray", "numpy.concatenate", "torch.nn.functional.interpolate" ], [ "torch.ones", "torch.empty", "numpy.sqrt", "torch.zeros", "torch.cuda.current_stream", "torch.cuda.default_stream" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SeanNaren/aitextgen
[ "12a647cd6e8f2a9f8b0dfa5e380ad50d10a527cd" ]
[ "aitextgen/train.py" ]
[ "import pytorch_lightning as pl\nfrom pytorch_lightning.callbacks.progress import ProgressBarBase\nfrom tqdm.auto import tqdm\nimport sys\nimport torch\nfrom torch.optim import AdamW\nfrom torch.utils.data import DataLoader\nfrom transformers import get_linear_schedule_with_warmup\nimport os\nimport shutil\nimport subprocess\n\n\nclass ATGTransformer(pl.LightningModule):\n \"\"\"\n A training module for aitextgen.\n \"\"\"\n\n def __init__(self, model, dataset, hparams, tokenizer):\n super(ATGTransformer, self).__init__()\n self.model, self.dataset, self.hparams, self.tokenizer = (\n model,\n dataset,\n hparams,\n tokenizer,\n )\n\n def forward(self, inputs):\n return self.model(**inputs, return_dict=False)\n\n def training_step(self, batch, batch_num):\n outputs = self({\"input_ids\": batch, \"labels\": batch})\n loss = outputs[0]\n\n return {\"loss\": loss}\n\n def train_dataloader(self):\n return DataLoader(\n self.dataset,\n batch_size=self.hparams[\"batch_size\"],\n shuffle=True,\n pin_memory=self.hparams[\"pin_memory\"],\n num_workers=self.hparams[\"num_workers\"],\n )\n\n def configure_optimizers(self):\n \"Prepare optimizer\"\n\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [\n p\n for n, p in self.model.named_parameters()\n if not any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": self.hparams[\"weight_decay\"],\n },\n {\n \"params\": [\n p\n for n, p in self.model.named_parameters()\n if any(nd in n for nd in no_decay)\n ],\n \"weight_decay\": 0.0,\n },\n ]\n optimizer = AdamW(\n optimizer_grouped_parameters,\n lr=self.hparams[\"learning_rate\"],\n eps=self.hparams[\"adam_epsilon\"],\n )\n\n scheduler = get_linear_schedule_with_warmup(\n optimizer,\n num_warmup_steps=self.hparams[\"warmup_steps\"],\n num_training_steps=self.hparams[\"num_steps\"],\n )\n\n return [optimizer], [scheduler]\n\n\nclass ATGProgressBar(ProgressBarBase):\n \"\"\"A variant progress bar that works off of steps and prints periodically.\"\"\"\n\n def __init__(\n self,\n save_every,\n generate_every,\n output_dir,\n n_generate,\n gpu,\n smoothing,\n run_id,\n save_gdrive,\n progress_bar_refresh_rate,\n train_transformers_only,\n num_layers_freeze,\n ):\n super().__init__()\n self.enabled = True\n self.save_every = save_every\n self.generate_every = generate_every\n self.output_dir = output_dir\n self.n_generate = n_generate\n self.gpu = gpu\n self.steps = 0\n self.prev_avg_loss = None\n self.smoothing = smoothing\n self.run_id = run_id\n self.save_gdrive = save_gdrive\n self.progress_bar_refresh_rate = progress_bar_refresh_rate\n self.train_transformers_only = train_transformers_only\n self.num_layers_freeze = num_layers_freeze\n\n def enabled(self):\n self.enabled = True\n\n def disable(self):\n self.enabled = False\n\n def on_train_start(self, trainer, pl_module):\n super().on_train_start(trainer, pl_module)\n self.main_progress_bar = tqdm(\n total=trainer.max_steps,\n disable=not self.enabled,\n smoothing=0,\n leave=True,\n dynamic_ncols=True,\n file=sys.stdout,\n )\n self.freeze_layers(pl_module)\n\n def on_train_end(self, trainer, pl_module):\n self.main_progress_bar.close()\n self.unfreeze_layers(pl_module)\n\n def on_batch_end(self, trainer, pl_module):\n super().on_batch_end(trainer, pl_module)\n\n # clean up the GPU cache used for the benchmark\n # https://discuss.pytorch.org/t/about-torch-cuda-empty-cache/34232/4\n if self.steps == 0 and self.gpu:\n torch.cuda.empty_cache()\n\n current_loss = float(trainer.progress_bar_dict[\"loss\"])\n 
self.steps += 1\n avg_loss = 0\n if current_loss == current_loss: # don't add if current_loss is NaN\n avg_loss = self.average_loss(\n current_loss, self.prev_avg_loss, self.smoothing\n )\n self.prev_avg_loss = avg_loss\n\n desc = f\"Loss: {current_loss:.3f} — Avg: {avg_loss:.3f}\"\n\n if self.steps % self.progress_bar_refresh_rate == 0:\n if self.gpu:\n # via pytorch-lightning's get_gpu_memory_map()\n result = subprocess.run(\n [\n shutil.which(\"nvidia-smi\"),\n \"--query-gpu=memory.used\",\n \"--format=csv,nounits,noheader\",\n ],\n encoding=\"utf-8\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n check=True,\n )\n gpu_memory = result.stdout.strip().split(os.linesep)[0]\n desc += f\" — GPU Mem: {gpu_memory} MB\"\n self.main_progress_bar.update(self.progress_bar_refresh_rate)\n self.main_progress_bar.set_description(desc)\n\n if self.enabled:\n did_unfreeze = False\n if self.save_every > 0 and self.steps % self.save_every == 0:\n self.unfreeze_layers(pl_module)\n self.save_pytorch_model(trainer, pl_module)\n did_unfreeze = True\n\n if self.generate_every > 0 and self.steps % self.generate_every == 0:\n self.unfreeze_layers(pl_module)\n self.generate_sample_text(trainer, pl_module)\n did_unfreeze = True\n\n if did_unfreeze:\n self.freeze_layers(pl_module)\n\n def generate_sample_text(self, trainer, pl_module):\n self.main_progress_bar.write(\n f\"\\033[1m{self.steps:,} steps reached: generating sample texts.\\033[0m\"\n )\n\n gen_length = min(pl_module.model.config.n_positions, 256)\n\n outputs = pl_module.model.generate(\n input_ids=None,\n max_length=gen_length,\n do_sample=True,\n num_return_sequences=self.n_generate,\n temperature=0.7,\n pad_token_id=pl_module.tokenizer.pad_token_id,\n )\n\n special_token_id_tensor = torch.unique(\n torch.as_tensor(\n [pl_module.tokenizer.bos_token_id, pl_module.tokenizer.eos_token_id]\n )\n ).to(pl_module.model.device.type)\n\n outputs = [\n output[\n ~output.unsqueeze(1).eq(special_token_id_tensor.unsqueeze(1)).any(1)\n ].tolist()\n for output in outputs\n ]\n\n if self.n_generate > 1:\n gen_texts = pl_module.tokenizer.batch_decode(outputs)\n else:\n gen_texts = [pl_module.tokenizer.decode(outputs[0])]\n\n for text in gen_texts:\n self.main_progress_bar.write(\"=\" * 10)\n self.main_progress_bar.write(text)\n\n self.main_progress_bar.write(\"=\" * 10)\n\n def save_pytorch_model(self, trainer, pl_module):\n self.main_progress_bar.write(\n f\"\\033[1m{self.steps:,} steps reached: saving model to /{self.output_dir}\\033[0m\"\n )\n pl_module.model.save_pretrained(self.output_dir)\n\n if self.save_gdrive:\n for pt_file in [\"pytorch_model.bin\", \"config.json\"]:\n shutil.copyfile(\n os.path.join(self.output_dir, pt_file),\n os.path.join(\"/content/drive/My Drive/\", self.run_id, pt_file),\n )\n\n def average_loss(self, current_loss, prev_avg_loss, smoothing):\n if prev_avg_loss is None:\n return current_loss\n else:\n return (smoothing * current_loss) + (1 - smoothing) * prev_avg_loss\n\n def modify_layers(self, pl_module, unfreeze):\n if self.train_transformers_only:\n for name, param in pl_module.model.named_parameters():\n if self.num_layers_freeze:\n layer_num = int(name.split(\".\")[2]) if \".h.\" in name else None\n to_freeze = layer_num and layer_num < self.num_layers_freeze\n else:\n to_freeze = False\n if name == \"transformer.wte.weight\" or to_freeze:\n param.requires_grad = unfreeze\n\n def freeze_layers(self, pl_module):\n self.modify_layers(pl_module, False)\n\n def unfreeze_layers(self, pl_module):\n self.modify_layers(pl_module, 
True)\n" ]
[ [ "torch.optim.AdamW", "torch.utils.data.DataLoader", "torch.cuda.empty_cache", "torch.as_tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
urasakikeisuke/segmenter
[ "69ff016e621b7d1e4b8573a8b150e80dbc70cf84" ]
[ "segm/utils/torch.py" ]
[ "import os\nimport torch\n\n\n\"\"\"\nGPU wrappers\n\"\"\"\n\nuse_gpu = False\ngpu_id = 0\ndevice = None\n\ndistributed = False\ndist_rank = 0\nworld_size = 1\n\n\ndef set_gpu_mode(mode):\n global use_gpu\n global device\n global gpu_id\n global distributed\n global dist_rank\n global world_size\n gpu_id = int(os.environ.get(\"SLURM_LOCALID\", 0))\n dist_rank = int(os.environ.get(\"SLURM_PROCID\", 0))\n world_size = int(os.environ.get(\"SLURM_NTASKS\", 1))\n\n distributed = world_size > 1\n use_gpu = mode\n device = torch.device(f\"cuda:{gpu_id}\" if use_gpu else \"cpu\")\n torch.backends.cudnn.benchmark = True\n" ]
[ [ "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
anton-buyskikh/QuSpin
[ "4e46b495e399414d9361d659e186492a1ac5b511", "769d3817870f6ff55c4283af46f94e11c36f4121", "769d3817870f6ff55c4283af46f94e11c36f4121", "769d3817870f6ff55c4283af46f94e11c36f4121" ]
[ "quspin/tools/expm_multiply_parallel_core/expm_multiply_parallel_core.py", "tests/sent_wrapper_test.py", "tests/get_vec_boson_test.py", "sphinx/doc_examples/spinful_fermion_basis_general-adv_ph-example.py" ]
[ "from scipy.sparse.linalg import LinearOperator,onenormest,aslinearoperator\nfrom .expm_multiply_parallel_wrapper import _wrapper_expm_multiply,_wrapper_csr_trace\nimport scipy.sparse as _sp\nimport numpy as _np\n\nclass expm_multiply_parallel(object):\n\t\"\"\"Implements `scipy.sparse.linalg.expm_multiply()` for *openmp*.\n\n\tNotes\n\t-----\n\tThis is a wrapper over custom c++ code.\n\n\tExamples\n\t--------\n\n\tThis example shows how to construct the `expm_multiply_parallel` object.\n\n\tFurther code snippets can be found in the examples for the function methods of the class.\n\tThe code snippet below initiates the class, and is required to run the example codes for the function methods.\n\t\n\t.. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n\t\t:linenos:\n\t\t:language: python\n\t\t:lines: 7-30\n\t\n\t\"\"\"\n\tdef __init__(self,A,a=1.0):\n\t\t\"\"\"Initializes `expm_multiply_parallel`. \n\n\t\tParameters\n\t\t-----------\n\t\tA : {array_like, scipy.sparse matrix}\n\t\t\tThe operator (matrix) whose exponential is to be calculated.\n\t\ta : scalar, optional\n\t\t\tscalar value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\\\mathrm{e}^{aA}`.\n\t\t\t\n\t\t\"\"\"\n\t\tif _np.array(a).ndim == 0:\n\t\t\tself._a = a\n\t\telse:\n\t\t\traise ValueError(\"a must be scalar value.\")\n\n\t\tself._A = _sp.csr_matrix(A,copy=False)\n\n\t\tif A.shape[0] != A.shape[1]:\n\t\t\traise ValueError(\"A must be a square matrix.\")\n\n\t\ttol = _np.finfo(A.dtype).eps/2\n\t\tself._tol = _np.array(tol,dtype=tol.dtype)\n\n\t\tself._mu = _np.array(_wrapper_csr_trace(self._A.indptr,self._A.indices,self._A.data)/self._A.shape[0],dtype=A.dtype)\n\t\tself._A -= self._mu * _sp.identity(self._A.shape[0],dtype=self._A.dtype,format=\"csr\")\n\t\tself._A_1_norm = _np.max(_np.abs(A).sum(axis=0))\n\t\tself._calculate_partition()\n\n\n\t@property\n\tdef a(self):\n\t\t\"\"\"scalar: value multiplying generator matrix :math:`A` in matrix exponential: :math:`\\\\mathrm{e}^{aA}`\"\"\"\n\t\treturn self._a\n\n\t@property\n\tdef A(self):\n\t\t\"\"\"scipy.sparse.csr_matrix: csr_matrix to be exponentiated.\"\"\"\n\t\treturn self._A\n\n\n\tdef set_a(self,a):\n\t\t\"\"\"Sets the value of the property `a`.\n\n\t\tExamples\n\t\t--------\n\n\t\t.. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n\t\t\t:linenos:\n\t\t\t:language: python\n\t\t\t:lines: 32-35\n\t\t\t\n\t\tParameters\n\t\t-----------\n\t\ta : scalar\n\t\t\tnew value of `a`.\n\n\t\t\"\"\"\n\n\t\tif _np.array(a).ndim == 0:\n\t\t\tself._a = a\n\t\t\tself._calculate_partition()\n\t\telse:\n\t\t\traise ValueError(\"expecting 'a' to be scalar.\")\n\n\tdef dot(self,v,work_array=None,overwrite_v=False):\n\t\t\"\"\"Calculates the action of :math:`\\\\mathrm{e}^{aA}` on a vector :math:`v`. \n\n\t\tExamples\n\t\t--------\n\n\t\t.. literalinclude:: ../../doc_examples/expm_multiply_parallel-example.py\n\t\t\t:linenos:\n\t\t\t:language: python\n\t\t\t:lines: 37-\n\n\t\tParameters\n\t\t-----------\n\t\tv : contiguous numpy.ndarray\n\t\t\tarray to apply :math:`\\\\mathrm{e}^{aA}` on.\n\t\twork_array : contiguous numpy.ndarray, optional\n\t\t\tarray of `shape = (2*len(v),)` which is used as work_array space for the underlying c-code. This saves extra memory allocation for function operations.\n\t\toverwrite_v : bool\n\t\t\tif set to `True`, the data in `v` is overwritten by the function. 
This saves extra memory allocation for the results.\n\n\t\tReturns\n\t\t--------\n\t\tnumpy.ndarray\n\t\t\tresult of :math:`\\\\mathrm{e}^{aA}v`. \n\n\t\t\tIf `overwrite_v = True` the dunction returns `v` with the data overwritten, otherwise the result is stored in a new array. \n\n\t\t\"\"\"\n\t\tv = _np.asarray(v)\n\t\t\t\n\t\tif v.ndim != 1:\n\t\t\traise ValueError(\"array must have ndim of 1.\")\n\t\t\n\t\tif v.shape[0] != self._A.shape[1]:\n\t\t\traise ValueError(\"dimension mismatch {}, {}\".format(self._A.shape,v.shape))\n\n\t\ta_dtype = _np.array(self._a).dtype\n\t\tv_dtype = _np.result_type(self._A.dtype,a_dtype,v.dtype)\n\n\t\tif overwrite_v:\n\t\t\tif v_dtype != v.dtype:\n\t\t\t\traise ValueError(\"if overwrite_v is True, the input array must match correct output dtype for matrix multiplication.\")\n\n\t\t\tif not v.flags[\"CARRAY\"]:\n\t\t\t\traise TypeError(\"input array must a contiguous and writable.\")\n\n\t\t\tif v.ndim != 1:\n\t\t\t\traise ValueError(\"array must have ndim of 1.\")\n\t\telse:\n\t\t\tv = v.astype(v_dtype,order=\"C\",copy=True)\n\n\t\tif work_array is None:\n\t\t\twork_array = _np.zeros((2*self._A.shape[0],),dtype=v.dtype)\n\t\telse:\n\t\t\twork_array = _np.ascontiguousarray(work_array)\n\t\t\tif work_array.shape != (2*self._A.shape[0],):\n\t\t\t\traise ValueError(\"work_array array must be an array of shape (2*v.shape[0],) with same dtype as v.\")\n\t\t\tif work_array.dtype != v_dtype:\n\t\t\t\traise ValueError(\"work_array must be array of dtype which matches the result of the matrix-vector multiplication.\")\n\n\t\ta = _np.array(self._a,dtype=v_dtype)\n\t\t_wrapper_expm_multiply(self._A.indptr,self._A.indices,self._A.data,\n\t\t\t\t\tself._m_star,self._s,a,self._tol,self._mu,v,work_array)\n\n\t\treturn v\n\n\tdef _calculate_partition(self):\n\t\tif _np.abs(self._a)*self._A_1_norm == 0:\n\t\t\tself._m_star, self._s = 0, 1\n\t\telse:\n\t\t\tell = 2\n\t\t\tself._norm_info = LazyOperatorNormInfo(self._A, self._A_1_norm, self._a, ell=ell)\n\t\t\tself._m_star, self._s = _fragment_3_1(self._norm_info, 1, self._tol, ell=ell)\n\n\n##### code below is copied from scipy.sparse.linalg._expm_multiply_core and modified slightly.\n\n\n# This table helps to compute bounds.\n# They seem to have been difficult to calculate, involving symbolic\n# manipulation of equations, followed by numerical root finding.\n_theta = {\n\t\t# The first 30 values are from table A.3 of Computing Matrix Functions.\n\t\t1: 2.29e-16,\n\t\t2: 2.58e-8,\n\t\t3: 1.39e-5,\n\t\t4: 3.40e-4,\n\t\t5: 2.40e-3,\n\t\t6: 9.07e-3,\n\t\t7: 2.38e-2,\n\t\t8: 5.00e-2,\n\t\t9: 8.96e-2,\n\t\t10: 1.44e-1,\n\t\t# 11\n\t\t11: 2.14e-1,\n\t\t12: 3.00e-1,\n\t\t13: 4.00e-1,\n\t\t14: 5.14e-1,\n\t\t15: 6.41e-1,\n\t\t16: 7.81e-1,\n\t\t17: 9.31e-1,\n\t\t18: 1.09,\n\t\t19: 1.26,\n\t\t20: 1.44,\n\t\t# 21\n\t\t21: 1.62,\n\t\t22: 1.82,\n\t\t23: 2.01,\n\t\t24: 2.22,\n\t\t25: 2.43,\n\t\t26: 2.64,\n\t\t27: 2.86,\n\t\t28: 3.08,\n\t\t29: 3.31,\n\t\t30: 3.54,\n\t\t# The rest are from table 3.1 of\n\t\t# Computing the Action of the Matrix Exponential.\n\t\t35: 4.7,\n\t\t40: 6.0,\n\t\t45: 7.2,\n\t\t50: 8.5,\n\t\t55: 9.9,\n\t\t}\n\n\nclass LazyOperatorNormInfo:\n\t\"\"\"\n\tInformation about an operator is lazily computed.\n\n\tThe information includes the exact 1-norm of the operator,\n\tin addition to estimates of 1-norms of powers of the operator.\n\tThis uses the notation of Computing the Action (2011).\n\tThis class is specialized enough to probably not be of general interest\n\toutside of this module.\n\n\t\"\"\"\n\tdef 
__init__(self, A, A_1_norm, a, ell=2):\n\t\t\"\"\"\n\t\tProvide the operator and some norm-related information.\n\n\t\tParameters\n\t\t-----------\n\t\tA : linear operator\n\t\t\tThe operator of interest.\n\t\tA_1_norm : float\n\t\t\tThe exact 1-norm of A.\n\t\tell : int, optional\n\t\t\tA technical parameter controlling norm estimation quality.\n\n\t\t\"\"\"\n\t\tself._A = A\n\t\tself._a = a\n\t\tself._A_1_norm = A_1_norm\n\t\tself._ell = ell\n\t\tself._d = {}\n\n\tdef onenorm(self):\n\t\t\"\"\"\n\t\tCompute the exact 1-norm.\n\t\t\"\"\"\n\t\treturn _np.abs(self._a) * self._A_1_norm\n\n\tdef d(self, p):\n\t\t\"\"\"\n\t\tLazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.\n\t\t\"\"\"\n\t\tif p not in self._d:\n\t\t\test = onenormest((self._a*aslinearoperator(self._A))**p)\n\t\t\tself._d[p] = est ** (1.0 / p)\n\t\treturn self._d[p]\n\n\tdef alpha(self, p):\n\t\t\"\"\"\n\t\tLazily compute max(d(p), d(p+1)).\n\t\t\"\"\"\n\t\treturn max(self.d(p), self.d(p+1))\n\n\ndef _compute_cost_div_m(m, p, norm_info):\n\t\"\"\"\n\tA helper function for computing bounds.\n\n\tThis is equation (3.10).\n\tIt measures cost in terms of the number of required matrix products.\n\n\tParameters\n\t-----------\n\tm : int\n\t\tA valid key of _theta.\n\tp : int\n\t\tA matrix power.\n\tnorm_info : LazyOperatorNormInfo\n\t\tInformation about 1-norms of related operators.\n\n\tReturns\n\t--------\n\tcost_div_m : int\n\t\tRequired number of matrix products divided by m.\n\n\t\"\"\"\n\treturn int(_np.ceil(norm_info.alpha(p) / _theta[m]))\n\n\ndef _compute_p_max(m_max):\n\t\"\"\"\n\tCompute the largest positive integer p such that p*(p-1) <= m_max + 1.\n\n\tDo this in a slightly dumb way, but safe and not too slow.\n\n\tParameters\n\t-----------\n\tm_max : int\n\t\tA count related to bounds.\n\n\t\"\"\"\n\tsqrt_m_max = _np.sqrt(m_max)\n\tp_low = int(_np.floor(sqrt_m_max))\n\tp_high = int(_np.ceil(sqrt_m_max + 1))\n\treturn max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)\n\n\ndef _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):\n\t\"\"\"\n\tA helper function for the _expm_multiply_* functions.\n\n\tParameters\n\t-----------\n\tnorm_info : LazyOperatorNormInfo\n\t\tInformation about norms of certain linear operators of interest.\n\tn0 : int\n\t\tNumber of columns in the _expm_multiply_* B matrix.\n\ttol : float\n\t\tExpected to be\n\t\t:math:`2^{-24}` for single precision or\n\t\t:math:`2^{-53}` for double precision.\n\tm_max : int\n\t\tA value related to a bound.\n\tell : int\n\t\tThe number of columns used in the 1-norm approximation.\n\t\tThis is usually taken to be small, maybe between 1 and 5.\n\n\tReturns\n\t--------\n\tbest_m : int\n\t\tRelated to bounds for error control.\n\tbest_s : int\n\t\tAmount of scaling.\n\n\tNotes\n\t-----\n\tThis is code fragment (3.1) in Al-Mohy and Higham (2011).\n\tThe discussion of default values for m_max and ell\n\tis given between the definitions of equation (3.11)\n\tand the definition of equation (3.12).\n\n\t\"\"\"\n\tif ell < 1:\n\t\traise ValueError('expected ell to be a positive integer')\n\tbest_m = None\n\tbest_s = None\n\tif _condition_3_13(norm_info.onenorm(), n0, m_max, ell):\n\t\tfor m, theta in _theta.items():\n\t\t\ts = int(_np.ceil(norm_info.onenorm() / theta))\n\t\t\tif best_m is None or m * s < best_m * best_s:\n\t\t\t\tbest_m = m\n\t\t\t\tbest_s = s\n\telse:\n\t\t# Equation (3.11).\n\t\tfor p in range(2, _compute_p_max(m_max) + 1):\n\t\t\tfor m in range(p*(p-1)-1, m_max+1):\n\t\t\t\tif m in _theta:\n\t\t\t\t\ts = 
_compute_cost_div_m(m, p, norm_info)\n\t\t\t\t\tif best_m is None or m * s < best_m * best_s:\n\t\t\t\t\t\tbest_m = m\n\t\t\t\t\t\tbest_s = s\n\t\tbest_s = max(best_s, 1)\n\treturn best_m, best_s\n\n\ndef _condition_3_13(A_1_norm, n0, m_max, ell):\n\t\"\"\"\n\tA helper function for the _expm_multiply_* functions.\n\n\tParameters\n\t-----------\n\tA_1_norm : float\n\t\tThe precomputed 1-norm of A.\n\tn0 : int\n\t\tNumber of columns in the _expm_multiply_* B matrix.\n\tm_max : int\n\t\tA value related to a bound.\n\tell : int\n\t\tThe number of columns used in the 1-norm approximation.\n\t\tThis is usually taken to be small, maybe between 1 and 5.\n\n\tReturns\n\t--------\n\tvalue : bool\n\t\tIndicates whether or not the condition has been met.\n\n\tNotes\n\t-----\n\tThis is condition (3.13) in Al-Mohy and Higham (2011).\n\n\t\"\"\"\n\n\t# This is the rhs of equation (3.12).\n\tp_max = _compute_p_max(m_max)\n\ta = 2 * ell * p_max * (p_max + 3)\n\n\t# Evaluate the condition (3.13).\n\tb = _theta[m_max] / float(n0 * m_max)\n\treturn A_1_norm <= a * b\n", "from __future__ import print_function, division\n\nimport sys,os\nquspin_path = os.path.join(os.getcwd(),\"../\")\nsys.path.insert(0,quspin_path)\n\nfrom quspin.basis import spin_basis_1d,photon_basis # Hilbert space bases\nfrom quspin.operators import hamiltonian # Hamiltonian and observables\nfrom quspin.tools.measurements import ent_entropy as ent_entropy\n#from quspin.tools.measurements import _ent_entropy as _ent_entropy\nimport numpy as np\nfrom numpy.random import uniform,seed,shuffle,randint # pseudo random numbers\n\nseed(0)\n\n\ndtypes={\"complex128\":np.complex128,\"float64\":np.float64,\n\t\t\"float32\":np.float32,\"complex64\":np.complex64}\n\natols={\"float32\":1E-4,\"float64\":1E-12,\n\t\t\"complex64\":1E-4,\"complex128\":1E-12}\n\n\n\ndef spin_entropy(dtype,symm,Sent_args):\n\n\tif symm:\n\t\tbasis = spin_basis_1d(L,kblock=0,pblock=1,zblock=1) \n\telse:\n\t\tbasis = spin_basis_1d(L)\n\t# define operators with OBC using site-coupling lists\n\tJ_zz = [[1.0,i,(i+1)%L,(i+2)%L] for i in range(0,L)] \n\tJ_xy = [[1.0,i,(i+1)%L] for i in range(0,L)]\n\n\t# static and dynamic lists\n\tstatic = [[\"+-\",J_xy],[\"-+\",J_xy],[\"zxz\",J_zz]]\n\t# build Hamiltonian\n\tH=hamiltonian(static,[],basis=basis,dtype=dtype,check_herm=False,check_symm=False)\n\t# diagonalise H\n\tE,V = H.eigh()\n\tpsi0=V[:,0]\n\trho0=np.outer(psi0.conj(),psi0)\n\trho_d=rho0\n\tEd,Vd = np.linalg.eigh(rho_d)\n\n\n\tS_pure = ent_entropy(psi0,basis,**Sent_args)\n\tS_DM = ent_entropy(rho0,basis,**Sent_args)\n\tS_all = ent_entropy({'V_states':V},basis,**Sent_args)\n\n\n\treturn (S_pure, S_DM, S_all)\n\n\n\n\nfor _r in range(10): # do 10 random checks\n\n\tL=6\n\tif uniform(0.0,1.0) < 1.0: #0.5:\n\t\tchain_subsys=list(np.unique([randint(0,L) for r in range(L//2)]))\n\telse:\n\t\tchain_subsys=[r for r in range(L)]\n\talpha=uniform(5)\n\n\tSent_args={'chain_subsys':chain_subsys,'alpha':alpha,'density':randint(2)}\n\n\t\n\n\tfor dtype_str in dtypes.keys():\n\t\t\n\t\tatol = atols[dtype_str]\n\t\tdtype=dtypes[dtype_str]\n\t\t\n\t\tS=np.zeros((2,4),dtype=dtype)\n\t\tfor symm in [0,1]:\n\n\t\t\t\"\"\"\n\t\t\tS1=[]\n\t\t\t# check entropies also between symmetries\n\t\t\tfor _i,_s in enumerate( spin_photon_entropy_cons(dtype,symm,Sent_args) ):\n\t\t\t\t\n\t\t\t\tif isinstance(_s['Sent'],np.ndarray):\n\t\t\t\t\tS1.append(_s['Sent'][0])\n\t\t\t\telse:\n\t\t\t\t\tS1.append(_s['Sent'])\n\n\t\t\tS[symm,:] = 
S1\n\n\t\t\tnp.testing.assert_allclose(np.diff(S1),0.0,atol=atol,err_msg='Failed entropies comparison!')\n\t\t\tif symm == 2:\n\t\t\t\tnp.testing.assert_allclose(np.diff(S.ravel()),0.0,atol=atol,err_msg='Failed entropies comparison symm <--> no_symm!')\n\t\t\t\"\"\"\n\t\t\t# check reduced DM's\n\t\t\tSent_args['DM']='both'\n\t\t\tDM_chain_subsys=[]\n\t\t\tDM_other_subsys=[]\n\t\t\tS2=[]\n\t\t\tfor _i,_s in enumerate( spin_entropy(dtype,symm,Sent_args) ):\n\n\t\t\t\tif isinstance(_s['Sent'],np.ndarray) and _s['Sent'].ndim > 0:\n\t\t\t\t\tDM_chain_subsys.append(_s['DM_chain_subsys'][0])\n\t\t\t\t\tDM_other_subsys.append(_s['DM_other_subsys'][0])\n\t\t\t\t\tS2.append(_s['Sent'][0])\n\t\t\t\telse:\n\t\t\t\t\tDM_chain_subsys.append(_s['DM_chain_subsys'])\n\t\t\t\t\tDM_other_subsys.append(_s['DM_other_subsys'])\n\t\t\t\t\tS2.append(_s['Sent'])\n\n\n\t\t\tnp.testing.assert_allclose(np.diff(S2),0.0,atol=atol,err_msg='Failed entropies comparison!')\n\t\t\tnp.testing.assert_allclose(np.diff(DM_chain_subsys,axis=0),0.0,atol=atol,err_msg='Failed DM_chain_subsys comparison!')\n\t\t\tnp.testing.assert_allclose(np.diff(DM_other_subsys,axis=0),0.0,atol=atol,err_msg='Failed DM_other_subsys comparison!')\n\n\t\t\t\t\t\n\t\t\t# call ent entropy to make sure U, lmbda, V do not produce errors\n\t\t\tSent_args['svd_return_vec']=[1,1,1]\n\t\t\tspin_entropy(dtype,symm,Sent_args)\n\n\tprint(\"entropy (photon, conserved) random check {} finished successfully\".format(_r))\n\n\n\n\nfor _r in range(10): # do 10 random checks\n\n\tL=6\n\tif uniform(0.0,1.0) < 0.5:\n\t\tchain_subsys=list(np.unique([randint(0,L) for r in range(L//2)]))\n\telse:\n\t\tchain_subsys=[r for r in range(L-1)]\n\talpha=uniform(5)\n\n\tSent_args={'chain_subsys':chain_subsys,'alpha':alpha,'density':randint(2)}\n\n\n\tfor dtype_str in dtypes.keys():\n\t\t\n\t\tatol = atols[dtype_str]\n\t\tdtype=dtypes[dtype_str]\n\t\t\n\t\tS=np.zeros((2,4),dtype=dtype)\n\t\tfor symm in [0,1]:\n\t\t\t\"\"\"\n\t\t\tS1=[]\n\t\t\t# check entropies also between symmetries\n\t\t\tfor _i,_s in enumerate( spin_photon_entropy(dtype,symm,Sent_args) ):\n\t\t\t\t\n\t\t\t\tif isinstance(_s['Sent'],np.ndarray):\n\t\t\t\t\tS1.append(_s['Sent'][0])\n\t\t\t\telse:\n\t\t\t\t\tS1.append(_s['Sent'])\n\n\t\t\tS[symm,:] = S1\n\n\t\t\tnp.testing.assert_allclose(np.diff(S1),0.0,atol=atol,err_msg='Failed entropies comparison!')\n\t\t\tif symm == 2:\n\t\t\t\tnp.testing.assert_allclose(np.diff(S.ravel()),0.0,atol=atol,err_msg='Failed entropies comparison symm <--> no_symm!')\n\t\t\t\n\t\t\t\"\"\"\n\t\t\t# check reduced DM's\n\t\t\tSent_args['DM']='both'\n\t\t\tDM_chain_subsys=[]\n\t\t\tDM_other_subsys=[]\n\t\t\tS2=[]\n\t\t\tfor _i,_s in enumerate( spin_entropy(dtype,symm,Sent_args) ):\n\n\t\t\t\tif isinstance(_s['Sent'],np.ndarray) and _s['Sent'].ndim > 0:\n\t\t\t\t\tDM_chain_subsys.append(_s['DM_chain_subsys'][0])\n\t\t\t\t\tDM_other_subsys.append(_s['DM_other_subsys'][0])\n\t\t\t\t\tS2.append(_s['Sent'][0])\n\t\t\t\telse:\n\t\t\t\t\tDM_chain_subsys.append(_s['DM_chain_subsys'])\n\t\t\t\t\tDM_other_subsys.append(_s['DM_other_subsys'])\n\t\t\t\t\tS2.append(_s['Sent'])\n\n\t\t\tnp.testing.assert_allclose(np.diff(S2),0.0,atol=atol,err_msg='Failed entropies comparison!')\n\t\t\tnp.testing.assert_allclose(np.diff(DM_chain_subsys,axis=0),0.0,atol=atol,err_msg='Failed DM_chain_subsys comparison!')\n\t\t\tnp.testing.assert_allclose(np.diff(DM_other_subsys,axis=0),0.0,atol=atol,err_msg='Failed DM_other_subsys comparison!')\n\n\t\t\t\t\t\n\t\t\t# call ent entropy to make sure U, lmbda, V do not 
produce errors\n\t\t\tSent_args['svd_return_vec']=[1,1,1]\n\t\t\tspin_entropy(dtype,symm,Sent_args)\n\n\tprint(\"entropy (photon) random check {} finished successfully\".format(_r))\n\n\n\nfor _r in range(10): # do 10 random checks\n\n\tL=8\n\tchain_subsys=list(np.unique([randint(0,L) for r in range(L//2)]))\n\talpha=uniform(5)\n\n\tSent_args={'chain_subsys':chain_subsys,'alpha':alpha,'density':randint(2)}\n\n\n\tfor dtype_str in dtypes.keys():\n\t\t\n\t\tatol = atols[dtype_str]\n\t\tdtype=dtypes[dtype_str]\n\t\t\n\t\tS=np.zeros((2,4),dtype=dtype)\n\t\tfor symm in [0,1]:\n\t\t\t\"\"\"\n\t\t\tS1=[]\n\t\t\t# check entropies also between symmetries\n\t\t\tfor _i,_s in enumerate( spin_entropy(dtype,symm,Sent_args) ):\n\t\t\t\t\n\t\t\t\tif isinstance(_s['Sent'],np.ndarray) and _s['Sent'].ndim > 0:\n\t\t\t\t\tS1.append(_s['Sent'][0])\n\t\t\t\telse:\n\t\t\t\t\tS1.append(_s['Sent'])\n\n\t\t\tS[symm,:] = S1\n\n\t\t\tnp.testing.assert_allclose(np.diff(S1),0.0,atol=atol,err_msg='Failed entropies comparison!')\n\t\t\tif symm == 1:\n\t\t\t\tnp.testing.assert_allclose(np.diff(S.ravel()),0.0,atol=atol,err_msg='Failed entropies comparison symm <--> no_symm!')\n\t\t\t\"\"\"\n\t\t\t# check reduced DM's\n\t\t\tSent_args['DM']='both'\n\t\t\tDM_chain_subsys=[]\n\t\t\tDM_other_subsys=[]\n\t\t\tS2=[]\n\t\t\tfor _i,_s in enumerate( spin_entropy(dtype,symm,Sent_args) ):\n\n\t\t\t\tif isinstance(_s['Sent'],np.ndarray) and _s['Sent'].ndim > 0:\n\t\t\t\t\tDM_chain_subsys.append(_s['DM_chain_subsys'][0])\n\t\t\t\t\tDM_other_subsys.append(_s['DM_other_subsys'][0])\n\t\t\t\t\tS2.append(_s['Sent'][0])\n\t\t\t\telse:\n\t\t\t\t\tDM_chain_subsys.append(_s['DM_chain_subsys'])\n\t\t\t\t\tDM_other_subsys.append(_s['DM_other_subsys'])\n\t\t\t\t\tS2.append(_s['Sent'])\n\n\t\t\t\n\t\t\tnp.testing.assert_allclose(np.diff(S2),0.0,atol=atol,err_msg='Failed entropies comparison!')\n\t\t\tnp.testing.assert_allclose(np.diff(DM_chain_subsys,axis=0),0.0,atol=atol,err_msg='Failed DM_chain_subsys comparison!')\n\t\t\tnp.testing.assert_allclose(np.diff(DM_other_subsys,axis=0),0.0,atol=atol,err_msg='Failed DM_other_subsys comparison!')\n\n\t\t\t\t\t\n\t\t\t# call ent entropy to make sure U, lmbda, V do not produce errors\n\t\t\tSent_args['svd_return_vec']=[1,1,1]\n\t\t\tspin_entropy(dtype,symm,Sent_args)\n\n\tprint(\"entropy (spin) random check {} finished successfully\".format(_r))\n\nprint(\"Entanglement entropy checks passed!\")\n", "from __future__ import print_function, division\n\nimport sys,os\nquspin_path = os.path.join(os.getcwd(),\"../\")\nsys.path.insert(0,quspin_path)\n\nfrom quspin.operators import hamiltonian\nfrom quspin.basis import boson_basis_1d\nimport numpy as np\nimport scipy.sparse as sp\nfrom numpy.linalg import norm\nfrom numpy.random import random,seed\n\nseed()\ndtypes=[np.float32,np.float64,np.complex64,np.complex128]\n\ndef J(L,jb,l):\n\tblist=[]\n\tfor i,j in jb:\n\t\tb=[j]\n\t\tb.extend([(i+j)%L for j in range(l)])\n\t\tblist.append(b)\n\n\treturn blist\n\ndef Jnn(L,jb,l):\n\tblist=[]\n\tfor i,j in jb:\n\t\tb=[j]\n\t\tb.extend([(i+j)%L for j in range(0,l+1,2)])\n\t\tblist.append(b)\n\n\treturn blist\n\n\ndef getvec_boson(L,H1,static,sps=2,Nb=None,kblock=None,pblock=None,a=1,sparse=True):\n\tjb=[[i,1.0] for i in range(L)]\n\tdtype=np.complex128\n\n\tb = boson_basis_1d(L,sps=sps,Nb=Nb,kblock=kblock,pblock=pblock,a=a)\n\t\n\tNs = b.Ns\n\tif Ns == 0:\n\t\treturn \n\n\tH2 = hamiltonian(static,[],basis=b,dtype=dtype)\n\n\tE,v0 = H2.eigh()\n\tv = b.get_vec(v0,sparse=sparse)\n\tP = 
b.get_proj(dtype=np.complex128)\n\n\tif sp.issparse(v):\n\t\tv = v.todense()\n\n\n\tH2 = H2.todense()\n\tH2 = v0.T.conj() * (H2 * v0)\n\tH1 = v.T.conj().dot(H1.dot(v))\n\terr_msg = \"get_vec() symmetries failed for L={0} {1}\".format(b.N,b.blocks)\n\tnp.testing.assert_allclose(H1,H2,atol=1e-10,err_msg=err_msg)\n\n\ndef check_getvec_boson(L,sps=2,a=1,sparse=True):\n\tdtype=np.complex128\n\tjb=[[i,1.0] for i in range(L)]\n\tstatic = [\n\t\t\t\t\t\t['nn',J(L,jb,2)],\n\t\t\t\t\t\t['+-',J(L,jb,2)],\n\t\t\t\t\t\t['-+',J(L,jb,2)],\n\t\t\t\t\t\t['nnnn',J(L,jb,4)],\n\t\t\t\t\t\t['+nn-',J(L,jb,4)],\n\t\t\t\t\t\t['-nn+',J(L,jb,4)],\n\t\t\t\t\t\t['++--',J(L,jb,4)],\n\t\t\t\t\t\t['--++',J(L,jb,4)],\n\t\t\t\t\t\t['+-+-',J(L,jb,4)],\n\t\t\t\t\t\t['-+-+',J(L,jb,4)],\n\t\t\t\t\t]\n\tb_full = boson_basis_1d(L,sps=sps)\n\tH1 = hamiltonian(static,[],basis=b_full,dtype=dtype)\n\n\tH1 = H1.todense()\n\n\tfor k in range(-L//a,L//a):\n\t\tgetvec_boson(L,H1,static,sps=sps,kblock=k,a=a,sparse=sparse)\n\n\n\tfor j in range(-1,2,2):\n\t\tgetvec_boson(L,H1,static,sps=sps,pblock=j,a=a,sparse=sparse)\n\t\tfor k in range(-L//a,L//a):\n\t\t\tgetvec_boson(L,H1,static,sps=sps,kblock=k,pblock=j,a=a,sparse=sparse)\n\n\tfor Nb in range(L+1):\n\t\tfor k in range(-L//a,L//a):\n\t\t\t\tgetvec_boson(L,H1,static,sps=sps,Nb=Nb,kblock=k,a=a,sparse=sparse)\n\n\tfor Nb in range(0,L+1):\n\t\tfor j in range(-1,2,2):\n\t\t\tgetvec_boson(L,H1,static,sps=sps,Nb=Nb,pblock=j,a=a,sparse=sparse)\n\t\t\tfor k in range(-L//a,L//a):\n\t\t\t\tgetvec_boson(L,H1,static,sps=sps,kblock=k,Nb=Nb,pblock=j,a=a,sparse=sparse)\n\n\n\t\n\n\n\n\ncheck_getvec_boson(6,sps=2,sparse=True)\ncheck_getvec_boson(6,sps=2,sparse=False)\ncheck_getvec_boson(6,sps=3,sparse=True)\ncheck_getvec_boson(6,sps=3,sparse=False)\n\n\n\n", "from __future__ import print_function, division\n#\nimport sys,os\nquspin_path = os.path.join(os.getcwd(),\"../../\")\nsys.path.insert(0,quspin_path)\n#\nfrom quspin.operators import hamiltonian # operators\nfrom quspin.basis import spinful_fermion_basis_general # spin basis constructor\nimport numpy as np # general math functions\n#\n###### define model parameters ######\nLx, Ly = 4,3 # linear dimension of spin 1 2d lattice\nN_2d = Lx*Ly # number of sites for spin 1\n#\nJ=1.0 # hopping matrix element\nU=2.0 # onsite interaction\n#\n###### setting up user-defined BASIC symmetry transformations for 2d lattice ######\n# we build the advanced symmetry operations operations by concatenating operations for a single spin species\nx = np.arange(N_2d)%Lx # x positions for sites for one spin species\ny = np.arange(N_2d)//Lx # y positions for sites for one spin species\nt_x = (x+1)%Lx + Lx*y # translation along x-direction for one spin species\nt_y = x + Lx*((y+1)%Ly) # translation along y-direction for one spin species\n# create the spin-up spin-down combined transformations\ns = np.arange(2*N_2d) # sites [0,1,2,...,N_2d-1,...,2*N_2d-1] in advanced notation\nT_x = np.hstack((t_x,t_x+N_2d)) # translation along x-direction for both spin species\nT_y = np.hstack((t_y,t_y+N_2d)) # translation along y-direction for both spin species\nPH = -(s+1) # particle-hole in the advanced case\n#\n###### setting up bases ###### (note optional argument simple_symm=False)\n#basis_2d=spinful_fermion_basis_general(N_2d,simple_symm=False,Nf=(2,2),kxblock=(T_x,0),kyblock=(T_y,0))\nbasis_2d=spinful_fermion_basis_general(N_2d,simple_symm=False,Nf=(6,6),kxblock=(T_x,0),kyblock=(T_y,0),phblock=(PH,0)) \n#\n###### setting up hamiltonian ######\n# setting up site-coupling lists for 
advanced case\nhopping_left =[[-J,i,T_x[i]] for i in range(2*N_2d)] + [[-J,i,T_y[i]] for i in range(2*N_2d)]\nhopping_right=[[+J,i,T_x[i]] for i in range(2*N_2d)] + [[+J,i,T_y[i]] for i in range(2*N_2d)]\ninteraction=[[U,i,i+N_2d] for i in range(N_2d)]\n#\nstatic=[[\"+-\",hopping_left], # spin-up and spin-down hop to left\n\t\t[\"-+\",hopping_right], # spin-up and spin-down hop to right\n\t\t[\"zz\",interaction]] # spin-up spin-down interaction\n# build hamiltonian\nH=hamiltonian(static,[],basis=basis_2d,dtype=np.float64)\n# diagonalise H\nE=H.eigsh(k=10,which=\"SA\",return_eigenvectors=False)\nprint(E)" ]
[ [ "numpy.sqrt", "numpy.abs", "numpy.asarray", "numpy.ascontiguousarray", "scipy.sparse.csr_matrix", "numpy.finfo", "numpy.result_type", "numpy.ceil", "scipy.sparse.identity", "scipy.sparse.linalg.aslinearoperator", "numpy.floor", "numpy.array", "numpy.zeros" ], [ "numpy.random.seed", "numpy.linalg.eigh", "numpy.diff", "numpy.random.uniform", "numpy.zeros", "numpy.random.randint" ], [ "scipy.sparse.issparse", "numpy.random.seed", "numpy.testing.assert_allclose" ], [ "numpy.arange", "numpy.hstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mlilab/Mixed-Effect-Composite-RNN-Gaussian-Process
[ "dd7da89ce3c41d459a26ad1ce5ed2f40ab4ca85d" ]
[ "GPflow/testing/test_triang.py" ]
[ "import unittest\nfrom gpflow.tf_wraps import vec_to_tri\nimport tensorflow as tf\nimport numpy as np\n\nfrom testing.gpflow_testcase import GPflowTestCase\nfrom gpflow.tf_wraps import vec_to_tri\n\n\nclass TestVecToTri(GPflowTestCase):\n def referenceInverse(self, matrices):\n\t\t#this is the inverse operation of the vec_to_tri\n\t\t#op being tested.\n D, N, _ = matrices.shape\n M = (N * (N + 1)) // 2\n tril_indices = np.tril_indices(N)\n output = np.zeros((D, M))\n for vector_index in range(D):\n matrix = matrices[vector_index, :]\n output[vector_index, :] = matrix[tril_indices]\n return output\n\n def getExampleMatrices(self, D, N ):\n rng = np.random.RandomState(1)\n random_matrices = rng.randn(D, N, N)\n for matrix_index in range(D):\n for row_index in range(N):\n for col_index in range(N):\n if col_index > row_index:\n random_matrices[matrix_index, row_index, col_index] = 0.\n return random_matrices\n\n def testBasicFunctionality(self):\n with self.test_session() as sess:\n N = 3\n D = 3\n reference_matrices = self.getExampleMatrices(D, N)\n input_vector_tensor = tf.constant(self.referenceInverse(reference_matrices))\n\n test_matrices_tensor = vec_to_tri(input_vector_tensor, N)\n test_matrices = sess.run(test_matrices_tensor)\n np.testing.assert_array_almost_equal(reference_matrices, test_matrices)\n\n def testDifferentiable(self):\n with self.test_session() as sess:\n N = 3\n D = 3\n reference_matrices = self.getExampleMatrices(D, N)\n input_vector_array = self.referenceInverse(reference_matrices)\n input_vector_tensor = tf.constant(input_vector_array)\n\n test_matrices_tensor = vec_to_tri(input_vector_tensor, N)\n reduced_sum = tf.reduce_sum(test_matrices_tensor)\n gradient = tf.gradients(reduced_sum, input_vector_tensor)[0]\n reference_gradient = np.ones_like(input_vector_array)\n test_gradient = sess.run(gradient)\n np.testing.assert_array_almost_equal(reference_gradient, test_gradient)\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "tensorflow.constant", "numpy.ones_like", "numpy.tril_indices", "tensorflow.reduce_sum", "tensorflow.gradients", "numpy.random.RandomState", "numpy.zeros", "numpy.testing.assert_array_almost_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
jordan-owen/tpqoa
[ "7f75b3399ec5b05ad8c543e95c815a76372db98d" ]
[ "tpqoa/tpqoa.py" ]
[ "#\n# tpqoa is a wrapper class for the\n# Oanda v20 API (RESTful & streaming)\n# making use of the v20 Python package\n#\n# (c) Dr. Yves J. Hilpisch\n# The Python Quants GmbH\n#\n#\n# Trading forex/CFDs on margin carries a high level of risk and may\n# not be suitable for all investors as you could sustain losses\n# in excess of deposits. Leverage can work against you. Due to the certain\n# restrictions imposed by the local law and regulation, German resident\n# retail client(s) could sustain a total loss of deposited funds but are\n# not subject to subsequent payment obligations beyond the deposited funds.\n# Be aware and fully understand all risks associated with\n# the market and trading. Prior to trading any products,\n# carefully consider your financial situation and\n# experience level. Any opinions, news, research, analyses, prices,\n# or other information is provided as general market commentary, and does not\n# constitute investment advice. The Python Quants GmbH will not accept\n# liability for any loss or damage, including without limitation to,\n# any loss of profit, which may arise directly or indirectly from use\n# of or reliance on such information.\n#\n# The tpqoa package is intended as a technological illustration only.\n# It comes with no warranties or representations,\n# to the extent permitted by applicable law.\n#\nimport v20\nimport json\nimport configparser\nimport pandas as pd\nfrom v20.transaction import StopLossDetails, ClientExtensions\nfrom v20.transaction import TrailingStopLossDetails, TakeProfitDetails\n\n\nclass tpqoa(object):\n ''' tpqoa is a Python wrapper class for the Oanda v20 API. '''\n\n def __init__(self, conf_file):\n ''' Init function is expecting a configuration file with\n the following content:\n\n [oanda]\n account_id = XYZ-ABC-...\n access_token = ZYXCAB...\n account_type = practice (default) or live\n\n Parameters\n ==========\n conf_file: string\n path to and filename of the configuration file,\n e.g. '/home/me/oanda.cfg'\n '''\n self.config = configparser.ConfigParser()\n self.config.read(conf_file)\n self.access_token = self.config['oanda']['access_token']\n self.account_id = self.config['oanda']['account_id']\n self.account_type = self.config['oanda']['account_type']\n\n if self.account_type == 'live':\n self.hostname = 'api-fxtrade.oanda.com'\n self.stream_hostname = 'stream-fxtrade.oanda.com'\n else:\n self.hostname = 'api-fxpractice.oanda.com'\n self.stream_hostname = 'stream-fxpractice.oanda.com'\n\n self.ctx = v20.Context(\n hostname=self.hostname,\n port=443,\n token=self.access_token,\n poll_timeout=10\n )\n self.ctx_stream = v20.Context(\n hostname=self.stream_hostname,\n port=443,\n token=self.access_token,\n )\n\n self.suffix = '.000000000Z'\n self.stop_stream = False\n\n def get_instruments(self):\n ''' Retrieves and returns all instruments for the given account. '''\n resp = self.ctx.account.instruments(self.account_id)\n instruments = resp.get('instruments')\n instruments = [ins.dict() for ins in instruments]\n instruments = [(ins['displayName'], ins['name'])\n for ins in instruments]\n return sorted(instruments)\n\n def get_prices(self, instrument):\n ''' Returns the current BID/ASK prices for instrument. '''\n r = self.ctx.pricing.get(self.account_id, instruments=instrument)\n r = json.loads(r.raw_body)\n bid = float(r['prices'][0]['closeoutBid'])\n ask = float(r['prices'][0]['closeoutAsk'])\n return r['time'], bid, ask\n\n def transform_datetime(self, dati):\n ''' Transforms Python datetime object to string. 
'''\n if isinstance(dati, str):\n dati = pd.Timestamp(dati).to_pydatetime()\n return dati.isoformat('T') + self.suffix\n\n def retrieve_data(self, instrument, start, end, granularity, price):\n raw = self.ctx.instrument.candles(\n instrument=instrument,\n fromTime=start, toTime=end,\n granularity=granularity, price=price)\n raw = raw.get('candles')\n raw = [cs.dict() for cs in raw]\n if price == 'A':\n for cs in raw:\n cs.update(cs['ask'])\n del cs['ask']\n elif price == 'B':\n for cs in raw:\n cs.update(cs['bid'])\n del cs['bid']\n elif price == 'M':\n for cs in raw:\n cs.update(cs['mid'])\n del cs['mid']\n else:\n raise ValueError(\"price must be either 'B', 'A' or 'M'.\")\n if len(raw) == 0:\n return pd.DataFrame() # return empty DataFrame if no data\n data = pd.DataFrame(raw)\n data['time'] = pd.to_datetime(data['time'])\n data = data.set_index('time')\n data.index = pd.DatetimeIndex(data.index)\n for col in list('ohlc'):\n data[col] = data[col].astype(float)\n return data\n\n def get_history(self, instrument, start, end,\n granularity, price, localize=True):\n ''' Retrieves historical data for instrument.\n\n Parameters\n ==========\n instrument: string\n valid instrument name\n start, end: datetime, str\n Python datetime or string objects for start and end\n granularity: string\n a string like 'S5', 'M1' or 'D'\n price: string\n one of 'A' (ask), 'B' (bid) or 'M' (middle)\n\n Returns\n =======\n data: pd.DataFrame\n pandas DataFrame object with data\n '''\n if granularity.startswith('S') or granularity.startswith('M'):\n if granularity.startswith('S'):\n freq = '1h'\n else:\n freq = 'D'\n data = pd.DataFrame()\n dr = pd.date_range(start, end, freq=freq)\n\n for t in range(len(dr)):\n batch_start = self.transform_datetime(dr[t])\n if t != len(dr) - 1:\n batch_end = self.transform_datetime(dr[t + 1])\n else:\n batch_end = self.transform_datetime(end)\n\n batch = self.retrieve_data(instrument, batch_start, batch_end,\n granularity, price)\n data = data.append(batch)\n else:\n start = self.transform_datetime(start)\n end = self.transform_datetime(end)\n data = self.retrieve_data(instrument, start, end,\n granularity, price)\n if localize:\n data.index = data.index.tz_localize(None)\n\n return data[['o', 'h', 'l', 'c', 'volume', 'complete']]\n\n def create_order(self, instrument, units, price=None, sl_distance=None,\n tsl_distance=None, tp_price=None, comment=None,\n touch=False, suppress=False, ret=False):\n ''' Places order with Oanda.\n\n Parameters\n ==========\n instrument: string\n valid instrument name\n units: int\n number of units of instrument to be bought\n (positive int, eg 'units=50')\n or to be sold (negative int, eg 'units=-100')\n price: float\n limit order price, touch order price\n sl_distance: float\n stop loss distance price, mandatory eg in Germany\n tsl_distance: float\n trailing stop loss distance\n tp_price: float\n take profit price to be used for the trade\n comment: str\n string\n touch: boolean\n market_if_touched order (requires price to be set)\n suppress: boolean\n whether to suppress print out\n ret: boolean\n whether to return the order object\n '''\n client_ext = ClientExtensions(\n comment=comment) if comment is not None else None\n sl_details = (StopLossDetails(distance=sl_distance,\n clientExtensions=client_ext)\n if sl_distance is not None else None)\n tsl_details = (TrailingStopLossDetails(distance=tsl_distance,\n clientExtensions=client_ext)\n if tsl_distance is not None else None)\n tp_details = (TakeProfitDetails(\n price=tp_price, 
clientExtensions=client_ext)\n if tp_price is not None else None)\n if price is None:\n request = self.ctx.order.market(\n self.account_id,\n instrument=instrument,\n units=units,\n stopLossOnFill=sl_details,\n trailingStopLossOnFill=tsl_details,\n takeProfitOnFill=tp_details,\n )\n elif touch:\n request = self.ctx.order.market_if_touched(\n self.account_id,\n instrument=instrument,\n price=price,\n units=units,\n stopLossOnFill=sl_details,\n trailingStopLossOnFill=tsl_details,\n takeProfitOnFill=tp_details\n )\n else:\n request = self.ctx.order.limit(\n self.account_id,\n instrument=instrument,\n price=price,\n units=units,\n stopLossOnFill=sl_details,\n trailingStopLossOnFill=tsl_details,\n takeProfitOnFill=tp_details\n )\n try:\n order = request.get('orderFillTransaction')\n except Exception:\n order = request.get('orderCreateTransaction')\n if not suppress:\n print('\\n\\n', order.dict(), '\\n')\n if ret is True:\n return order.dict()\n\n def stream_data(self, instrument, stop=None, ret=False):\n ''' Starts a real-time data stream.\n\n Parameters\n ==========\n instrument: string\n valid instrument name\n '''\n self.stream_instrument = instrument\n self.ticks = 0\n response = self.ctx_stream.pricing.stream(\n self.account_id, snapshot=True,\n instruments=instrument)\n msgs = []\n for msg_type, msg in response.parts():\n msgs.append(msg)\n # print(msg_type, msg)\n if msg_type == 'pricing.ClientPrice':\n self.ticks += 1\n self.time = msg.time\n self.on_success(msg.time,\n float(msg.bids[0].dict()['price']),\n float(msg.asks[0].dict()['price']))\n if stop is not None:\n if self.ticks >= stop:\n if ret:\n return msgs\n break\n if self.stop_stream:\n if ret:\n return msgs\n break\n\n def on_success(self, time, bid, ask):\n ''' Method called when new data is retrieved. '''\n print(time, bid, ask)\n\n def get_account_summary(self, detailed=False):\n ''' Returns summary data for Oanda account.'''\n if detailed is True:\n response = self.ctx.account.get(self.account_id)\n else:\n response = self.ctx.account.summary(self.account_id)\n raw = response.get('account')\n return raw.dict()\n\n def get_transaction(self, tid=0):\n ''' Retrieves and returns transaction data. '''\n response = self.ctx.transaction.get(self.account_id, tid)\n transaction = response.get('transaction')\n return transaction.dict()\n\n def get_transactions(self, tid=0):\n ''' Retrieves and returns transactions data. '''\n response = self.ctx.transaction.since(self.account_id, id=tid)\n transactions = response.get('transactions')\n transactions = [t.dict() for t in transactions]\n return transactions\n\n def print_transactions(self, tid=0):\n ''' Prints basic transactions data. '''\n transactions = self.get_transactions(tid)\n for trans in transactions:\n try:\n templ = '%4s | %s | %7s | %12s | %8s'\n print(templ % (trans['id'],\n trans['time'][:-8],\n trans['instrument'],\n trans['units'],\n trans['pl']))\n except Exception:\n pass\n\n def get_positions(self):\n ''' Retrieves and returns positions data. '''\n response = self.ctx.position.list_open(self.account_id).body\n positions = [p.dict() for p in response.get('positions')]\n return positions\n" ]
[ [ "pandas.to_datetime", "pandas.DatetimeIndex", "pandas.DataFrame", "pandas.date_range", "pandas.Timestamp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
FarCaptain/MyUnet
[ "a02516a7ffa9e6fd5730e05fb5f95386bbb4b3d6" ]
[ "models/unet_original_dice&BCE.py" ]
[ "\n# coding: utf-8\n\n# In[1]:\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n#get_ipython().magic('matplotlib inline')\nimport numpy as np\nimport sklearn\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport os\nimport sys\nimport time\n#import keras\n#from keras.layers.convolutional import Conv2D\nimport tensorflow as tf\ntf.enable_eager_execution()\n\nfrom tensorflow import keras\n\n\nprint(tf.__version__)\nprint(sys.version_info)\nfor module in mpl, np, pd, sklearn, tf, keras:\n print(module.__name__, module.__version__)\n\n\n# In[2]:\n\ndef conv2d_block(input_tensor, n_filters, kernel_size = 3, batchnorm = True):\n \"\"\"Function to add 2 convolutional layers with the parameters passed to it\"\"\"\n # first layer\n x = keras.layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size), kernel_initializer = 'he_normal', padding = 'same')(input_tensor)\n if batchnorm:\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Activation('relu')(x)\n\n # second layer\n x = keras.layers.Conv2D(filters = n_filters, kernel_size = (kernel_size, kernel_size),\n kernel_initializer = 'he_normal', padding = 'same')(x)\n if batchnorm:\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Activation('relu')(x)\n\n return x\n\ndef get_unet(input_img, n_filters = 16, dropout = 0.1, batchnorm = False):\n # Contracting Path\n input_img = keras.Input(shape = [224,800,1])#224*800\n c1 = conv2d_block(input_img, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)\n p1 = keras.layers.MaxPooling2D((2, 2))(c1)\n p1 = keras.layers.Dropout(dropout)(p1)\n\n c2 = conv2d_block(p1, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)\n p2 = keras.layers.MaxPooling2D((2, 2))(c2)\n p2 = keras.layers.Dropout(dropout)(p2)\n\n c3 = conv2d_block(p2, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)\n p3 = keras.layers.MaxPooling2D((2, 2))(c3)\n p3 = keras.layers.Dropout(dropout)(p3)\n\n c4 = conv2d_block(p3, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)\n p4 = keras.layers.MaxPooling2D((2, 2))(c4)\n p4 = keras.layers.Dropout(dropout)(p4)\n\n c5 = conv2d_block(p4, n_filters = n_filters * 16, kernel_size = 3, batchnorm = batchnorm)\n\n # Expansive Path\n u6 = keras.layers.Conv2DTranspose(n_filters * 8, (3, 3), strides = (2, 2), padding = 'same')(c5)\n u6 = keras.layers.concatenate([u6, c4])\n u6 = keras.layers.Dropout(dropout)(u6)\n c6 = conv2d_block(u6, n_filters * 8, kernel_size = 3, batchnorm = batchnorm)\n\n u7 = keras.layers.Conv2DTranspose(n_filters * 4, (3, 3), strides = (2, 2), padding = 'same')(c6)\n u7 = keras.layers.concatenate([u7, c3])\n u7 = keras.layers.Dropout(dropout)(u7)\n c7 = conv2d_block(u7, n_filters * 4, kernel_size = 3, batchnorm = batchnorm)\n\n u8 = keras.layers.Conv2DTranspose(n_filters * 2, (3, 3), strides = (2, 2), padding = 'same')(c7)\n u8 = keras.layers.concatenate([u8, c2])\n u8 = keras.layers.Dropout(dropout)(u8)\n c8 = conv2d_block(u8, n_filters * 2, kernel_size = 3, batchnorm = batchnorm)\n\n u9 = keras.layers.Conv2DTranspose(n_filters * 1, (3, 3), strides = (2, 2), padding = 'same')(c8)\n u9 = keras.layers.concatenate([u9, c1])\n u9 = keras.layers.Dropout(dropout)(u9)\n c9 = conv2d_block(u9, n_filters * 1, kernel_size = 3, batchnorm = batchnorm)\n\n outputs = keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)\n model = keras.Model(inputs=[input_img], outputs=[outputs])\n return model\n\n\n# In[3]:\n\ndef get_number_list(ss):\n ss = ss.split(',')\n ss = [int(x) for x in ss]\n return 
ss\n\n#224*1016 -> 224*800\ndef read_file(index):\n mat = np.array([[0]*800 for i in range(224) ])\n string = \"D:\\\\0Sphinx\\\\SaveData\\\\data_\"+str(index)+\".txt\"\n f = open(string,\"r\")\n line = f.readline()\n i = 0\n while line:\n j = 0\n line = get_number_list(line)\n line = line[:800]\n for x in line:\n mat[i][j] = x\n j+=1\n i+=1\n line = f.readline()\n f.close()\n return mat\n\ndef read_label(index):\n mat =np.array([[0]*800 for i in range(224) ])\n string = \"D:\\\\0Sphinx\\\\SaveData\\\\data_\"+str(index)+\"_label.txt\"\n f = open(string,\"r\")\n line = f.readline()\n i = 0\n while line:\n j = 0\n line = get_number_list(line)\n line = line[:800]\n for x in line:\n mat[i][j] = x\n j+=1\n i+=1\n line = f.readline()\n f.close()\n return mat\n\nimages = []\nlabels = []\nfor i in range(1,721):\n images.append(read_file(i))\n string = \"D:\\\\0Sphinx\\\\SaveData\\\\data_\"+str(i)+\"_label.txt\"\n if os.path.exists(string):\n labels.append(read_label(i))\n else:\n labels.append(read_label(78)) #零矩阵\n\nX = np.array(images) #数据集\nX = np.reshape(X,(-1,224,800,1))\nprint(X.shape)\n\nY = np.array(labels)\nY = np.reshape(Y,(-1,224,800,1))\nprint(Y.shape)\n\n#split into train,valid,test\n#x_train_all, x_test, y_train_all, y_test = train_test_split(X,Y,train_size=0.79,random_state=30)\n\n\n# In[4]:\n\nx_train_all, x_test, y_train_all, y_test = train_test_split(X,Y,train_size=0.896,random_state=30)\nprint(x_train_all.shape)\nprint(x_test.shape)\n\n\n# In[5]:\n\ndef make_dataset(images, labels, epochs, batch_size, shuffle = True):\n\tdataset = tf.data.Dataset.from_tensor_slices((images, labels))\n\tif shuffle:\n\t\tdataset = dataset.shuffle(10000)\n\tdataset = dataset.repeat(epochs).batch(batch_size)\n\treturn dataset\n\n\n# In[6]:\n\nx_train, x_valid = x_train_all[:570], x_train_all[570:]\ny_train, y_valid = y_train_all[:570], y_train_all[570:]\nprint(x_train.shape,\" \", x_valid.shape,\" \", x_test.shape) #6:2:2\n#scale here\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\n#x_train: [None,224,800,1] -> [None,‭179200,1‬]\nx_train_scaled = scaler.fit_transform(\n x_train.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)\nx_valid_scaled = scaler.transform(\n x_valid.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)\nx_test_scaled = scaler.transform(\n x_test.astype(np.float32).reshape(-1,1)).reshape(-1,224,800,1)\n\ny_train.astype(np.float32)\ny_valid.astype(np.float32)\ny_test.astype(np.float32)\n\n\n# In[7]:\n\nprint(np.max(x_train_scaled), np.min(x_train_scaled))\n\n\ntrain_set = make_dataset(x_train_scaled,y_train,epochs=20 ,batch_size=10)\nvalid_set = make_dataset(x_valid_scaled,y_valid,epochs=10 ,batch_size=15)#75/5 = 15\ntest_set = make_dataset(x_test_scaled,y_test,epochs=10 ,batch_size=15)\n\n\n# In[8]:\n\nx_train_scaled= tf.convert_to_tensor(x_train_scaled)\nx_valid_scaled= tf.convert_to_tensor(x_valid_scaled)\nx_test_scaled= tf.convert_to_tensor(x_test_scaled)\n\ny_train= tf.convert_to_tensor(y_train)\ny_valid= tf.convert_to_tensor(y_valid)\ny_test= tf.convert_to_tensor(y_test)\n\n#x_train = tf.reshape(x_train,[-1,1,224,1016])\n#x_valid = tf.reshape(x_valid,[-1,1,224,1016])\n#x_test = tf.reshape(x_test,[-1,224,1016])\n\nprint(x_train.shape,\" \", x_valid.shape,\" \", x_test.shape) #6:2:2\n\n\n# In[9]:\n\nfrom tensorflow.contrib.opt import AdamWOptimizer\n\ndef dice_coe(output, target, loss_type='sorensen', axis=(1, 2, 3), smooth=1.):\n \"\"\"\n Soft dice (Sørensen or Jaccard) coefficient for comparing the similarity of two batch of data,\n 
usually be used for binary image segmentation\n i.e. labels are binary.\n The coefficient between 0 to 1, 1 means totally match.\n\n Parameters\n -----------\n output : Tensor\n A distribution with shape: [batch_size, ....], (any dimensions).\n target : Tensor\n The target distribution, format the same with `output`.\n loss_type : str\n ``jaccard`` or ``sorensen``, default is ``jaccard``.\n axis : tuple of int\n All dimensions are reduced, default ``[1,2,3]``.\n smooth : float\n This small value will be added to the numerator and denominator.\n - If both output and target are empty, it makes sure dice is 1.\n - If either output or target are empty (all pixels are background), dice = ```smooth/(small_value + smooth)``, then if smooth is very small, dice close to 0 (even the image values lower than the threshold), so in this case, higher smooth can have a higher dice.\n\n Examples\n ---------\n\n References\n -----------\n - `Wiki-Dice <https://en.wikipedia.org/wiki/Sørensen–Dice_coefficient>`__\n\n \"\"\"\n inse = tf.reduce_sum(output * target, axis=axis)\n if loss_type == 'jaccard':\n l = tf.reduce_sum(output * output, axis=axis)\n r = tf.reduce_sum(target * target, axis=axis)\n elif loss_type == 'sorensen':\n l = tf.reduce_sum(output, axis=axis)\n r = tf.reduce_sum(target, axis=axis)\n else:\n raise Exception(\"Unknow loss_type\")\n dice = (2. * inse + smooth) / (l + r + smooth)\n dice = tf.reduce_mean(dice)\n return dice\n\ndef dice_coef_loss(y_true, y_pred):\n return 1. - dice_coe(y_true, y_pred)\n\nfrom keras.losses import binary_crossentropy\n\ndef dice_p_bce(in_gt, in_pred):\n return 1.0*tf.reduce_mean(binary_crossentropy(in_gt, in_pred)) + dice_coef_loss(in_gt, in_pred)\n\n# adam = tf.train.AdamOptimizer()\n\n# adam with weight decay\nadamw = AdamWOptimizer(weight_decay=1e-4)\n\nmodel = get_unet(x_train_scaled, n_filters = 16, dropout = 0.4, batchnorm = False)\nmodel.compile(optimizer = adamw,\n loss=dice_p_bce,\n metrics=[dice_coe])\n\n\n# In[10]:\n\nmodel.summary()\n\n\n# In[ ]:\n\nbatch_size = 10\nhistory = model.fit(train_set,\n validation_data=valid_set,\n shuffle = True,\n steps_per_epoch = 570 // batch_size, #batch_size = data_size/steps_per_epoch\n validation_steps = 5, #75\n epochs = 20\n ) # starts training\n\n\n# In[ ]:\n\nmodel.evaluate(test_set, steps = 5)#75\n\n\n# In[ ]:\n\n\n\n\n# In[ ]:\n\n\n\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.enable_eager_execution", "tensorflow.reduce_sum", "tensorflow.keras.layers.Conv2DTranspose", "numpy.max", "tensorflow.keras.Input", "numpy.reshape", "tensorflow.keras.layers.Conv2D", "numpy.min", "sklearn.model_selection.train_test_split", "tensorflow.keras.Model", "tensorflow.contrib.opt.AdamWOptimizer", "numpy.array", "tensorflow.keras.layers.Activation", "tensorflow.reduce_mean", "tensorflow.data.Dataset.from_tensor_slices", "tensorflow.keras.layers.concatenate", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dropout", "sklearn.preprocessing.StandardScaler", "tensorflow.keras.layers.MaxPooling2D" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
ChristineBasta/fairseq
[ "c349ec3ec7fa0da7306c91769fbdc6146569a0ee" ]
[ "longformer_scripts/sequence_encoder.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\nfrom typing import Dict, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom fairseq import search, utils\nfrom fairseq.data import data_utils\nfrom fairseq.models import FairseqIncrementalDecoder\nfrom torch import Tensor\nfrom fairseq.ngram_repeat_block import NGramRepeatBlock\n\n\nclass SequenceEncoder(nn.Module):\n def __init__(\n self,\n models\n\n ):\n \"\"\"Generates encodings of a given source sentence.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models,\n currently support fairseq.models.TransformerModel for scripting\n\n \"\"\"\n super().__init__()\n if isinstance(models, EnsembleModel):\n self.model = models\n else:\n self.model = EnsembleModel(models)\n self.model.eval()\n\n\n def cuda(self):\n self.model.cuda()\n return self\n\n @torch.no_grad()\n def forward(\n self,\n sample: Dict[str, Dict[str, Tensor]],\n prefix_tokens: Optional[Tensor] = None,\n bos_token: Optional[int] = None,\n ):\n \"\"\"Generate a batch of translations.\n\n Args:\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n \"\"\"\n return self._generate(sample, prefix_tokens, bos_token=bos_token)\n\n # TODO(myleott): unused, deprecate after pytorch-translate migration\n def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):\n \"\"\"Iterate over a batched dataset and yield individual translations.\n Args:\n cuda (bool, optional): use GPU for generation\n timer (StopwatchMeter, optional): time generations\n \"\"\"\n for sample in data_itr:\n s = utils.move_to_cuda(sample) if cuda else sample\n if \"net_input\" not in s:\n continue\n input = s[\"net_input\"]\n # model.forward normally channels prev_output_tokens into the decoder\n # separately, but SequenceGenerator directly calls model.encoder\n encoder_input = {\n k: v for k, v in input.items() if k != \"prev_output_tokens\"\n }\n if timer is not None:\n timer.start()\n with torch.no_grad():\n hypos = self.generate(encoder_input)\n if timer is not None:\n timer.stop(sum(len(h[0][\"tokens\"]) for h in hypos))\n for i, id in enumerate(s[\"id\"].data):\n # remove padding\n src = utils.strip_pad(input[\"src_tokens\"].data[i, :], self.pad)\n ref = (\n utils.strip_pad(s[\"target\"].data[i, :], self.pad)\n if s[\"target\"] is not None\n else None\n )\n yield id, src, ref, hypos[i]\n\n @torch.no_grad()\n def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs) -> List[List[Dict[str, Tensor]]]:\n \"\"\"Generate translations. 
Match the api of other fairseq generators.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n constraints (torch.LongTensor, optional): force decoder to include\n the list of constraints\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n \"\"\"\n return self._generate(sample, **kwargs)\n\n def _generate(\n self,\n sample: Dict[str, Dict[str, Tensor]],\n prefix_tokens: Optional[Tensor] = None,\n constraints: Optional[Tensor] = None,\n bos_token: Optional[int] = None,\n ):\n incremental_states = torch.jit.annotate(\n List[Dict[str, Dict[str, Optional[Tensor]]]],\n [\n torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})\n for i in range(self.model.models_size)\n ],\n )\n net_input = sample[\"net_input\"]\n\n encoder_outs = self.model.forward_encoder(net_input)\n\n return encoder_outs\n\n \n\nclass EnsembleModel(nn.Module):\n \"\"\"A wrapper around an ensemble of models.\"\"\"\n\n def __init__(self, models):\n super().__init__()\n self.models_size = len(models)\n # method '__len__' is not supported in ModuleList for torch script\n self.single_model = models[0]\n self.models = nn.ModuleList(models)\n\n self.has_incremental: bool = False\n if all(\n hasattr(m, \"decoder\") and isinstance(m.decoder, FairseqIncrementalDecoder)\n for m in models\n ):\n self.has_incremental = True\n\n def forward(self):\n pass\n\n def has_encoder(self):\n return hasattr(self.single_model, \"encoder\")\n\n def has_incremental_states(self):\n return self.has_incremental\n\n def max_decoder_positions(self):\n return min([m.max_decoder_positions() for m in self.models])\n\n @torch.jit.export\n def forward_encoder(self, net_input: Dict[str, Tensor]):\n if not self.has_encoder():\n return None\n return [model.encoder.forward_torchscript(net_input) for model in self.models]\n\n @torch.jit.export\n def forward_decoder(\n self,\n tokens,\n encoder_outs: List[Dict[str, List[Tensor]]],\n incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],\n temperature: float = 1.0,\n ):\n log_probs = []\n avg_attn: Optional[Tensor] = None\n encoder_out: Optional[Dict[str, List[Tensor]]] = None\n for i, model in enumerate(self.models):\n if self.has_encoder():\n encoder_out = encoder_outs[i]\n # decode each model\n if self.has_incremental_states():\n decoder_out = model.decoder.forward(\n tokens,\n encoder_out=encoder_out,\n incremental_state=incremental_states[i],\n )\n else:\n decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)\n\n attn: Optional[Tensor] = None\n decoder_len = len(decoder_out)\n if decoder_len > 1 and decoder_out[1] is not None:\n if isinstance(decoder_out[1], Tensor):\n attn = decoder_out[1]\n else:\n attn_holder = decoder_out[1][\"attn\"]\n if isinstance(attn_holder, Tensor):\n attn = attn_holder\n elif attn_holder is not None:\n attn = attn_holder[0]\n if attn is not None:\n attn = attn[:, -1, :]\n\n decoder_out_tuple = (\n decoder_out[0][:, -1:, :].div_(temperature),\n None if decoder_len <= 1 else decoder_out[1],\n )\n\n probs = model.get_normalized_probs(\n decoder_out_tuple, log_probs=True, sample=None\n )\n probs = probs[:, -1, :]\n if self.models_size == 1:\n return probs, attn\n\n log_probs.append(probs)\n if attn is not None:\n if avg_attn is None:\n avg_attn = attn\n else:\n avg_attn.add_(attn)\n\n avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(\n self.models_size\n )\n\n if 
avg_attn is not None:\n avg_attn.div_(self.models_size)\n return avg_probs, avg_attn\n\n @torch.jit.export\n def reorder_encoder_out(\n self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order\n ):\n \"\"\"\n Reorder encoder output according to *new_order*.\n\n Args:\n encoder_out: output from the ``forward()`` method\n new_order (LongTensor): desired order\n\n Returns:\n *encoder_out* rearranged according to *new_order*\n \"\"\"\n new_outs: List[Dict[str, List[Tensor]]] = []\n if not self.has_encoder():\n return new_outs\n for i, model in enumerate(self.models):\n assert encoder_outs is not None\n new_outs.append(\n model.encoder.reorder_encoder_out(encoder_outs[i], new_order)\n )\n return new_outs\n\n @torch.jit.export\n def reorder_incremental_state(\n self,\n incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],\n new_order,\n ):\n if not self.has_incremental_states():\n return\n for i, model in enumerate(self.models):\n model.decoder.reorder_incremental_state_scripting(\n incremental_states[i], new_order\n )\n\n\n\n" ]
[ [ "torch.stack", "torch.nn.ModuleList", "torch.no_grad", "torch.jit.annotate" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rishavpramanik/mealpy
[ "d4a4d5810f15837764e4ee61517350fef3dc92b3", "d4a4d5810f15837764e4ee61517350fef3dc92b3", "d4a4d5810f15837764e4ee61517350fef3dc92b3", "d4a4d5810f15837764e4ee61517350fef3dc92b3" ]
[ "mealpy/swarm_based/SSpiderO.py", "tests/physics_based/test_NRO.py", "tests/swarm_based/test_ACOR.py", "mealpy/swarm_based/SSA.py" ]
[ "# !/usr/bin/env python\n# Created by \"Thieu\" at 12:00, 17/03/2020 ----------%\n# Email: [email protected] %\n# Github: https://github.com/thieu1995 %\n# --------------------------------------------------%\n\nimport numpy as np\nfrom copy import deepcopy\nfrom mealpy.optimizer import Optimizer\n\n\nclass BaseSSpiderO(Optimizer):\n \"\"\"\n The original version of: Social Spider Optimization (SSpiderO)\n\n Links:\n 1. https://www.hindawi.com/journals/mpe/2018/6843923/\n\n Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum:\n + fp (list, tuple): (fp_min, fp_max): Female Percent, default = (0.65, 0.9)\n\n Examples\n ~~~~~~~~\n >>> import numpy as np\n >>> from mealpy.swarm_based.SSpiderO import BaseSSpiderO\n >>>\n >>> def fitness_function(solution):\n >>> return np.sum(solution**2)\n >>>\n >>> problem_dict1 = {\n >>> \"fit_func\": fitness_function,\n >>> \"lb\": [-10, -15, -4, -2, -8],\n >>> \"ub\": [10, 15, 12, 8, 20],\n >>> \"minmax\": \"min\",\n >>> }\n >>>\n >>> epoch = 1000\n >>> pop_size = 50\n >>> fb = [0.65, 0.9]\n >>> model = BaseSSpiderO(problem_dict1, epoch, pop_size, fb)\n >>> best_position, best_fitness = model.solve()\n >>> print(f\"Solution: {best_position}, Fitness: {best_fitness}\")\n\n References\n ~~~~~~~~~~\n [1] Luque-Chang, A., Cuevas, E., Fausto, F., Zaldivar, D. and Pérez, M., 2018. Social spider\n optimization algorithm: modifications, applications, and perspectives. Mathematical\n Problems in Engineering, 2018.\n \"\"\"\n\n ID_POS = 0\n ID_TAR = 1\n ID_WEI = 2\n\n def __init__(self, problem, epoch=10000, pop_size=100, fp=(0.65, 0.9), **kwargs):\n \"\"\"\n Args:\n problem (dict): The problem dictionary\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size, default = 100\n fp (list, tuple): (fp_min, fp_max): Female Percent, default = (0.65, 0.9)\n \"\"\"\n super().__init__(problem, kwargs)\n self.epoch = self.validator.check_int(\"epoch\", epoch, [1, 100000])\n self.pop_size = self.validator.check_int(\"pop_size\", pop_size, [10, 10000])\n fp = self.validator.check_tuple_float(\"fp (min, max)\", fp, ((0, 1.0), (0, 1.0)))\n self.fp = (min(fp), max(fp))\n\n def create_solution(self, lb=None, ub=None):\n \"\"\"\n To get the position, fitness wrapper, target and obj list\n + A[self.ID_POS] --> Return: position\n + A[self.ID_TAR] --> Return: [target, [obj1, obj2, ...]]\n + A[self.ID_TAR][self.ID_FIT] --> Return: target\n + A[self.ID_TAR][self.ID_OBJ] --> Return: [obj1, obj2, ...]\n\n Returns:\n list: wrapper of solution with format [position, target, weight]\n \"\"\"\n position = np.random.uniform(lb, ub)\n position = self.amend_position(position, lb, ub)\n target = self.get_target_wrapper(position)\n weight = 0.0\n return [position, target, weight]\n\n def amend_position(self, position=None, lb=None, ub=None):\n \"\"\"\n Depend on what kind of problem are we trying to solve, there will be an different amend_position\n function to rebound the position of agent into the valid range.\n\n Args:\n position: vector position (location) of the solution.\n lb: list of lower bound values\n ub: list of upper bound values\n\n Returns:\n Amended position (make the position is in bound)\n \"\"\"\n return np.where(np.logical_and(lb <= position, position <= ub), position, np.random.uniform(lb, ub))\n\n def initialization(self):\n fp_temp = self.fp[0] + (self.fp[1] - self.fp[0]) * np.random.uniform() # Female Aleatory Percent\n self.n_f = int(self.pop_size * fp_temp) # number of 
female\n self.n_m = self.pop_size - self.n_f # number of male\n # Probabilities of attraction or repulsion Proper tuning for better results\n self.p_m = (self.epoch + 1 - np.array(range(1, self.epoch + 1))) / (self.epoch + 1)\n\n self.pop_males = self.create_population(self.n_m)\n self.pop_females = self.create_population(self.n_f)\n pop = deepcopy(self.pop_females) + deepcopy(self.pop_males)\n self.pop = self._recalculate_weights(pop)\n _, self.g_best = self.get_global_best_solution(self.pop)\n\n def _move_females(self, epoch=None):\n scale_distance = np.sum(self.problem.ub - self.problem.lb)\n pop = self.pop_females + self.pop_males\n # Start looking for any stronger vibration\n for i in range(0, self.n_f): # Move the females\n ## Find the position s\n id_min = None\n dist_min = 2 ** 16\n for j in range(0, self.pop_size):\n if self.pop_females[i][self.ID_WEI] < pop[j][self.ID_WEI]:\n dt = np.linalg.norm(pop[j][self.ID_POS] - self.pop_females[i][self.ID_POS]) / scale_distance\n if dt < dist_min and dt != 0:\n dist_min = dt\n id_min = j\n x_s = np.zeros(self.problem.n_dims)\n vibs = 0\n if id_min is not None:\n vibs = 2 * (pop[id_min][self.ID_WEI] * np.exp(-(np.random.uniform() * dist_min ** 2))) # Vib for the shortest\n x_s = pop[id_min][self.ID_POS]\n\n ## Find the position b\n dtb = np.linalg.norm(self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) / scale_distance\n vibb = 2 * (self.g_best[self.ID_WEI] * np.exp(-(np.random.uniform() * dtb ** 2)))\n\n ## Do attraction or repulsion\n beta = np.random.uniform(0, 1, self.problem.n_dims)\n gamma = np.random.uniform(0, 1, self.problem.n_dims)\n random = 2 * self.p_m[epoch] * (np.random.uniform(0, 1, self.problem.n_dims) - 0.5)\n if np.random.uniform() >= self.p_m[epoch]: # Do an attraction\n pos_new = self.pop_females[i][self.ID_POS] + vibs * (x_s - self.pop_females[i][self.ID_POS]) * beta + \\\n vibb * (self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) * gamma + random\n else: # Do a repulsion\n pos_new = self.pop_females[i][self.ID_POS] - vibs * (x_s - self.pop_females[i][self.ID_POS]) * beta - \\\n vibb * (self.g_best[self.ID_POS] - self.pop_females[i][self.ID_POS]) * gamma + random\n self.pop_females[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)\n self.pop_females = self.update_target_wrapper_population(self.pop_females)\n self.nfe_epoch += self.n_f\n\n def _move_males(self, epoch=None):\n scale_distance = np.sum(self.problem.ub - self.problem.lb)\n my_median = np.median([it[self.ID_WEI] for it in self.pop_males])\n pop = self.pop_females + self.pop_males\n all_pos = np.array([it[self.ID_POS] for it in pop])\n all_wei = np.array([it[self.ID_WEI] for it in pop]).reshape((self.pop_size, 1))\n total_wei = np.sum(all_wei)\n if total_wei == 0:\n mean = np.mean(all_pos, axis=0)\n else:\n mean = np.sum(all_wei * all_pos, axis=0) / total_wei\n for i in range(0, self.n_m):\n delta = 2 * np.random.uniform(0, 1, self.problem.n_dims) - 0.5\n random = 2 * self.p_m[epoch] * (np.random.uniform(0, 1, self.problem.n_dims) - 0.5)\n\n if self.pop_males[i][self.ID_WEI] >= my_median: # Spider above the median\n # Start looking for a female with stronger vibration\n id_min = None\n dist_min = 99999999\n for j in range(0, self.n_f):\n if self.pop_females[j][self.ID_WEI] > self.pop_males[i][self.ID_WEI]:\n dt = np.linalg.norm(self.pop_females[j][self.ID_POS] - self.pop_males[i][self.ID_POS]) / scale_distance\n if dt < dist_min and dt != 0:\n dist_min = dt\n id_min = j\n x_s = np.zeros(self.problem.n_dims)\n 
vibs = 0\n if id_min != None:\n # Vib for the shortest\n vibs = 2 * (self.pop_females[id_min][self.ID_WEI] * np.exp(-(np.random.uniform() * dist_min ** 2)))\n x_s = self.pop_females[id_min][self.ID_POS]\n pos_new = self.pop_males[i][self.ID_POS] + vibs * (x_s - self.pop_males[i][self.ID_POS]) * delta + random\n else:\n # Spider below median, go to weighted mean\n pos_new = self.pop_males[i][self.ID_POS] + delta * (mean - self.pop_males[i][self.ID_POS]) + random\n self.pop_males[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)\n self.pop_males = self.update_target_wrapper_population(self.pop_males)\n self.nfe_epoch += self.n_m\n\n ### Crossover\n def _crossover__(self, mom=None, dad=None, id=0):\n child1 = np.zeros(self.problem.n_dims)\n child2 = np.zeros(self.problem.n_dims)\n if id == 0: # arithmetic recombination\n r = np.random.uniform(0.5, 1) # w1 = w2 when r =0.5\n child1 = np.multiply(r, mom) + np.multiply((1 - r), dad)\n child2 = np.multiply(r, dad) + np.multiply((1 - r), mom)\n\n elif id == 1:\n id1 = np.random.randint(1, int(self.problem.n_dims / 2))\n id2 = int(id1 + self.problem.n_dims / 2)\n\n child1[:id1] = mom[:id1]\n child1[id1:id2] = dad[id1:id2]\n child1[id2:] = mom[id2:]\n\n child2[:id1] = dad[:id1]\n child2[id1:id2] = mom[id1:id2]\n child2[id2:] = dad[id2:]\n elif id == 2:\n temp = int(self.problem.n_dims / 2)\n child1[:temp] = mom[:temp]\n child1[temp:] = dad[temp:]\n child2[:temp] = dad[:temp]\n child2[temp:] = mom[temp:]\n\n return child1, child2\n\n def _mating(self):\n # Check whether a spider is good or not (above median)\n my_median = np.median([it[self.ID_WEI] for it in self.pop_males])\n pop_males_new = [self.pop_males[i] for i in range(self.n_m) if self.pop_males[i][self.ID_WEI] > my_median]\n\n # Calculate the radio\n pop = self.pop_females + self.pop_males\n all_pos = np.array([it[self.ID_POS] for it in pop])\n rad = np.max(all_pos, axis=1) - np.min(all_pos, axis=1)\n r = np.sum(rad) / (2 * self.problem.n_dims)\n\n # Start looking if there's a good female near\n list_child = []\n couples = []\n for i in range(0, len(pop_males_new)):\n for j in range(0, self.n_f):\n dist = np.linalg.norm(pop_males_new[i][self.ID_POS] - self.pop_females[j][self.ID_POS])\n if dist < r:\n couples.append([pop_males_new[i], self.pop_females[j]])\n if couples:\n n_child = len(couples)\n for k in range(n_child):\n child1, child2 = self._crossover__(couples[k][0][self.ID_POS], couples[k][1][self.ID_POS], 0)\n pos1 = self.amend_position(child1, self.problem.lb, self.problem.ub)\n pos2 = self.amend_position(child2, self.problem.lb, self.problem.ub)\n target1 = self.get_target_wrapper(pos1)\n target2 = self.get_target_wrapper(pos2)\n list_child.append([pos1, target1, 0.0])\n list_child.append([pos2, target2, 0.0])\n\n else:\n list_child = self.create_population(self.pop_size)\n self.nfe_epoch += len(list_child)\n return list_child\n\n def _survive(self, pop=None, pop_child=None):\n n_child = len(pop)\n pop_child = self.get_sorted_strim_population(pop_child, n_child)\n for i in range(0, n_child):\n if self.compare_agent(pop_child[i], pop[i]):\n pop[i] = deepcopy(pop_child[i])\n return pop\n\n def _recalculate_weights(self, pop=None):\n fit_total, fit_best, fit_worst = self.get_special_fitness(pop)\n for i in range(len(pop)):\n if fit_best == fit_worst:\n pop[i][self.ID_WEI] = np.random.uniform(0.2, 0.8)\n else:\n pop[i][self.ID_WEI] = 0.001 + (pop[i][self.ID_TAR][self.ID_FIT] - fit_worst) / (fit_best - fit_worst)\n return pop\n\n def evolve(self, epoch):\n 
\"\"\"\n The main operations (equations) of algorithm. Inherit from Optimizer class\n\n Args:\n epoch (int): The current iteration\n \"\"\"\n self.nfe_epoch = 0\n ### Movement of spiders\n self._move_females(epoch)\n self._move_males(epoch)\n\n # Recalculate weights\n pop = self.pop_females + self.pop_males\n pop = self._recalculate_weights(pop)\n\n # Mating Operator\n pop_child = self._mating()\n pop = self._survive(pop, pop_child)\n self.pop = self._recalculate_weights(pop)\n self.nfe_per_epoch = self.nfe_epoch\n", "#!/usr/bin/env python\n# Created by \"Thieu\" at 20:07, 19/03/2022 ----------% \n# Email: [email protected] % \n# Github: https://github.com/thieu1995 % \n# --------------------------------------------------%\n\nfrom mealpy.physics_based import NRO\nfrom mealpy.optimizer import Optimizer\nimport numpy as np\nimport pytest\n\n\[email protected](scope=\"module\") # scope: Call only 1 time at the beginning\ndef problem():\n def fitness_function(solution):\n return np.sum(solution ** 2)\n\n problem = {\n \"fit_func\": fitness_function,\n \"lb\": [-10, -10, -10, -10, -10],\n \"ub\": [10, 10, 10, 10, 10],\n \"minmax\": \"min\",\n \"log_to\": None\n }\n return problem\n\n\ndef test_NRO_results(problem):\n models = [\n NRO.BaseNRO(problem, epoch=100, pop_size=50),\n ]\n for model in models:\n best_position, best_fitness = model.solve()\n assert isinstance(model, Optimizer)\n assert isinstance(best_position, np.ndarray)\n assert len(best_position) == len(problem[\"lb\"])\n", "#!/usr/bin/env python\n# Created by \"Thieu\" at 00:32, 20/03/2022 ----------% \n# Email: [email protected] % \n# Github: https://github.com/thieu1995 % \n# --------------------------------------------------%\n\nfrom mealpy.swarm_based import ACOR\nfrom mealpy.optimizer import Optimizer\nimport numpy as np\nimport pytest\n\n\[email protected](scope=\"module\") # scope: Call only 1 time at the beginning\ndef problem():\n def fitness_function(solution):\n return np.sum(solution ** 2)\n\n problem = {\n \"fit_func\": fitness_function,\n \"lb\": [-10, -10, -10, -10, -10],\n \"ub\": [10, 10, 10, 10, 10],\n \"minmax\": \"min\",\n \"log_to\": None\n }\n return problem\n\n\ndef test_ACOR_results(problem):\n models = [\n ACOR.BaseACOR(problem, epoch=10, pop_size=50, sample_count=25, intent_factor=0.5, zeta=1.0)\n ]\n for model in models:\n best_position, best_fitness = model.solve()\n assert isinstance(model, Optimizer)\n assert isinstance(best_position, np.ndarray)\n assert len(best_position) == len(problem[\"lb\"])\n", "# !/usr/bin/env python\n# Created by \"Thieu\" at 17:22, 29/05/2020 ----------%\n# Email: [email protected] %\n# Github: https://github.com/thieu1995 %\n# --------------------------------------------------%\n\nimport numpy as np\nfrom copy import deepcopy\nfrom mealpy.optimizer import Optimizer\n\n\nclass BaseSSA(Optimizer):\n \"\"\"\n My changed version of: Sparrow Search Algorithm (SSA)\n\n Notes\n ~~~~~\n + First, I sort the algorithm and find g-best and g-worst\n + In Eq. 
4, Instead of using A+ and L, I used np.random.normal()\n + Some components (g_best_position, fitness updated) are missing in Algorithm 1 (paper)\n\n Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum:\n + ST (float): ST in [0.5, 1.0], safety threshold value, default = 0.8\n + PD (float): number of producers (percentage), default = 0.2\n + SD (float): number of sparrows who perceive the danger, default = 0.1\n\n Examples\n ~~~~~~~~\n >>> import numpy as np\n >>> from mealpy.swarm_based.SSA import BaseSSA\n >>>\n >>> def fitness_function(solution):\n >>> return np.sum(solution**2)\n >>>\n >>> problem_dict1 = {\n >>> \"fit_func\": fitness_function,\n >>> \"lb\": [-10, -15, -4, -2, -8],\n >>> \"ub\": [10, 15, 12, 8, 20],\n >>> \"minmax\": \"min\",\n >>> }\n >>>\n >>> epoch = 1000\n >>> pop_size = 50\n >>> ST = 0.8\n >>> PD = 0.2\n >>> SD = 0.1\n >>> model = BaseSSA(problem_dict1, epoch, pop_size, ST, PD, SD)\n >>> best_position, best_fitness = model.solve()\n >>> print(f\"Solution: {best_position}, Fitness: {best_fitness}\")\n\n References\n ~~~~~~~~~~\n [1] Xue, J. and Shen, B., 2020. A novel swarm intelligence optimization approach:\n sparrow search algorithm. Systems Science & Control Engineering, 8(1), pp.22-34.\n \"\"\"\n\n def __init__(self, problem, epoch=10000, pop_size=100, ST=0.8, PD=0.2, SD=0.1, **kwargs):\n \"\"\"\n Args:\n problem (dict): The problem dictionary\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size, default = 100\n ST (float): ST in [0.5, 1.0], safety threshold value, default = 0.8\n PD (float): number of producers (percentage), default = 0.2\n SD (float): number of sparrows who perceive the danger, default = 0.1\n \"\"\"\n super().__init__(problem, kwargs)\n self.epoch = self.validator.check_int(\"epoch\", epoch, [1, 100000])\n self.pop_size = self.validator.check_int(\"pop_size\", pop_size, [10, 10000])\n self.ST = self.validator.check_float(\"ST\", ST, (0, 1.0))\n self.PD = self.validator.check_float(\"PD\", PD, (0, 1.0))\n self.SD = self.validator.check_float(\"SD\", SD, (0, 1.0))\n self.n1 = int(self.PD * self.pop_size)\n self.n2 = int(self.SD * self.pop_size)\n self.nfe_per_epoch = 2 * self.pop_size - self.n2\n self.sort_flag = True\n\n def amend_position(self, position=None, lb=None, ub=None):\n \"\"\"\n Depend on what kind of problem are we trying to solve, there will be an different amend_position\n function to rebound the position of agent into the valid range.\n\n Args:\n position: vector position (location) of the solution.\n lb: list of lower bound values\n ub: list of upper bound values\n\n Returns:\n Amended position (make the position is in bound)\n \"\"\"\n return np.where(np.logical_and(lb <= position, position <= ub), position, np.random.uniform(lb, ub))\n\n def evolve(self, epoch):\n \"\"\"\n The main operations (equations) of algorithm. 
Inherit from Optimizer class\n\n Args:\n epoch (int): The current iteration\n \"\"\"\n r2 = np.random.uniform() # R2 in [0, 1], the alarm value, random value\n pop_new = []\n for idx in range(0, self.pop_size):\n # Using equation (3) update the sparrow’s location;\n if idx < self.n1:\n if r2 < self.ST:\n des = (epoch + 1) / (np.random.uniform() * self.epoch + self.EPSILON)\n if des > 5:\n des = np.random.normal()\n x_new = self.pop[idx][self.ID_POS] * np.exp(des)\n else:\n x_new = self.pop[idx][self.ID_POS] + np.random.normal() * np.ones(self.problem.n_dims)\n else:\n # Using equation (4) update the sparrow’s location;\n _, x_p, worst = self.get_special_solutions(self.pop, best=1, worst=1)\n g_best = x_p[0], g_worst = worst[0]\n if idx > int(self.pop_size / 2):\n x_new = np.random.normal() * np.exp((g_worst[self.ID_POS] - self.pop[idx][self.ID_POS]) / (idx + 1) ** 2)\n else:\n x_new = g_best[self.ID_POS] + np.abs(self.pop[idx][self.ID_POS] - g_best[self.ID_POS]) * np.random.normal()\n pos_new = self.amend_position(x_new, self.problem.lb, self.problem.ub)\n pop_new.append([pos_new, None])\n pop_new = self.update_target_wrapper_population(pop_new)\n pop_new = self.greedy_selection_population(self.pop, pop_new)\n pop_new, best, worst = self.get_special_solutions(pop_new, best=1, worst=1)\n g_best, g_worst = best[0], worst[0]\n pop2 = deepcopy(pop_new[self.n2:])\n child = []\n for idx in range(0, len(pop2)):\n # Using equation (5) update the sparrow’s location;\n if self.compare_agent(self.pop[idx], g_best):\n x_new = pop2[idx][self.ID_POS] + \\\n np.random.uniform(-1, 1) * (np.abs(pop2[idx][self.ID_POS] - g_worst[self.ID_POS]) /\n (pop2[idx][self.ID_TAR][self.ID_FIT] - g_worst[self.ID_TAR][self.ID_FIT] + self.EPSILON))\n else:\n x_new = g_best[self.ID_POS] + np.random.normal() * np.abs(pop2[idx][self.ID_POS] - g_best[self.ID_POS])\n pos_new = self.amend_position(x_new, self.problem.lb, self.problem.ub)\n child.append([pos_new, None])\n child = self.update_target_wrapper_population(child)\n child = self.greedy_selection_population(pop2, child)\n self.pop = pop_new[:self.n2] + child\n\n\nclass OriginalSSA(BaseSSA):\n \"\"\"\n The original version of: Sparrow Search Algorithm (SSA)\n\n Links:\n 1. https://doi.org/10.1080/21642583.2019.1708830\n\n Notes\n ~~~~~\n + The paper contains some unclear equations and symbol\n\n Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum:\n + ST (float): ST in [0.5, 1.0], safety threshold value, default = 0.8\n + PD (float): number of producers (percentage), default = 0.2\n + SD (float): number of sparrows who perceive the danger, default = 0.1\n\n Examples\n ~~~~~~~~\n >>> import numpy as np\n >>> from mealpy.swarm_based.SSA import OriginalSSA\n >>>\n >>> def fitness_function(solution):\n >>> return np.sum(solution**2)\n >>>\n >>> problem_dict1 = {\n >>> \"fit_func\": fitness_function,\n >>> \"lb\": [-10, -15, -4, -2, -8],\n >>> \"ub\": [10, 15, 12, 8, 20],\n >>> \"minmax\": \"min\",\n >>> }\n >>>\n >>> epoch = 1000\n >>> pop_size = 50\n >>> ST = 0.8\n >>> PD = 0.2\n >>> SD = 0.1\n >>> model = OriginalSSA(problem_dict1, epoch, pop_size, ST, PD, SD)\n >>> best_position, best_fitness = model.solve()\n >>> print(f\"Solution: {best_position}, Fitness: {best_fitness}\")\n\n References\n ~~~~~~~~~~\n [1] Xue, J. and Shen, B., 2020. A novel swarm intelligence optimization approach:\n sparrow search algorithm. 
Systems Science & Control Engineering, 8(1), pp.22-34.\n \"\"\"\n\n def __init__(self, problem, epoch=10000, pop_size=100, ST=0.8, PD=0.2, SD=0.1, **kwargs):\n \"\"\"\n Args:\n problem (dict): The problem dictionary\n epoch (int): maximum number of iterations, default = 10000\n pop_size (int): number of population size, default = 100\n ST (float): ST in [0.5, 1.0], safety threshold value, default = 0.8\n PD (float): number of producers (percentage), default = 0.2\n SD (float): number of sparrows who perceive the danger, default = 0.1\n \"\"\"\n super().__init__(problem, epoch, pop_size, ST, PD, SD, **kwargs)\n\n def evolve(self, epoch):\n \"\"\"\n The main operations (equations) of algorithm. Inherit from Optimizer class\n\n Args:\n epoch (int): The current iteration\n \"\"\"\n r2 = np.random.uniform() # R2 in [0, 1], the alarm value, random value\n pop_new = []\n for idx in range(0, self.pop_size):\n # Using equation (3) update the sparrow’s location;\n if idx < self.n1:\n if r2 < self.ST:\n des = (idx + 1) / (np.random.uniform() * self.epoch + self.EPSILON)\n if des > 5:\n des = np.random.uniform()\n x_new = self.pop[idx][self.ID_POS] * np.exp(des)\n else:\n x_new = self.pop[idx][self.ID_POS] + np.random.normal() * np.ones(self.problem.n_dims)\n else:\n # Using equation (4) update the sparrow’s location;\n _, x_p, worst = self.get_special_solutions(self.pop, best=1, worst=1)\n g_best, g_worst = x_p[0], worst[0]\n if idx > int(self.pop_size / 2):\n x_new = np.random.normal() * np.exp((g_worst[self.ID_POS] - self.pop[idx][self.ID_POS]) / (idx + 1) ** 2)\n else:\n L = np.ones((1, self.problem.n_dims))\n A = np.sign(np.random.uniform(-1, 1, (1, self.problem.n_dims)))\n A1 = A.T * np.linalg.inv(np.matmul(A, A.T)) * L\n x_new = g_best[self.ID_POS] + np.matmul(np.abs(self.pop[idx][self.ID_POS] - g_best[self.ID_POS]), A1)\n pos_new = self.amend_position(x_new, self.problem.lb, self.problem.ub)\n pop_new.append([pos_new, None])\n pop_new = self.update_target_wrapper_population(pop_new)\n pop_new = self.greedy_selection_population(self.pop, pop_new)\n pop_new, best, worst = self.get_special_solutions(pop_new, best=1, worst=1)\n g_best, g_worst = best[0], worst[0]\n pop2 = pop_new[self.n2:]\n child = []\n for idx in range(0, len(pop2)):\n # Using equation (5) update the sparrow’s location;\n if self.compare_agent(self.pop[idx], g_best):\n x_new = pop2[idx][self.ID_POS] + \\\n np.random.uniform(-1, 1) * (np.abs(pop2[idx][self.ID_POS] - g_worst[self.ID_POS]) /\n (pop2[idx][self.ID_TAR][self.ID_FIT] - g_worst[self.ID_TAR][self.ID_FIT] + self.EPSILON))\n else:\n x_new = g_best[self.ID_POS] + np.random.normal() * np.abs(pop2[idx][self.ID_POS] - g_best[self.ID_POS])\n pos_new = self.amend_position(x_new, self.problem.lb, self.problem.ub)\n child.append([pos_new, None])\n child = self.update_target_wrapper_population(child)\n child = self.greedy_selection_population(pop2, child)\n self.pop = pop_new[:self.n2] + child\n" ]
[ [ "numpy.logical_and", "numpy.min", "numpy.multiply", "numpy.median", "numpy.linalg.norm", "numpy.max", "numpy.mean", "numpy.random.uniform", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.sum" ], [ "numpy.sum" ], [ "numpy.abs", "numpy.logical_and", "numpy.matmul", "numpy.ones", "numpy.random.normal", "numpy.random.uniform", "numpy.exp" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
yueqiw/ml-agents
[ "499120a45c6a0203ff39770ded9a9dc6069ffa46" ]
[ "ml-agents/mlagents/trainers/ppo/trainer.py" ]
[ "# # Unity ML-Agents Toolkit\n# ## ML-Agent Learning (PPO)\n# Contains an implementation of PPO as described (https://arxiv.org/abs/1707.06347).\n\nimport logging\nimport os\nfrom collections import deque\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom mlagents.envs import AllBrainInfo, BrainInfo\nfrom mlagents.trainers.buffer import Buffer\nfrom mlagents.trainers.ppo.policy import PPOPolicy\nfrom mlagents.trainers.trainer import UnityTrainerException, Trainer\n\nlogger = logging.getLogger(\"mlagents.envs\")\n\n\nclass PPOTrainer(Trainer):\n \"\"\"The PPOTrainer is an implementation of the PPO algorithm.\"\"\"\n\n def __init__(self, sess, brain, reward_buff_cap, trainer_parameters, training, seed, run_id):\n \"\"\"\n Responsible for collecting experiences and training PPO model.\n :param sess: Tensorflow session.\n :param trainer_parameters: The parameters for the trainer (dictionary).\n :param training: Whether the trainer is set for training.\n \"\"\"\n super(PPOTrainer, self).__init__(sess, brain.brain_name, trainer_parameters, training, run_id)\n\n self.param_keys = ['batch_size', 'beta', 'buffer_size', 'epsilon', 'gamma', 'hidden_units', 'lambd',\n 'learning_rate', 'max_steps', 'normalize', 'num_epoch', 'num_layers',\n 'time_horizon', 'sequence_length', 'summary_freq', 'use_recurrent',\n 'graph_scope', 'summary_path', 'memory_size', 'use_curiosity', 'curiosity_strength',\n 'curiosity_enc_size', 'only_vec']\n #print(self.param_keys)\n #print(trainer_parameters)\n for k in self.param_keys:\n if k not in trainer_parameters:\n raise UnityTrainerException(\"The hyperparameter {0} could not be found for the PPO trainer of \"\n \"brain {1}.\".format(k, brain.brain_name))\n\n self.use_curiosity = bool(trainer_parameters['use_curiosity'])\n\n self.step = 0\n\n self.policy = PPOPolicy(seed, brain, trainer_parameters,\n sess, self.is_training)\n\n stats = {'cumulative_reward': [], 'episode_length': [], 'value_estimate': [],\n 'entropy': [], 'value_loss': [], 'policy_loss': [], 'learning_rate': []}\n if self.use_curiosity:\n stats['forward_loss'] = []\n stats['inverse_loss'] = []\n stats['intrinsic_reward'] = []\n self.intrinsic_rewards = {}\n self.stats = stats\n\n self.training_buffer = Buffer()\n self.cumulative_rewards = {}\n self._reward_buffer = deque(maxlen=reward_buff_cap)\n self.episode_steps = {}\n self.summary_path = trainer_parameters['summary_path']\n if not os.path.exists(self.summary_path):\n os.makedirs(self.summary_path)\n\n self.summary_writer = tf.summary.FileWriter(self.summary_path)\n\n def __str__(self):\n return '''Hyperparameters for the PPO Trainer of brain {0}: \\n{1}'''.format(\n self.brain_name, '\\n'.join(['\\t{0}:\\t{1}'.format(x, self.trainer_parameters[x]) for x in self.param_keys]))\n\n @property\n def parameters(self):\n \"\"\"\n Returns the trainer parameters of the trainer.\n \"\"\"\n return self.trainer_parameters\n\n @property\n def get_max_steps(self):\n \"\"\"\n Returns the maximum number of steps. Is used to know when the trainer should be stopped.\n :return: The maximum number of steps of the trainer\n \"\"\"\n return float(self.trainer_parameters['max_steps'])\n\n @property\n def get_step(self):\n \"\"\"\n Returns the number of steps the trainer has performed\n :return: the step count of the trainer\n \"\"\"\n return self.step\n\n @property\n def reward_buffer(self):\n \"\"\"\n Returns the reward buffer. 
The reward buffer contains the cumulative\n rewards of the most recent episodes completed by agents using this\n trainer.\n :return: the reward buffer.\n \"\"\"\n return self._reward_buffer\n\n def increment_step_and_update_last_reward(self):\n \"\"\"\n Increment the step count of the trainer and Updates the last reward\n \"\"\"\n if len(self.stats['cumulative_reward']) > 0:\n mean_reward = np.mean(self.stats['cumulative_reward'])\n self.policy.update_reward(mean_reward)\n self.policy.increment_step()\n self.step = self.policy.get_current_step()\n\n def take_action(self, all_brain_info: AllBrainInfo):\n \"\"\"\n Decides actions given observations information, and takes them in environment.\n :param all_brain_info: A dictionary of brain names and BrainInfo from environment.\n :return: a tuple containing action, memories, values and an object\n to be passed to add experiences\n \"\"\"\n curr_brain_info = all_brain_info[self.brain_name]\n if len(curr_brain_info.agents) == 0:\n return [], [], [], None, None\n\n run_out = self.policy.evaluate(curr_brain_info)\n self.stats['value_estimate'].append(run_out['value'].mean())\n self.stats['entropy'].append(run_out['entropy'].mean())\n self.stats['learning_rate'].append(run_out['learning_rate'])\n if self.policy.use_recurrent:\n return run_out['action'], run_out['memory_out'], None, \\\n run_out['value'], run_out\n else:\n return run_out['action'], None, None, run_out['value'], run_out\n\n def construct_curr_info(self, next_info: BrainInfo) -> BrainInfo:\n \"\"\"\n Constructs a BrainInfo which contains the most recent previous experiences for all agents info\n which correspond to the agents in a provided next_info.\n :BrainInfo next_info: A t+1 BrainInfo.\n :return: curr_info: Reconstructed BrainInfo to match agents of next_info.\n \"\"\"\n visual_observations = [[]]\n vector_observations = []\n text_observations = []\n memories = []\n rewards = []\n local_dones = []\n max_reacheds = []\n agents = []\n prev_vector_actions = []\n prev_text_actions = []\n for agent_id in next_info.agents:\n agent_brain_info = self.training_buffer[agent_id].last_brain_info\n if agent_brain_info is None:\n agent_brain_info = next_info\n agent_index = agent_brain_info.agents.index(agent_id)\n for i in range(len(next_info.visual_observations)):\n visual_observations[i].append(agent_brain_info.visual_observations[i][agent_index])\n vector_observations.append(agent_brain_info.vector_observations[agent_index])\n text_observations.append(agent_brain_info.text_observations[agent_index])\n if self.policy.use_recurrent:\n if len(agent_brain_info.memories > 0):\n memories.append(agent_brain_info.memories[agent_index])\n else:\n memories.append(self.policy.make_empty_memory(1))\n rewards.append(agent_brain_info.rewards[agent_index])\n local_dones.append(agent_brain_info.local_done[agent_index])\n max_reacheds.append(agent_brain_info.max_reached[agent_index])\n agents.append(agent_brain_info.agents[agent_index])\n prev_vector_actions.append(agent_brain_info.previous_vector_actions[agent_index])\n prev_text_actions.append(agent_brain_info.previous_text_actions[agent_index])\n if self.policy.use_recurrent:\n memories = np.vstack(memories)\n curr_info = BrainInfo(visual_observations, vector_observations, text_observations,\n memories, rewards, agents, local_dones, prev_vector_actions,\n prev_text_actions, max_reacheds)\n return curr_info\n\n def add_experiences(self, curr_all_info: AllBrainInfo, next_all_info: AllBrainInfo, take_action_outputs):\n \"\"\"\n Adds experiences to each 
agent's experience history.\n :param curr_all_info: Dictionary of all current brains and corresponding BrainInfo.\n :param next_all_info: Dictionary of all current brains and corresponding BrainInfo.\n :param take_action_outputs: The outputs of the take action method.\n \"\"\"\n curr_info = curr_all_info[self.brain_name]\n next_info = next_all_info[self.brain_name]\n\n for agent_id in curr_info.agents:\n self.training_buffer[agent_id].last_brain_info = curr_info\n self.training_buffer[agent_id].last_take_action_outputs = take_action_outputs\n\n if curr_info.agents != next_info.agents:\n curr_to_use = self.construct_curr_info(next_info)\n else:\n curr_to_use = curr_info\n\n intrinsic_rewards = self.policy.get_intrinsic_rewards(curr_to_use, next_info)\n\n for agent_id in next_info.agents:\n stored_info = self.training_buffer[agent_id].last_brain_info\n stored_take_action_outputs = self.training_buffer[agent_id].last_take_action_outputs\n if stored_info is not None:\n idx = stored_info.agents.index(agent_id)\n next_idx = next_info.agents.index(agent_id)\n if not stored_info.local_done[idx]:\n for i, _ in enumerate(stored_info.visual_observations):\n self.training_buffer[agent_id]['visual_obs%d' % i].append(\n stored_info.visual_observations[i][idx])\n self.training_buffer[agent_id]['next_visual_obs%d' % i].append(\n next_info.visual_observations[i][next_idx])\n if self.policy.use_vec_obs:\n self.training_buffer[agent_id]['vector_obs'].append(stored_info.vector_observations[idx])\n self.training_buffer[agent_id]['next_vector_in'].append(\n next_info.vector_observations[next_idx])\n if self.policy.use_recurrent:\n if stored_info.memories.shape[1] == 0:\n stored_info.memories = np.zeros((len(stored_info.agents), self.policy.m_size))\n self.training_buffer[agent_id]['memory'].append(stored_info.memories[idx])\n actions = stored_take_action_outputs['action']\n if self.policy.use_continuous_act:\n actions_pre = stored_take_action_outputs['pre_action']\n self.training_buffer[agent_id]['actions_pre'].append(actions_pre[idx])\n else:\n self.training_buffer[agent_id]['action_mask'].append(\n stored_info.action_masks[idx])\n a_dist = stored_take_action_outputs['log_probs']\n value = stored_take_action_outputs['value']\n self.training_buffer[agent_id]['actions'].append(actions[idx])\n self.training_buffer[agent_id]['prev_action'].append(stored_info.previous_vector_actions[idx])\n self.training_buffer[agent_id]['masks'].append(1.0)\n if self.use_curiosity:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx] +\n intrinsic_rewards[next_idx])\n else:\n self.training_buffer[agent_id]['rewards'].append(next_info.rewards[next_idx])\n self.training_buffer[agent_id]['action_probs'].append(a_dist[idx])\n self.training_buffer[agent_id]['value_estimates'].append(value[idx][0])\n if agent_id not in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n self.cumulative_rewards[agent_id] += next_info.rewards[next_idx]\n if self.use_curiosity:\n if agent_id not in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0\n self.intrinsic_rewards[agent_id] += intrinsic_rewards[next_idx]\n if not next_info.local_done[next_idx]:\n if agent_id not in self.episode_steps:\n self.episode_steps[agent_id] = 0\n self.episode_steps[agent_id] += 1\n\n def process_experiences(self, current_info: AllBrainInfo, new_info: AllBrainInfo):\n \"\"\"\n Checks agent histories for processing condition, and processes them as necessary.\n Processing involves calculating value and advantage targets for 
model updating step.\n :param current_info: Dictionary of all current brains and corresponding BrainInfo.\n :param new_info: Dictionary of all next brains and corresponding BrainInfo.\n \"\"\"\n\n info = new_info[self.brain_name]\n for l in range(len(info.agents)):\n agent_actions = self.training_buffer[info.agents[l]]['actions']\n if ((info.local_done[l] or len(agent_actions) > self.trainer_parameters['time_horizon'])\n and len(agent_actions) > 0):\n agent_id = info.agents[l]\n if info.local_done[l] and not info.max_reached[l]:\n value_next = 0.0\n else:\n if info.max_reached[l]:\n bootstrapping_info = self.training_buffer[agent_id].last_brain_info\n idx = bootstrapping_info.agents.index(agent_id)\n else:\n bootstrapping_info = info\n idx = l\n value_next = self.policy.get_value_estimate(bootstrapping_info, idx)\n\n self.training_buffer[agent_id]['advantages'].set(\n get_gae(\n rewards=self.training_buffer[agent_id]['rewards'].get_batch(),\n value_estimates=self.training_buffer[agent_id]['value_estimates'].get_batch(),\n value_next=value_next,\n gamma=self.trainer_parameters['gamma'],\n lambd=self.trainer_parameters['lambd']))\n self.training_buffer[agent_id]['discounted_returns'].set(\n self.training_buffer[agent_id]['advantages'].get_batch()\n + self.training_buffer[agent_id]['value_estimates'].get_batch())\n\n self.training_buffer.append_update_buffer(agent_id, batch_size=None,\n training_length=self.policy.sequence_length)\n\n self.training_buffer[agent_id].reset_agent()\n if info.local_done[l]:\n self.stats['cumulative_reward'].append(\n self.cumulative_rewards.get(agent_id, 0))\n self.reward_buffer.appendleft(self.cumulative_rewards.get(agent_id, 0))\n self.stats['episode_length'].append(\n self.episode_steps.get(agent_id, 0))\n self.cumulative_rewards[agent_id] = 0\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n self.stats['intrinsic_reward'].append(\n self.intrinsic_rewards.get(agent_id, 0))\n self.intrinsic_rewards[agent_id] = 0\n\n def end_episode(self):\n \"\"\"\n A signal that the Episode has ended. The buffer must be reset. 
\n Get only called when the academy resets.\n \"\"\"\n self.training_buffer.reset_all()\n for agent_id in self.cumulative_rewards:\n self.cumulative_rewards[agent_id] = 0\n for agent_id in self.episode_steps:\n self.episode_steps[agent_id] = 0\n if self.use_curiosity:\n for agent_id in self.intrinsic_rewards:\n self.intrinsic_rewards[agent_id] = 0\n\n def is_ready_update(self):\n \"\"\"\n Returns whether or not the trainer has enough elements to run update model\n :return: A boolean corresponding to whether or not update_model() can be run\n \"\"\"\n size_of_buffer = len(self.training_buffer.update_buffer['actions'])\n return size_of_buffer > max(int(self.trainer_parameters['buffer_size'] / self.policy.sequence_length), 1)\n\n def update_policy(self):\n \"\"\"\n Uses training_buffer to update the policy.\n \"\"\"\n n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)\n value_total, policy_total, forward_total, inverse_total = [], [], [], []\n advantages = self.training_buffer.update_buffer['advantages'].get_batch()\n self.training_buffer.update_buffer['advantages'].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10))\n num_epoch = self.trainer_parameters['num_epoch']\n for k in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)\n value_total.append(run_out['value_loss'])\n policy_total.append(np.abs(run_out['policy_loss']))\n if self.use_curiosity:\n inverse_total.append(run_out['inverse_loss'])\n forward_total.append(run_out['forward_loss'])\n self.stats['value_loss'].append(np.mean(value_total))\n self.stats['policy_loss'].append(np.mean(policy_total))\n if self.use_curiosity:\n self.stats['forward_loss'].append(np.mean(forward_total))\n self.stats['inverse_loss'].append(np.mean(inverse_total))\n self.training_buffer.reset_update_buffer()\n\n\ndef discount_rewards(r, gamma=0.99, value_next=0.0):\n \"\"\"\n Computes discounted sum of future rewards for use in updating value estimate.\n :param r: List of rewards.\n :param gamma: Discount factor.\n :param value_next: T+1 value estimate for returns calculation.\n :return: discounted sum of future rewards as list.\n \"\"\"\n discounted_r = np.zeros_like(r)\n running_add = value_next\n for t in reversed(range(0, r.size)):\n running_add = running_add * gamma + r[t]\n discounted_r[t] = running_add\n return discounted_r\n\n\ndef get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n \"\"\"\n Computes generalized advantage estimate for use in updating policy.\n :param rewards: list of rewards for time-steps t to T.\n :param value_next: Value estimate for time-step T+1.\n :param value_estimates: list of value estimates for time-steps t to T.\n :param gamma: Discount factor.\n :param lambd: GAE weighing factor.\n :return: list of advantage estimates for time-steps t to T.\n \"\"\"\n value_estimates = np.asarray(value_estimates.tolist() + [value_next])\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage\n" ]
[ [ "tensorflow.summary.FileWriter", "numpy.abs", "numpy.zeros_like", "numpy.mean", "numpy.vstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
AideenByrne/python-project
[ "90100295360db810d49cda3f0377fcca3866e33a" ]
[ "project1.py" ]
[ "\n#Aideen Byrne 26th March 2018 \n#Code for investigating Iris Data Set for Programming & Scripting module project \n\n#Select only the rows of the Virginica flowers and assign it to virginica \nimport pandas as pd #import pandas library \ndf1 = pd.read_csv(\"data/iris.csv\") #label contents of iris.csv file as dataframe\nmy_columns = [\"Sepal Length\", \"Sepal Width\", \"Petal Length\", \"Petal Width\", \"Species\"] #borrowed from Jeff Tratner at https://stackoverflow.com/questions/17018638/assigning-column-names-from-a-list-to-a-table\ndf1.columns = my_columns\nspecies = df1[[\"Species\"]] #to select column named Species\nvirginica = df1.loc[(df1[\"Species\"] == \"Iris-virginica\")] #select rows that contain virginica flowers in species column\nprint (virginica)#prints only rows that contain virginica flowers in species column\n\n#Select only the Sepal Length of the Virginica flowers and assign it \nvsepallength = virginica[\"Sepal Length\"]\nprint (vsepallength)\n\n#Calculate the mean, median, variance and standard deviation of the Virginica Sepal Length\nprint (\"The mean of Virginica Sepal Length is\", vsepallength.mean())\nprint (\"The median of Virginica Sepal Length is\", vsepallength.median())\nprint (\"The variance of Virginica Sepal Length is\", vsepallength.var())\nprint (\"The standard deviation of Virginica Sepal Length is\", vsepallength.std())\n\n#Select only the numerical columns\nselectnumcol = df1[[\"Sepal Length\", \"Sepal Width\", \"Petal Length\", \"Petal Width\"]]\nprint (selectnumcol)\n\n#Calculate the mean of all the numerical variables\nprint (\"The mean per numerical column is\", selectnumcol.mean())\nprint (\"The mean of all numerical columns is\", selectnumcol.mean().mean())\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
andreaconti/torch_k
[ "a5bf09b22d3bef9092d7313dda529af83da15dc6" ]
[ "integration_tests/test_transforms_torchvision.py" ]
[ "\"\"\"\ntesting integration between torchvision and transformations\n\"\"\"\n\nimport torch\nimport torchvision.transforms as V\n\nimport torch_kitti.transforms as K\n\n\ndef test_random_crop():\n\n # simulate input\n fake_img_1 = torch.randn(1, 600, 600)\n fake_img_2 = fake_img_1.clone()\n x = {\"img_left\": fake_img_1, \"img_right\": fake_img_2}\n\n output = K.functional.apply_to_features(V.RandomCrop([200, 200]), x)\n assert torch.all(output[\"img_left\"] == output[\"img_right\"])\n" ]
[ [ "torch.all", "torch.randn" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pierreandrieu/corankcolight
[ "dce9d05c62f25faae5f73f150f44cc20bfa87b33", "dce9d05c62f25faae5f73f150f44cc20bfa87b33" ]
[ "corankco/experiments/stats/bootstrap_experiment.py", "corankco/partitioning/parfront.py" ]
[ "from corankco.algorithms.median_ranking import MedianRanking\nfrom corankco.dataset import DatasetSelector, Dataset\nfrom corankco.scoringscheme import ScoringScheme\nfrom corankco.experiments.experiment import ExperimentFromDataset, ExperimentFromOrphanetDataset\nfrom corankco.experiments.stats.bootstrap import bootstrap_dataset\nfrom numpy import asarray, std, mean, quantile, fromstring, round, isnan\nfrom corankco.utils import create_dir\nfrom random import shuffle\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom corankco.algorithms.algorithmChoice import get_algorithm, Algorithm\n# from corankco.rankings generation.rankings generate import create_rankings\n\n\nclass BootstrapExperiment(ExperimentFromDataset):\n\n def __init__(self,\n dataset_folder: str,\n algo: MedianRanking,\n scoring_scheme: ScoringScheme,\n nb_bootstrap: int = 1000,\n dataset_selector: DatasetSelector = None,\n ):\n super().__init__(dataset_folder=dataset_folder, dataset_selector=dataset_selector)\n self._algo = algo\n self._scoring_cheme = scoring_scheme\n self._nb_bootstrap = nb_bootstrap\n self.__path_hist = \"/home/pierre/Bureau/bootstrap_converge/hist\"\n self.__path_ic = \"/home/pierre/Bureau/bootstrap_converge/ic\"\n self.__rate_presence_min = 0.\n self.__ic_rate = 0.05\n\n def _get_algo(self) -> MedianRanking:\n return self._algo\n\n def _get_scoring_scheme(self) -> ScoringScheme:\n return self._scoring_cheme\n\n def _get_nb_bootstrap(self) -> int:\n return self._nb_bootstrap\n\n algo = property(_get_algo)\n scoring_scheme = property(_get_scoring_scheme)\n nb_bootstrap = property(_get_nb_bootstrap)\n\n def _run_raw_data(self) -> str:\n res = \"\"\n position_consensus = {}\n h_max_copeland_score = {}\n create_dir(self.__path_hist)\n create_dir(self.__path_ic)\n\n for dataset in self.datasets:\n max_score = 0\n h_dataset = {}\n h_dataset_victories = {}\n dataset.remove_empty_rankings()\n if self.__rate_presence_min > 0.:\n dataset.remove_elements_rate_presence_lower_than(self.__rate_presence_min)\n\n dataset.remove_empty_rankings()\n create_dir(self.__path_hist + \"/\" + dataset.name.replace(\" \", \"_\").replace(\".txt\", \"\")\n + \"_nb_elements=\" + str(dataset.nb_elements) + \"_nb_rankings=\" + str(dataset.nb_rankings))\n print(dataset.name + \" \" + str(dataset.nb_elements) + \" \" + str(dataset.nb_rankings))\n consensus_initial = self.algo.compute_consensus_rankings(dataset, self.scoring_scheme, True)\n consensus_ranking = consensus_initial.consensus_rankings[0]\n position_element = 1\n for bucket_i in consensus_ranking:\n for element in bucket_i:\n position_consensus[element] = position_element\n position_element += len(bucket_i)\n score_copeland_elements = consensus_initial.copeland_scores\n victories_copeland_elements = consensus_initial.copeland_victories\n\n for gene in dataset.elements:\n score_gene = score_copeland_elements[gene]\n victories_gene = victories_copeland_elements[gene]\n h_dataset[gene] = [score_gene]\n h_dataset_victories[gene] = {}\n h_dataset_victories[gene][\"initial\"] = victories_gene\n h_dataset_victories[gene][\"bootstrap_victories\"] = []\n h_dataset_victories[gene][\"bootstrap_equalities\"] = []\n h_dataset_victories[gene][\"bootstrap_defeats\"] = []\n\n if score_gene > max_score:\n max_score = score_gene\n\n for i in range(self._nb_bootstrap):\n if (i + 1) % (self._nb_bootstrap / 10) == 0:\n print(i + 1)\n dataset_bootstrap = bootstrap_dataset(dataset)\n lost_elements = dataset.nb_elements - dataset_bootstrap.nb_elements\n consensus = 
self._algo.compute_consensus_rankings(dataset_bootstrap, self._scoring_cheme, True)\n cop_scores = consensus.copeland_scores\n cop_victories = consensus.copeland_victories\n for element in dataset.elements:\n if element in cop_scores:\n h_dataset[element].append(cop_scores[element] + lost_elements)\n victories_equalities_defeat = cop_victories[element]\n h_dataset_victories[element][\"bootstrap_victories\"].append(victories_equalities_defeat[0])\n h_dataset_victories[element][\"bootstrap_equalities\"].append(victories_equalities_defeat[1])\n h_dataset_victories[element][\"bootstrap_defeats\"].append(victories_equalities_defeat[2])\n\n # h_dataset[gene].append(cop_scores[gene])\n\n if cop_scores[element] > max_score:\n max_score = cop_scores[element]\n else:\n h_dataset[element].append((lost_elements - 1) * 0.5)\n # h_dataset[gene].append(nan)\n\n for element in dataset.elements:\n array_scores = asarray(h_dataset[element])\n array_scores_without_nan = array_scores[~isnan(array_scores)]\n h_dataset[element] = list(array_scores_without_nan)\n\n h_max_copeland_score[dataset.name] = max_score\n h_elements_mean_score = {}\n for element in dataset.elements:\n h_elements_mean_score[element] = mean(asarray(h_dataset[element][1:]))\n for element, value in sorted(h_elements_mean_score.items(), key=lambda item: item[1]):\n res += dataset.name + \";\" \\\n + str(dataset.nb_elements) + \";\" \\\n + str(dataset.nb_rankings) + \";\" \\\n + str(element) + \";1;\" \\\n + str(h_dataset[element][0]) + \";\" \\\n + str(h_max_copeland_score[dataset.name]) + \";\" \\\n + str(mean(asarray(h_dataset_victories[element][\"bootstrap_victories\"]))) + \";\" \\\n + str(mean(asarray(h_dataset_victories[element][\"bootstrap_equalities\"]))) + \";\" \\\n + str(mean(asarray(h_dataset_victories[element][\"bootstrap_defeats\"]))) + \";\" \\\n + str(position_consensus[element]) + \";\" \\\n + str(h_dataset[element][1:])[1:-1] + \"\\n\"\n print(res)\n return res\n\n def _run_final_data(self, raw_data: str) -> str:\n ic_rate = self.__ic_rate\n h_rank_genes = {}\n h_ic_genes = {}\n\n for line in raw_data.split(\"\\n\"):\n if len(line) > 1:\n cols = line.split(\";\")\n disease = cols[0]\n nb_elements = cols[1]\n nb_rankings = cols[2]\n id_gene = cols[3]\n score_cop_initial = cols[5]\n max_copeland_dataset = cols[6]\n victories = cols[7]\n equalities = cols[8]\n defeats = cols[9]\n rank_gene_consensus = cols[10]\n scores_bootstrap = cols[11]\n\n path_output = self.__path_hist + \"/\" + cols[0].replace(\" \", \"_\") + \"_nb_elements=\" \\\n + str(nb_elements) + \"_nb_rankings=\" + str(nb_rankings) + \"/\"\n\n col = \"blue\"\n\n score_bootstrap = fromstring(scores_bootstrap, sep=\",\")\n score_initial = float(score_cop_initial)\n scores_boot_centered = score_bootstrap - score_initial\n quantiles_ic = quantile(scores_boot_centered, [ic_rate/2, 1-ic_rate/2])\n ic_gene = score_initial - quantiles_ic[1], score_initial - quantiles_ic[0]\n if disease not in h_rank_genes:\n h_rank_genes[disease] = {}\n h_ic_genes[disease] = {}\n h_rank_genes[disease][id_gene] = int(rank_gene_consensus)\n h_ic_genes[disease][id_gene] = ic_gene\n\n plt.hist(score_bootstrap, color=col)\n plt.xlim(xmin=0, xmax=round(int(float(max_copeland_dataset))) + 1)\n plt.axvline(float(score_cop_initial), color=\"green\")\n plt.xlabel(\"Cop score for elem \" + str(int(id_gene)))\n plt.title(\"quartiles:\" + str(quantile(score_bootstrap, [0.25, 0.5, 0.75])) + \";mean=\" + str(\n round(mean(score_bootstrap), 2)) + \";std=\" + str(round(std(score_bootstrap), 2))\n 
+\"\\nvict-eq-def=\" + str(victories) + \" \"+ str(equalities) + \" \" + str(defeats))\n plt.savefig(fname=path_output + '{0:03}'.format(int(rank_gene_consensus)) +\"_elem_\" + str(int(id_gene)), format=\"png\")\n plt.clf()\n\n data_dict = {}\n for disease in h_rank_genes.keys():\n h_disease_ranks = sorted(h_rank_genes[disease].items(), key=lambda item: item[1])[:25]\n print(h_disease_ranks)\n h_disease_ic = h_ic_genes[disease]\n data_dict.clear()\n data_dict['category'] = []\n data_dict['lower'] = []\n data_dict['upper'] = []\n min_x = 0\n max_x = 0\n for gene_with_pos in h_disease_ranks:\n gene = gene_with_pos[0]\n data_dict['category'].append(gene)\n data_dict['lower'].append(h_disease_ic[gene][0])\n data_dict['upper'].append(h_disease_ic[gene][1])\n if h_disease_ic[gene][0] < min_x:\n min_x = h_disease_ic[gene][0]\n if h_disease_ic[gene][1] > max_x:\n max_x = h_disease_ic[gene][1]\n\n dataset = pd.DataFrame(data_dict)\n\n for lower, upper, y in zip(dataset['lower'], dataset['upper'], range(len(dataset))):\n plt.xlim(min_x, max_x)\n\n plt.plot((lower, upper), (y, y), 'ro-')\n path_output = self.__path_ic + \"/\" + disease.replace(\" \", \"_\")\n plt.yticks(range(len(dataset)), list(dataset['category']))\n plt.savefig(fname=path_output, format=\"png\")\n plt.clf()\n\n return \"\"\n\n\nclass BootstrapExperimentBiologicalIC(BootstrapExperiment, ExperimentFromOrphanetDataset):\n def __init__(self,\n dataset_folder: str,\n algo: MedianRanking,\n scoring_scheme: ScoringScheme,\n nb_bootstrap: int = 1000,\n dataset_selector: DatasetSelector = None,\n ic_rate: float = 0.05,\n rate_presence_minimal: float = 0.\n ):\n super().__init__(dataset_folder, algo, scoring_scheme, nb_bootstrap, dataset_selector)\n super()._remove_datasets_empty_goldstandard()\n self.__ic_rate = ic_rate\n self.__rate_presence_min = rate_presence_minimal\n self.__path_hist = \"/home/pierre/Bureau/expe_bootstrap_bio/ic=\" \\\n + str(self.__ic_rate) + \"_pres=\" + str(self.__rate_presence_min) + \"_hist\"\n self.__path_ic = \"/home/pierre/Bureau/expe_bootstrap_bio/ic=\" \\\n + str(self.__ic_rate) + \"_pres=\" + str(self.__rate_presence_min) + \"_ic\"\n\n def _run_raw_data(self) -> str:\n res = \"\"\n position_consensus = {}\n h_max_copeland_score = {}\n create_dir(self.__path_hist)\n create_dir(self.__path_ic)\n\n for dataset in self.datasets:\n max_score = 0\n\n h_dataset = {}\n h_dataset_victories = {}\n\n dataset.remove_empty_rankings()\n if self.__rate_presence_min > 0.:\n dataset.remove_elements_rate_presence_lower_than(self.__rate_presence_min)\n\n dataset.remove_empty_rankings()\n create_dir(self.__path_hist + \"/\" + dataset.name.replace(\" \", \"_\") + \"_nb_genes=\" + str(\n dataset.nb_elements) + \"_nb_rankings=\" + str(dataset.nb_rankings))\n print(dataset.name + \" \" + str(dataset.nb_elements) + \" \" + str(dataset.nb_rankings))\n\n consensus_initial = self.algo.compute_consensus_rankings(dataset, self.scoring_scheme, True)\n consensus_ranking = consensus_initial.consensus_rankings[0]\n position_element = 1\n for bucket_i in consensus_ranking:\n for element in bucket_i:\n position_consensus[element] = position_element\n position_element += len(bucket_i)\n\n score_copeland_elements = consensus_initial.copeland_scores\n victories_gene = consensus_initial.copeland_victories\n\n for gene in dataset.elements:\n score_gene = score_copeland_elements[gene]\n h_dataset[gene] = [score_gene]\n\n h_dataset_victories[gene] = {}\n h_dataset_victories[gene][\"initial\"] = victories_gene\n 
h_dataset_victories[gene][\"bootstrap_victories\"] = []\n h_dataset_victories[gene][\"bootstrap_equalities\"] = []\n h_dataset_victories[gene][\"bootstrap_defeats\"] = []\n\n if score_gene > max_score:\n max_score = score_gene\n\n for i in range(self._nb_bootstrap):\n if (i + 1) % (self._nb_bootstrap / 10) == 0:\n print(i + 1)\n dataset_bootstrap = bootstrap_dataset(dataset)\n lost_elements = dataset.nb_elements - dataset_bootstrap.nb_elements\n consensus = self._algo.compute_consensus_rankings(dataset_bootstrap, self._scoring_cheme, True)\n cop_scores = consensus.copeland_scores\n cop_victories = consensus.copeland_victories\n for gene in dataset.elements:\n if gene in cop_scores:\n h_dataset[gene].append(cop_scores[gene] + lost_elements)\n # h_dataset[gene].append(cop_scores[gene])\n victories_equalities_defeat = cop_victories[gene]\n h_dataset_victories[gene][\"bootstrap_victories\"].append(victories_equalities_defeat[0]+lost_elements)\n h_dataset_victories[gene][\"bootstrap_equalities\"].append(victories_equalities_defeat[1])\n h_dataset_victories[gene][\"bootstrap_defeats\"].append(victories_equalities_defeat[2])\n\n if cop_scores[gene] > max_score:\n max_score = cop_scores[gene]\n else:\n h_dataset[gene].append((lost_elements - 1) * 0.5)\n # h_dataset[gene].append(nan)\n h_dataset_victories[gene][\"bootstrap_victories\"].append(0)\n h_dataset_victories[gene][\"bootstrap_equalities\"].append(lost_elements - 1)\n h_dataset_victories[gene][\"bootstrap_defeats\"].append(dataset_bootstrap.nb_elements)\n\n for gene in dataset.elements:\n array_scores = asarray(h_dataset[gene])\n array_scores_without_nan = array_scores[~isnan(array_scores)]\n h_dataset[gene] = list(array_scores_without_nan)\n\n h_max_copeland_score[dataset.name] = max_score\n h_elements_mean_score = {}\n for gene in dataset.elements:\n h_elements_mean_score[gene] = mean(asarray(h_dataset[gene][1:]))\n for gene, value in sorted(h_elements_mean_score.items(), key=lambda item: item[1]):\n if gene in self.datasets_gs[dataset.name]:\n res += dataset.name + \";\" \\\n + str(dataset.nb_elements) + \";\" \\\n + str(dataset.nb_rankings) + \";\" \\\n + str(gene) + \";1;\" \\\n + str(h_dataset[gene][0]) + \";\" \\\n + str(h_max_copeland_score[dataset.name]) + \";\" \\\n + str(mean(asarray(h_dataset_victories[gene][\"bootstrap_victories\"]))) + \";\" \\\n + str(mean(asarray(h_dataset_victories[gene][\"bootstrap_equalities\"]))) + \";\" \\\n + str(mean(asarray(h_dataset_victories[gene][\"bootstrap_defeats\"]))) + \";\" \\\n + str(position_consensus[gene]) + \";\" \\\n + str(h_dataset[gene][1:])[1:-1] + \"\\n\"\n else:\n res += dataset.name + \";\" \\\n + str(dataset.nb_elements) + \";\" \\\n + str(dataset.nb_rankings) + \";\" \\\n + str(gene) + \";0;\" \\\n + str(h_dataset[gene][0]) + \";\" \\\n + str(h_max_copeland_score[dataset.name]) + \";\" \\\n + str(mean(asarray(h_dataset_victories[gene][\"bootstrap_victories\"]))) + \";\" \\\n + str(mean(asarray(h_dataset_victories[gene][\"bootstrap_equalities\"]))) + \";\" \\\n + str(mean(asarray(h_dataset_victories[gene][\"bootstrap_defeats\"]))) + \";\" \\\n + str(position_consensus[gene]) + \";\" \\\n + str(h_dataset[gene][1:])[1:-1] + \"\\n\"\n return res\n\n def _run_final_data(self, raw_data: str) -> str:\n ic_rate = self.__ic_rate\n h_rank_genes = {}\n h_ic_genes = {}\n\n for line in raw_data.split(\"\\n\"):\n if len(line) > 1:\n cols = line.split(\";\")\n disease = cols[0]\n nb_elements = cols[1]\n nb_rankings = cols[2]\n id_gene = cols[3]\n score_cop_initial = cols[5]\n 
max_copeland_dataset = cols[6]\n victories = cols[7]\n equalities = cols[8]\n defeats = cols[9]\n rank_gene_consensus = cols[10]\n scores_bootstrap = cols[11]\n\n path_output = self.__path_hist + \"/\" + cols[0].replace(\" \", \"_\") + \"_nb_genes=\" \\\n + str(nb_elements) + \"_nb_rankings=\" + str(nb_rankings) + \"/\"\n\n col = \"blue\"\n\n score_bootstrap = fromstring(scores_bootstrap, sep=\",\")\n score_initial = float(score_cop_initial)\n scores_boot_centered = score_bootstrap - score_initial\n quantiles_ic = quantile(scores_boot_centered, [ic_rate/2, 1-ic_rate/2])\n ic_gene = score_initial - quantiles_ic[1], score_initial - quantiles_ic[0]\n if disease not in h_rank_genes:\n h_rank_genes[disease] = {}\n h_ic_genes[disease] = {}\n h_rank_genes[disease][id_gene] = int(rank_gene_consensus)\n h_ic_genes[disease][id_gene] = ic_gene\n\n plt.hist(score_bootstrap, color=col)\n plt.xlim(xmin=0, xmax=round(int(float(max_copeland_dataset))) + 1)\n plt.axvline(float(score_cop_initial), color=\"green\")\n plt.xlabel(\"Cop score for gene \" + str(int(id_gene)))\n plt.title(\"quartiles:\" + str(quantile(score_bootstrap, [0.25, 0.5, 0.75])) + \";mean=\" + str(\n round(mean(score_bootstrap), 2)) + \";std=\" + str(round(std(score_bootstrap), 2))\n +\"\\nvict-eq-def=\" + str(victories) + \" \"+ str(equalities) + \" \" + str(defeats))\n plt.savefig(fname=path_output + '{0:03}'.format(int(rank_gene_consensus)) +\"_elem_\" + str(int(id_gene)), format=\"png\")\n plt.clf()\n\n data_dict = {}\n for disease in h_rank_genes.keys():\n h_disease_ranks = sorted(h_rank_genes[disease].items(), key=lambda item: item[1])\n h_disease_ic = h_ic_genes[disease]\n data_dict.clear()\n data_dict['category'] = []\n data_dict['lower'] = []\n data_dict['upper'] = []\n for gene_with_pos in h_disease_ranks:\n gene = gene_with_pos[0]\n data_dict['category'].append(gene)\n data_dict['lower'].append(h_disease_ic[gene][0])\n data_dict['upper'].append(h_disease_ic[gene][1])\n\n dataset = pd.DataFrame(data_dict)\n\n for lower, upper, y in zip(dataset['lower'], dataset['upper'], range(len(dataset))):\n plt.plot((lower, upper), (y, y), 'ro-')\n path_output = self.__path_ic + \"/\" + disease.replace(\" \", \"_\")\n plt.yticks(range(len(dataset)), list(dataset['category']))\n plt.savefig(fname=path_output, format=\"png\")\n plt.clf()\n\n return \"\"\n\n\nclass EvolutionNbRankings(ExperimentFromDataset):\n\n def __init__(self,\n dataset_folder: str,\n algo: MedianRanking,\n scoring_scheme: ScoringScheme,\n dataset_selector: DatasetSelector = None,\n ):\n super().__init__(dataset_folder=dataset_folder, dataset_selector=dataset_selector)\n self._algo = algo\n self._scoring_cheme = scoring_scheme\n\n\n def _run_final_data(self, raw_data: str) -> str:\n return raw_data\n\n def _run_raw_data(self) -> str:\n to_test = list(range(10, 100, 10))\n to_test.extend(list(range(100, 1001, 100)))\n res = \"\"\n for dataset in self.datasets:\n print(dataset.name)\n h_gene_list_scores = {}\n for element in dataset.elements:\n h_gene_list_scores[element] = []\n shuffle(dataset.rankings)\n for i in to_test:\n dataset_new = Dataset(dataset.rankings[0:i])\n dataset_new.name = dataset.name\n consensus = self._algo.compute_consensus_rankings(dataset_new, self._scoring_cheme, True)\n copeland_scores = consensus.copeland_scores\n for element in dataset_new.elements:\n cop_score_element = copeland_scores.get(element)\n h_gene_list_scores[element].append(cop_score_element)\n for element in dataset.elements:\n res += dataset.name + \";\" + str(element) + \";\" + 
str(h_gene_list_scores[element]) + \"\\n\"\n return res\n\nalgor = get_algorithm(Algorithm.CopelandMethod)\nscoring_scheme_exp = ScoringScheme.get_pseudodistance_scoring_scheme_p(1.)\n\n\"\"\"\nrates_presence_min = [0.2]\nic_rates = [0.05]\n\nfor rate_presence_minimal in rates_presence_min:\n for ic_rate in ic_rates:\n print(ic_rate)\n print(rate_presence_minimal)\n b = BootstrapExperimentBiologicalIC(dataset_folder=\"/home/pierre/Bureau/vldb_data/datasets/biological_dataset\",\n algo=algor,\n scoring_scheme=scoring_scheme_exp,\n nb_bootstrap=10000,\n dataset_selector=DatasetSelector(\n nb_rankings_min=20, nb_elem_min=200, nb_elem_max=219),\n rate_presence_minimal=rate_presence_minimal,\n ic_rate=ic_rate)\n\n\n b.run(display_all=True, figures=True)\n\n\nseed(1)\nrdm.seed(1)\n\nrepeat = 1\nnb_steps = [0, 50, 100, 150, 200, 250, 300, 600, 900, 1200, 1500, 3000, 6000, 9000, 12000, 15000, 18000, 21000, 30000, 40000, 50000, 60000]\nnb_elem = 50\nnb_rankings_to_generate = 10\n \nfor step in nb_steps:\n for i in range(repeat):\n\n new_rankings = create_rankings(nb_elements=nb_elem, nb_rankings=nb_rankings, steps=step, complete=True)\n f = open(\"/home/pierre/Bureau/datasets_bootstrap_permutations/\"+\"n=\" + str(nb_elem) + \"_m=\" + str(nb_rankings) \n + \"_s=\" + str(step) +\"_\" + '{0:03}'.format(i), \"w\")\n for ranking_new in new_rankings:\n f.write(str(ranking_new))\n f.write(\"\\n\")\n f.close()\n\n\n\nfrom corankco.dataset import Dataset, EmptyDatasetException\ne = ExperimentFromDataset(\"/home/pierre/Bureau/vldb_data/datasets/biological_dataset\")\nfor d in e.datasets:\n print(d)\n changes = True\n d_bis = Dataset(d.rankings)\n d_bis.remove_empty_rankings()\n print(d_bis)\n str_save = str(d_bis)\n try:\n while changes:\n print(\"encore un tour\")\n d_bis.remove_elements_rate_presence_lower_than(0.333)\n print(d_bis)\n rankings_dataset = d_bis.rankings\n rankings_copy = []\n for ranking_dataset in rankings_dataset:\n s = set()\n for bucket_dataset in ranking_dataset:\n s.update(bucket_dataset)\n if len(s) >= 10:\n rankings_copy.append(ranking_dataset)\n\n d_copy = Dataset(rankings_copy)\n print(\"copie\")\n print(d_copy)\n if str(d_copy) == str_save:\n changes = False\n else:\n str_save = str(d_copy)\n d_bis = d_copy\n except EmptyDatasetException:\n continue\n\n if d_bis.nb_rankings >= 10:\n d_bis.write(\"/home/pierre/Bureau/data_converge/\" + d.name)\n\n\"\"\"\nfor i in range(60, 150):\n b = BootstrapExperiment(\n dataset_folder=\"/home/pierre/Bureau/data_converge\",\n algo=algor,\n scoring_scheme=scoring_scheme_exp,\n nb_bootstrap=1000,\n dataset_selector=DatasetSelector(nb_elem_min=i, nb_elem_max=i),\n )\n b.run()\n\n\"\"\"\"\nb = EvolutionNbRankings(\n dataset_folder=\"/home/pierre/Bureau/datasets_bootstrap_permutations\",\n algo=algor,\n scoring_scheme=scoring_scheme_exp,\n )\n\nb.run(display_all=True) \"\"\"", "from corankco.dataset import Dataset\nfrom corankco.scoringscheme import ScoringScheme\nfrom corankco.partitioning.orderedPartition import OrderedPartition\nfrom typing import Tuple, Set\nfrom numpy import vdot, ndarray, count_nonzero, shape, array, zeros, asarray\nfrom igraph import Graph\n\n\nclass ParFront:\n\n def __init__(self):\n pass\n\n def compute_frontiers(\n self,\n dataset: Dataset,\n scoring_scheme: ScoringScheme\n ) -> OrderedPartition:\n \"\"\"\n :param dataset: A dataset containing the rankings to aggregate\n :type dataset: Dataset (class Dataset in package 'datasets')\n :param scoring_scheme: The penalty vectors to consider\n :type scoring_scheme: 
ScoringScheme (class ScoringScheme in package 'distances')\n :return a list of sets of elements such that any exact consensus respects this partitioning\n \"\"\"\n sc = asarray(scoring_scheme.penalty_vectors)\n rankings = dataset.rankings\n res = []\n elem_id = {}\n id_elements = {}\n id_elem = 0\n for ranking in rankings:\n for bucket in ranking:\n for element in bucket:\n if element not in elem_id:\n elem_id[element] = id_elem\n id_elements[id_elem] = element\n id_elem += 1\n\n positions = dataset.get_positions(elem_id)\n gr1, mat_score, robust_arcs = self.__graph_of_elements(positions, sc)\n sccs = gr1.components()\n partition = []\n for scc in sccs:\n partition.append(set(scc))\n i = 0\n while i < len(partition) - 1:\n set1 = partition[i]\n set2 = partition[i+1]\n fusion = False\n for x in set1:\n for y in set2:\n if (x, y) not in robust_arcs:\n fusion = True\n break\n if fusion:\n break\n if fusion:\n for x in set2:\n set1.add(x)\n partition.pop(i+1)\n i = max(i-1, 1)\n else:\n i += 1\n\n res = []\n for group in partition:\n g = set()\n res.append(g)\n for elem in group:\n g.add(id_elements[elem])\n\n return OrderedPartition(res)\n\n @staticmethod\n def __graph_of_elements(positions: ndarray, matrix_scoring_scheme: ndarray) -> Tuple[Graph, ndarray, Set[Tuple]]:\n graph_of_elements = Graph(directed=True)\n robust_arcs = set()\n cost_before = matrix_scoring_scheme[0]\n cost_tied = matrix_scoring_scheme[1]\n cost_after = array([cost_before[1], cost_before[0], cost_before[2], cost_before[4], cost_before[3],\n cost_before[5]])\n n = shape(positions)[0]\n m = shape(positions)[1]\n for i in range(n):\n graph_of_elements.add_vertex(name=str(i))\n\n matrix = zeros((n, n, 3))\n edges = []\n for e1 in range(n):\n mem = positions[e1]\n d = count_nonzero(mem == -1)\n for e2 in range(e1 + 1, n):\n a = count_nonzero(mem + positions[e2] == -2)\n b = count_nonzero(mem == positions[e2])\n c = count_nonzero(positions[e2] == -1)\n e = count_nonzero(mem < positions[e2])\n relative_positions = array([e - d + a, m - e - b - c + a, b - a, c - a, d - a, a])\n put_before = vdot(relative_positions, cost_before)\n put_after = vdot(relative_positions, cost_after)\n put_tied = vdot(relative_positions, cost_tied)\n if put_before > put_after or put_before > put_tied:\n edges.append((e2, e1))\n if put_after > put_before or put_after > put_tied:\n edges.append((e1, e2))\n if put_before < put_after and put_before < put_tied:\n robust_arcs.add((e1, e2))\n if put_after < put_before and put_after < put_tied:\n robust_arcs.add((e2, e1))\n matrix[e1][e2] = [put_before, put_after, put_tied]\n matrix[e2][e1] = [put_after, put_before, put_tied]\n graph_of_elements.add_edges(edges)\n return graph_of_elements, matrix, robust_arcs\n" ]
[ [ "numpy.asarray", "numpy.isnan", "numpy.quantile", "matplotlib.pyplot.savefig", "pandas.DataFrame", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "numpy.fromstring", "numpy.std", "numpy.mean", "matplotlib.pyplot.hist" ], [ "numpy.asarray", "numpy.shape", "numpy.count_nonzero", "numpy.array", "numpy.zeros", "numpy.vdot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
QUVA-Lab/lang-tracker
[ "6cb3630471765565b6f2d34a160f0cd51d95a082" ]
[ "util/eval_tools.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport pyximport; pyximport.install()\nfrom util.nms import cpu_nms as nms\n\n# all boxes are [xmin, ymin, xmax, ymax] format, 0-indexed, including xmax and ymax\ndef compute_bbox_iou(bboxes, target):\n if isinstance(bboxes, list):\n bboxes = np.array(bboxes)\n bboxes = bboxes.reshape((-1, 4))\n\n if isinstance(target, list):\n target = np.array(target)\n target = target.reshape((-1, 4))\n\n A_bboxes = (bboxes[..., 2]-bboxes[..., 0]+1) * (bboxes[..., 3]-bboxes[..., 1]+1)\n A_target = (target[..., 2]-target[..., 0]+1) * (target[..., 3]-target[..., 1]+1)\n assert(np.all(A_bboxes >= 0))\n assert(np.all(A_target >= 0))\n I_x1 = np.maximum(bboxes[..., 0], target[..., 0])\n I_y1 = np.maximum(bboxes[..., 1], target[..., 1])\n I_x2 = np.minimum(bboxes[..., 2], target[..., 2])\n I_y2 = np.minimum(bboxes[..., 3], target[..., 3])\n A_I = np.maximum(I_x2 - I_x1 + 1, 0) * np.maximum(I_y2 - I_y1 + 1, 0)\n IoUs = A_I / (A_bboxes + A_target - A_I)\n assert(np.all(0 <= IoUs) and np.all(IoUs <= 1))\n return IoUs\n\n# # all boxes are [num, height, width] binary array\ndef compute_mask_IU(masks, target):\n assert(target.shape[-2:] == masks.shape[-2:])\n I = np.sum(np.logical_and(masks, target))\n U = np.sum(np.logical_or(masks, target))\n return I, U\n\ndef compute_bbox_max(bbox_file):\n with open(bbox_file) as f:\n for line in f:\n items = [int(x) for x in line.strip().split()]\n\n box1 = np.array(items[0::4]).T\n box2 = np.array(items[1::4]).T\n box3 = np.array(items[2::4]).T\n box4 = np.array(items[3::4]).T\n bboxes = np.array([box1, box2, box1+box3-1, box2+box4-1]).T\n\n col1 = np.min(np.array([bboxes[:,0], bboxes[:,2]]), axis=0)\n col2 = np.min(np.array([bboxes[:,1], bboxes[:,3]]), axis=0)\n col3 = np.max(np.array([bboxes[:,0], bboxes[:,2]]), axis=0)\n col4 = np.max(np.array([bboxes[:,1], bboxes[:,3]]), axis=0)\n bboxes = np.array([col1, col2, col3, col4]).T\n\n max_sz = 0\n max_box = bboxes[0, :]\n for i in range(bboxes.shape[0]): # for each bbox\n pred_box = bboxes[i, :]\n box_sz = (pred_box[2] - pred_box[0])*(pred_box[3] - pred_box[1])\n if box_sz > max_sz:\n max_sz = box_sz\n max_box = pred_box\n\n return max_box\n\n" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.logical_and", "numpy.all", "numpy.logical_or", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
katsugeneration/ml-project-template
[ "fe68c2f3fa6b6e51cc29b340cb2a1aeeca221322" ]
[ "model/utils/losses.py" ]
[ "# Copyright 2020 Katsuya Shimabukuro. All rights reserved.\n# Licensed under the MIT License.\nimport tensorflow as tf\n\n\nclass MaskedSparseCategoricalCrossentropy():\n \"\"\"SparseCategoricalCrossentropy without padding mask.\"\"\"\n\n def __call__(self, label, pred, **kwargs):\n \"\"\"Calculate loss.\n\n Args:\n label (tf.Tensor): sequence label with shape (B, Seq).\n pred (tf.Tensor): sequence label prediction likelihood with shape (B, Seq, Token) in [0, 1].\n\n Return:\n loss (tf.Tensor): mean loss float value without padding mask.\n\n \"\"\"\n mask = tf.math.logical_not(tf.math.equal(label, 0))\n loss = tf.keras.losses.sparse_categorical_crossentropy(label, pred)\n\n mask = tf.cast(mask, dtype=loss.dtype)\n loss *= mask\n\n return tf.reduce_mean(tf.reduce_sum(loss, axis=1) / tf.reduce_sum(mask, axis=1))\n" ]
[ [ "tensorflow.cast", "tensorflow.math.equal", "tensorflow.keras.losses.sparse_categorical_crossentropy", "tensorflow.reduce_sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
Liyulingyue/Paddle
[ "f3f0824df52b6051ac365268a3a8cd2682c6c8d5" ]
[ "python/paddle/tensor/search.py" ]
[ "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import print_function\nimport numpy as np\nimport paddle\nfrom ..framework import LayerHelper\nfrom ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype\nfrom ..fluid import layers\nfrom ..framework import core, in_dygraph_mode, _non_static_mode\nfrom ..fluid.framework import _in_legacy_dygraph\nfrom paddle.common_ops_import import convert_np_dtype_to_dtype_\nfrom paddle.common_ops_import import Variable\nfrom paddle.common_ops_import import VarDesc\nfrom paddle import _C_ops\nfrom .logic import logical_not\n\n# TODO: define searching & indexing functions of a tensor \n# from ..fluid.layers import has_inf #DEFINE_ALIAS\n# from ..fluid.layers import has_nan #DEFINE_ALIAS\n\n__all__ = []\n\n\ndef argsort(x, axis=-1, descending=False, name=None):\n \"\"\"\n This OP sorts the input along the given axis, and returns the corresponding index tensor for the sorted output values. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.\n\n Args:\n x(Tensor): An input N-D Tensor with type float32, float64, int16,\n int32, int64, uint8.\n axis(int, optional): Axis to compute indices along. The effective range\n is [-R, R), where R is Rank(x). when axis<0, it works the same way\n as axis+R. Default is -1.\n descending(bool, optional) : Descending is a flag, if set to true,\n algorithm will sort by descending order, else sort by\n ascending order. Default is false.\n name(str, optional): The default value is None. Normally there is no\n need for user to set this property. For more information, please\n refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: sorted indices(with the same shape as ``x``\n and with data type int64).\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n \n x = paddle.to_tensor([[[5,8,9,5],\n [0,0,1,7],\n [6,9,2,4]],\n [[5,2,4,2],\n [4,7,7,9],\n [1,7,0,6]]], \n dtype='float32')\n out1 = paddle.argsort(x, axis=-1)\n out2 = paddle.argsort(x, axis=0)\n out3 = paddle.argsort(x, axis=1)\n \n print(out1)\n #[[[0 3 1 2]\n # [0 1 2 3]\n # [2 3 0 1]]\n # [[1 3 2 0]\n # [0 1 2 3]\n # [2 0 3 1]]]\n \n print(out2)\n #[[[0 1 1 1]\n # [0 0 0 0]\n # [1 1 1 0]]\n # [[1 0 0 0]\n # [1 1 1 1]\n # [0 0 0 1]]]\n \n print(out3)\n #[[[1 1 1 2]\n # [0 0 2 0]\n # [2 2 0 1]]\n # [[2 0 2 0]\n # [1 1 0 2]\n # [0 2 1 1]]]\n \"\"\"\n if in_dygraph_mode():\n _, ids = _C_ops.final_state_argsort(x, axis, descending)\n return ids\n\n if _in_legacy_dygraph():\n _, ids = _C_ops.argsort(x, 'axis', axis, 'descending', descending)\n return ids\n check_variable_and_dtype(\n x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],\n 'argsort')\n\n helper = LayerHelper(\"argsort\", **locals())\n out = helper.create_variable_for_type_inference(\n dtype=x.dtype, stop_gradient=True)\n ids = helper.create_variable_for_type_inference(\n VarDesc.VarType.INT64, stop_gradient=True)\n helper.append_op(\n type='argsort',\n inputs={'X': x},\n outputs={'Out': out,\n 'Indices': ids},\n attrs={'axis': axis,\n 'descending': descending})\n return ids\n\n\ndef argmax(x, axis=None, keepdim=False, dtype=\"int64\", name=None):\n \"\"\"\n Computes the indices of the max elements of the input tensor's\n element along the provided axis.\n\n Args:\n x(Tensor): An input N-D Tensor with type float32, float64, int16,\n int32, int64, uint8.\n axis(int, optional): Axis to compute indices along. The effective range\n is [-R, R), where R is x.ndim. when axis < 0, it works the same way\n as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.\n keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.\n dtype(str|np.dtype, optional): Data type of the output tensor which can\n be int32, int64. The default value is ``int64`` , and it will\n return the int64 indices.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, return the tensor of int32 if set :attr:`dtype` is int32, otherwise return the tensor of int64.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[5,8,9,5],\n [0,0,1,7],\n [6,9,2,4]])\n out1 = paddle.argmax(x)\n print(out1) # 2\n out2 = paddle.argmax(x, axis=0)\n print(out2) \n # [2, 2, 0, 1]\n out3 = paddle.argmax(x, axis=-1)\n print(out3) \n # [2, 3, 1]\n out4 = paddle.argmax(x, axis=0, keepdim=True)\n print(out4)\n # [[2, 2, 0, 1]]\n \"\"\"\n if axis is not None and not isinstance(axis, int):\n raise TypeError(\n \"The type of 'axis' must be int or None in argmax, but received %s.\"\n % (type(axis)))\n\n if dtype is None:\n raise ValueError(\n \"the value of 'dtype' in argmax could not be None, but received None\"\n )\n\n var_dtype = convert_np_dtype_to_dtype_(dtype)\n flatten = False\n if axis is None:\n flatten = True\n axis = 0\n\n if in_dygraph_mode():\n return _C_ops.final_state_argmax(x, axis, keepdim, flatten, var_dtype)\n if _in_legacy_dygraph():\n out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',\n keepdim, 'flatten', flatten)\n return out\n\n helper = LayerHelper(\"argmax\", **locals())\n check_variable_and_dtype(\n x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],\n 'paddle.argmax')\n check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')\n attrs = {}\n out = helper.create_variable_for_type_inference(var_dtype)\n attrs['keepdims'] = keepdim\n attrs['axis'] = axis\n attrs['flatten'] = flatten\n attrs['dtype'] = var_dtype\n helper.append_op(\n type='arg_max', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs)\n out.stop_gradient = True\n return out\n\n\ndef argmin(x, axis=None, keepdim=False, dtype=\"int64\", name=None):\n \"\"\"\n Computing the indices of the min elements of the input tensor's\n element along the provided axis.\n\n Args:\n x(Tensor): An input N-D Tensor with type float32, float64, int16,\n int32, int64, uint8.\n axis(int, optional): Axis to compute indices along. The effective range\n is [-R, R), where R is x.ndim. when axis < 0, it works the same way\n as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.\n keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.\n dtype(str, optional): Data type of the output tensor which can\n be int32, int64. The default value is 'int64', and it will\n return the int64 indices.\n name(str, optional): The default value is None. Normally there is no\n need for user to set this property. For more information, please\n refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`.\n\n Examples:\n .. 
code-block:: python\n :name: code-example1\n import paddle\n\n x = paddle.to_tensor([[5,8,9,5],\n [0,0,1,7],\n [6,9,2,4]])\n out1 = paddle.argmin(x)\n print(out1) # 4\n out2 = paddle.argmin(x, axis=0)\n print(out2) \n # [1, 1, 1, 2]\n out3 = paddle.argmin(x, axis=-1)\n print(out3) \n # [0, 0, 2]\n out4 = paddle.argmin(x, axis=0, keepdim=True)\n print(out4)\n # [[1, 1, 1, 2]]\n \"\"\"\n if axis is not None and not isinstance(axis, int):\n raise TypeError(\n \"The type of 'axis' must be int or None in argmin, but received %s.\"\n % (type(axis)))\n\n if dtype is None:\n raise ValueError(\n \"the value of 'dtype' in argmin could not be None, but received None\"\n )\n\n var_dtype = convert_np_dtype_to_dtype_(dtype)\n flatten = False\n if axis is None:\n flatten = True\n axis = 0\n\n if in_dygraph_mode():\n return _C_ops.final_state_argmin(x, axis, keepdim, flatten, var_dtype)\n if _in_legacy_dygraph():\n out = _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',\n keepdim, 'flatten', flatten)\n return out\n\n helper = LayerHelper(\"argmin\", **locals())\n check_variable_and_dtype(\n x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],\n 'paddle.argmin')\n check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')\n out = helper.create_variable_for_type_inference(var_dtype)\n attrs = {}\n attrs['keepdims'] = keepdim\n attrs['axis'] = axis\n attrs['flatten'] = flatten\n attrs['dtype'] = var_dtype\n helper.append_op(\n type='arg_min', inputs={'X': x}, outputs={'Out': [out]}, attrs=attrs)\n out.stop_gradient = True\n return out\n\n\ndef index_select(x, index, axis=0, name=None):\n \"\"\"\n\n Returns a new tensor which indexes the ``input`` tensor along dimension ``axis`` using \n the entries in ``index`` which is a Tensor. The returned tensor has the same number \n of dimensions as the original ``x`` tensor. The dim-th dimension has the same \n size as the length of ``index``; other dimensions have the same size as in the ``x`` tensor. \n\n Args:\n x (Tensor): The input Tensor to be operated. The data of ``x`` can be one of float32, float64, int32, int64.\n index (Tensor): The 1-D Tensor containing the indices to index. The data type of ``index`` must be int32 or int64.\n axis (int, optional): The dimension in which we index. Default: if None, the ``axis`` is 0.\n name(str, optional): The default value is None. Normally there is no\n need for user to set this property. For more information, please\n refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: A Tensor with same data type as ``x``.\n \n Examples:\n .. code-block:: python\n \n import paddle\n\n x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],\n [5.0, 6.0, 7.0, 8.0],\n [9.0, 10.0, 11.0, 12.0]])\n index = paddle.to_tensor([0, 1, 1], dtype='int32')\n out_z1 = paddle.index_select(x=x, index=index)\n #[[1. 2. 3. 4.]\n # [5. 6. 7. 8.]\n # [5. 6. 7. 8.]]\n out_z2 = paddle.index_select(x=x, index=index, axis=1)\n #[[ 1. 2. 2.]\n # [ 5. 6. 6.]\n # [ 9. 10. 
10.]]\n \"\"\"\n\n if in_dygraph_mode():\n return _C_ops.final_state_index_select(x, index, axis)\n\n if _in_legacy_dygraph():\n return _C_ops.index_select(x, index, 'dim', axis)\n\n helper = LayerHelper(\"index_select\", **locals())\n check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],\n 'paddle.tensor.search.index_select')\n check_variable_and_dtype(index, 'index', ['int32', 'int64'],\n 'paddle.tensor.search.index_select')\n\n out = helper.create_variable_for_type_inference(x.dtype)\n\n helper.append_op(\n type='index_select',\n inputs={'X': x,\n 'Index': index},\n outputs={'Out': out},\n attrs={'dim': axis})\n return out\n\n\ndef nonzero(x, as_tuple=False):\n \"\"\"\n Return a tensor containing the indices of all non-zero elements of the `input` \n tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension \n in `input`, each containing the indices (in that dimension) of all non-zero elements \n of `input`. Given a n-Dimensional `input` tensor with shape [x_1, x_2, ..., x_n], If \n as_tuple is False, we can get a output tensor with shape [z, n], where `z` is the \n number of all non-zero elements in the `input` tensor. If as_tuple is True, we can get \n a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].\n\n Args:\n x (Tensor): The input tensor variable.\n as_tuple (bool): Return type, Tensor or tuple of Tensor.\n\n Returns:\n Tensor. The data type is int64.\n\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n x1 = paddle.to_tensor([[1.0, 0.0, 0.0],\n [0.0, 2.0, 0.0],\n [0.0, 0.0, 3.0]])\n x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0])\n out_z1 = paddle.nonzero(x1)\n print(out_z1)\n #[[0 0]\n # [1 1]\n # [2 2]]\n out_z1_tuple = paddle.nonzero(x1, as_tuple=True)\n for out in out_z1_tuple:\n print(out)\n #[[0]\n # [1]\n # [2]]\n #[[0]\n # [1]\n # [2]]\n out_z2 = paddle.nonzero(x2)\n print(out_z2)\n #[[1]\n # [3]]\n out_z2_tuple = paddle.nonzero(x2, as_tuple=True)\n for out in out_z2_tuple:\n print(out)\n #[[1]\n # [3]]\n\n \"\"\"\n list_out = []\n shape = x.shape\n rank = len(shape)\n\n if in_dygraph_mode():\n outs = _C_ops.final_state_where_index(x)\n elif paddle.in_dynamic_mode():\n outs = _C_ops.where_index(x)\n else:\n helper = LayerHelper(\"where_index\", **locals())\n\n outs = helper.create_variable_for_type_inference(\n dtype=core.VarDesc.VarType.INT64)\n\n helper.append_op(\n type='where_index',\n inputs={'Condition': x},\n outputs={'Out': [outs]})\n\n if not as_tuple:\n return outs\n elif rank == 1:\n return tuple([outs])\n else:\n for i in range(rank):\n list_out.append(\n paddle.slice(\n outs, axes=[1], starts=[i], ends=[i + 1]))\n return tuple(list_out)\n\n\ndef sort(x, axis=-1, descending=False, name=None):\n \"\"\"\n\n This OP sorts the input along the given axis, and returns the sorted output tensor. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.\n\n Args:\n x(Tensor): An input N-D Tensor with type float32, float64, int16,\n int32, int64, uint8.\n axis(int, optional): Axis to compute indices along. The effective range\n is [-R, R), where R is Rank(x). when axis<0, it works the same way\n as axis+R. Default is 0.\n descending(bool, optional) : Descending is a flag, if set to true,\n algorithm will sort by descending order, else sort by\n ascending order. Default is false.\n name(str, optional): The default value is None. Normally there is no\n need for user to set this property. 
For more information, please\n refer to :ref:`api_guide_Name`.\n Returns:\n Tensor: sorted tensor(with the same shape and data type as ``x``).\n Examples:\n\n .. code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[[5,8,9,5],\n [0,0,1,7],\n [6,9,2,4]],\n [[5,2,4,2],\n [4,7,7,9],\n [1,7,0,6]]], \n dtype='float32')\n out1 = paddle.sort(x=x, axis=-1)\n out2 = paddle.sort(x=x, axis=0)\n out3 = paddle.sort(x=x, axis=1)\n print(out1)\n #[[[5. 5. 8. 9.]\n # [0. 0. 1. 7.]\n # [2. 4. 6. 9.]]\n # [[2. 2. 4. 5.]\n # [4. 7. 7. 9.]\n # [0. 1. 6. 7.]]]\n print(out2)\n #[[[5. 2. 4. 2.]\n # [0. 0. 1. 7.]\n # [1. 7. 0. 4.]]\n # [[5. 8. 9. 5.]\n # [4. 7. 7. 9.]\n # [6. 9. 2. 6.]]]\n print(out3)\n #[[[0. 0. 1. 4.]\n # [5. 8. 2. 5.]\n # [6. 9. 9. 7.]]\n # [[1. 2. 0. 2.]\n # [4. 7. 4. 6.]\n # [5. 7. 7. 9.]]]\n \"\"\"\n if in_dygraph_mode():\n outs, _ = _C_ops.final_state_argsort(x, axis, descending)\n return outs\n\n if _in_legacy_dygraph():\n outs, _ = _C_ops.argsort(x, 'axis', axis, 'descending', descending)\n return outs\n helper = LayerHelper(\"sort\", **locals())\n out = helper.create_variable_for_type_inference(\n dtype=x.dtype, stop_gradient=False)\n ids = helper.create_variable_for_type_inference(\n VarDesc.VarType.INT64, stop_gradient=True)\n helper.append_op(\n type='argsort',\n inputs={'X': x},\n outputs={'Out': out,\n 'Indices': ids},\n attrs={'axis': axis,\n 'descending': descending})\n return out\n\n\ndef mode(x, axis=-1, keepdim=False, name=None):\n \"\"\"\n This OP is used to find values and indices of the modes at the optional axis.\n\n Args:\n x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.\n axis(int, optional): Axis to compute indices along. The effective range\n is [-R, R), where R is x.ndim. when axis < 0, it works the same way\n as axis + R. Default is -1.\n keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n \n tensor = paddle.to_tensor([[[1,2,2],[2,3,3]],[[0,5,5],[9,9,0]]], dtype=paddle.float32)\n res = paddle.mode(tensor, 2)\n print(res)\n # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [[2., 3.],\n # [5., 9.]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n # [[1, 1],\n # [1, 0]]))\n \n \"\"\"\n if in_dygraph_mode():\n return _C_ops.final_state_mode(x, axis, keepdim)\n if _in_legacy_dygraph():\n return _C_ops.mode(x, \"axis\", axis, \"keepdim\", keepdim)\n\n helper = LayerHelper(\"mode\", **locals())\n inputs = {\"X\": [x]}\n attrs = {}\n attrs['axis'] = axis\n attrs['keepdim'] = keepdim\n\n values = helper.create_variable_for_type_inference(dtype=x.dtype)\n indices = helper.create_variable_for_type_inference(dtype=\"int64\")\n\n helper.append_op(\n type=\"mode\",\n inputs=inputs,\n outputs={\"Out\": [values],\n \"Indices\": [indices]},\n attrs=attrs)\n indices.stop_gradient = True\n return values, indices\n\n\ndef where(condition, x=None, y=None, name=None):\n r\"\"\"\n Return a tensor of elements selected from either $x$ or $y$, depending on $condition$.\n\n **Note**:\n ``paddle.where(condition)`` is identical to ``paddle.nonzero(condition, as_tuple=True)``.\n\n .. math::\n\n out_i =\n \\begin{cases}\n x_i, \\quad \\text{if} \\ condition_i \\ is \\ True \\\\\n y_i, \\quad \\text{if} \\ condition_i \\ is \\ False \\\\\n \\end{cases}\n\n\n Args:\n condition(Tensor): The condition to choose x or y. When True(nonzero), yield x, otherwise yield y.\n x(Tensor or Scalar, optional): x is a Tensor or Scalar with data type float32, float64, int32, int64. Either both or neither of x and y should be given.\n y(Tensor or Scalar, optional): y is a Tensor or Scalar with data type float32, float64, int32, int64. Either both or neither of x and y should be given.\n\n name(str, optional): The default value is None. Normally there is no\n need for user to set this property. For more information, please\n refer to :ref:`api_guide_Name`.\n\n Returns:\n Tensor: A Tensor with the same data dype as x. \n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([0.9383, 0.1983, 3.2, 1.2])\n y = paddle.to_tensor([1.0, 1.0, 1.0, 1.0])\n out = paddle.where(x>1, x, y)\n\n print(out)\n #out: [1.0, 1.0, 3.2, 1.2]\n\n out = paddle.where(x>1)\n print(out)\n #out: (Tensor(shape=[2, 1], dtype=int64, place=CPUPlace, stop_gradient=True,\n # [[2],\n # [3]]),)\n \"\"\"\n if np.isscalar(x):\n x = paddle.full([1], x, np.array([x]).dtype.name)\n\n if np.isscalar(y):\n y = paddle.full([1], y, np.array([y]).dtype.name)\n\n if x is None and y is None:\n return nonzero(condition, as_tuple=True)\n\n if x is None or y is None:\n raise ValueError(\"either both or neither of x and y should be given\")\n\n if not paddle.in_dynamic_mode():\n check_variable_and_dtype(condition, 'condition', ['bool'], 'where')\n check_variable_and_dtype(\n x, 'x', ['float32', 'float64', 'int32', 'int64'], 'where')\n check_variable_and_dtype(\n y, 'y', ['float32', 'float64', 'int32', 'int64'], 'where')\n\n condition_shape = list(condition.shape)\n x_shape = list(x.shape)\n y_shape = list(y.shape)\n\n if x_shape == y_shape and condition_shape == x_shape:\n broadcast_condition = condition\n broadcast_x = x\n broadcast_y = y\n else:\n if core.is_compiled_with_xpu():\n cond_int = paddle.cast(condition, x.dtype)\n cond_not_int = paddle.cast(logical_not(condition), x.dtype)\n out1 = paddle.multiply(x, cond_int)\n out2 = paddle.multiply(y, cond_not_int)\n out = paddle.add(out1, out2)\n return out\n\n zeros_like_x = paddle.zeros_like(x)\n zeros_like_y = paddle.zeros_like(y)\n zeros_like_condition = paddle.zeros_like(condition)\n zeros_like_condition = paddle.cast(zeros_like_condition, x.dtype)\n cast_cond = paddle.cast(condition, x.dtype)\n\n broadcast_zeros = paddle.add(zeros_like_x, zeros_like_y)\n broadcast_zeros = paddle.add(broadcast_zeros, zeros_like_condition)\n broadcast_x = paddle.add(x, broadcast_zeros)\n broadcast_y = paddle.add(y, broadcast_zeros)\n broadcast_condition = paddle.add(cast_cond, broadcast_zeros)\n broadcast_condition = paddle.cast(broadcast_condition, 'bool')\n\n if in_dygraph_mode():\n return _C_ops.final_state_where(broadcast_condition, broadcast_x,\n broadcast_y)\n else:\n if _in_legacy_dygraph():\n return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)\n else:\n helper = LayerHelper(\"where\", **locals())\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n\n helper.append_op(\n type='where',\n inputs={\n 'Condition': broadcast_condition,\n 'X': broadcast_x,\n 'Y': broadcast_y\n },\n outputs={'Out': [out]})\n\n return out\n\n\ndef index_sample(x, index):\n \"\"\"\n **IndexSample Layer**\n\n IndexSample OP returns the element of the specified location of X, \n and the location is specified by Index. \n\n .. code-block:: text\n\n\n Given:\n\n X = [[1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10]]\n\n Index = [[0, 1, 3],\n [0, 2, 4]]\n\n Then:\n\n Out = [[1, 2, 4],\n [6, 8, 10]]\n\n Args:\n x (Tensor): The source input tensor with 2-D shape. Supported data type is \n int32, int64, float32, float64.\n index (Tensor): The index input tensor with 2-D shape, first dimension should be same with X. \n Data type is int32 or int64.\n\n Returns:\n output (Tensor): The output is a tensor with the same shape as index.\n\n Examples:\n\n .. 
code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],\n [5.0, 6.0, 7.0, 8.0],\n [9.0, 10.0, 11.0, 12.0]], dtype='float32')\n index = paddle.to_tensor([[0, 1, 2],\n [1, 2, 3],\n [0, 0, 0]], dtype='int32')\n target = paddle.to_tensor([[100, 200, 300, 400],\n [500, 600, 700, 800],\n [900, 1000, 1100, 1200]], dtype='int32')\n out_z1 = paddle.index_sample(x, index)\n print(out_z1)\n #[[1. 2. 3.]\n # [6. 7. 8.]\n # [9. 9. 9.]]\n\n # Use the index of the maximum value by topk op\n # get the value of the element of the corresponding index in other tensors\n top_value, top_index = paddle.topk(x, k=2)\n out_z2 = paddle.index_sample(target, top_index)\n print(top_value)\n #[[ 4. 3.]\n # [ 8. 7.]\n # [12. 11.]]\n\n print(top_index)\n #[[3 2]\n # [3 2]\n # [3 2]]\n\n print(out_z2)\n #[[ 400 300]\n # [ 800 700]\n # [1200 1100]]\n\n \"\"\"\n if in_dygraph_mode():\n return _C_ops.final_state_index_sample(x, index)\n else:\n if _in_legacy_dygraph():\n return _C_ops.index_sample(x, index)\n else:\n helper = LayerHelper(\"index_sample\", **locals())\n check_variable_and_dtype(x, 'x',\n ['float32', 'float64', 'int32', 'int64'],\n 'paddle.tensor.search.index_sample')\n check_variable_and_dtype(index, 'index', ['int32', 'int64'],\n 'paddle.tensor.search.index_sample')\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n\n helper.append_op(\n type='index_sample',\n inputs={'X': x,\n 'Index': index},\n outputs={'Out': out})\n return out\n\n\ndef masked_select(x, mask, name=None):\n \"\"\"\n Returns a new 1-D tensor which indexes the input tensor according to the ``mask``\n which is a tensor with data type of bool.\n\n Args:\n x (Tensor): The input Tensor, the data type can be int32, int64, float32, float64. \n mask (Tensor): The Tensor containing the binary mask to index with, it's data type is bool.\n name(str, optional): The default value is None. Normally there is no\n need for user to set this property. For more information, please\n refer to :ref:`api_guide_Name`.\n\n Returns: A 1-D Tensor which is the same data type as ``x``.\n \n Examples:\n\n .. 
code-block:: python\n\n import paddle\n\n x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],\n [5.0, 6.0, 7.0, 8.0],\n [9.0, 10.0, 11.0, 12.0]])\n mask = paddle.to_tensor([[True, False, False, False],\n [True, True, False, False],\n [True, False, False, False]])\n out = paddle.masked_select(x, mask)\n #[1.0 5.0 6.0 9.0]\n \"\"\"\n\n if in_dygraph_mode():\n return _C_ops.final_state_masked_select(x, mask)\n\n if _in_legacy_dygraph():\n return _C_ops.masked_select(x, mask)\n\n helper = LayerHelper(\"masked_select\", **locals())\n check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],\n 'paddle.tensor.search.mask_select')\n check_variable_and_dtype(mask, 'mask', ['bool'],\n 'paddle.tensor.search.masked_select')\n out = helper.create_variable_for_type_inference(dtype=x.dtype)\n helper.append_op(\n type='masked_select', inputs={'X': x,\n 'Mask': mask}, outputs={'Y': out})\n return out\n\n\ndef topk(x, k, axis=None, largest=True, sorted=True, name=None):\n \"\"\"\n Return values and indices of the k largest or smallest at the optional axis.\n If the input is a 1-D Tensor, finds the k largest or smallest values and indices.\n If the input is a Tensor with higher rank, this operator computes the top k values and indices along the :attr:`axis`.\n\n Args:\n x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.\n k(int, Tensor): The number of top elements to look for along the axis.\n axis(int, optional): Axis to compute indices along. The effective range\n is [-R, R), where R is x.ndim. when axis < 0, it works the same way\n as axis + R. Default is -1.\n largest(bool, optional) : largest is a flag, if set to true,\n algorithm will sort by descending order, otherwise sort by\n ascending order. Default is True.\n sorted(bool, optional): controls whether to return the elements in sorted order, default value is True. In gpu device, it always return the sorted value. \n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.\n\n Examples:\n\n .. 
code-block:: python\n :name: code-example1\n import paddle\n\n data_1 = paddle.to_tensor([1, 4, 5, 7])\n value_1, indices_1 = paddle.topk(data_1, k=1)\n print(value_1) # [7]\n print(indices_1) # [3]\n\n data_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])\n value_2, indices_2 = paddle.topk(data_2, k=1)\n print(value_2) # [[7], [6]]\n print(indices_2) # [[3], [1]]\n\n value_3, indices_3 = paddle.topk(data_2, k=1, axis=-1)\n print(value_3) # [[7], [6]]\n print(indices_3) # [[3], [1]]\n\n value_4, indices_4 = paddle.topk(data_2, k=1, axis=0)\n print(value_4) # [[2, 6, 5, 7]]\n print(indices_4) # [[1, 1, 0, 0]]\n\n\n \"\"\"\n\n if in_dygraph_mode():\n if axis == None:\n axis = -1\n out, indices = _C_ops.final_state_top_k(x, k, axis, largest, sorted)\n return out, indices\n\n if _non_static_mode():\n if axis is None:\n out, indices = _C_ops.top_k_v2(x, 'k',\n int(k), 'largest', largest, 'sorted',\n sorted)\n else:\n out, indices = _C_ops.top_k_v2(x, 'k',\n int(k), 'axis', axis, 'largest',\n largest, 'sorted', sorted)\n return out, indices\n\n helper = LayerHelper(\"top_k_v2\", **locals())\n inputs = {\"X\": [x]}\n attrs = {}\n if isinstance(k, Variable):\n inputs['K'] = [k]\n else:\n attrs = {'k': k}\n attrs['largest'] = largest\n attrs['sorted'] = sorted\n if axis is not None:\n attrs['axis'] = axis\n\n values = helper.create_variable_for_type_inference(dtype=x.dtype)\n indices = helper.create_variable_for_type_inference(dtype=\"int64\")\n\n helper.append_op(\n type=\"top_k_v2\",\n inputs=inputs,\n outputs={\"Out\": [values],\n \"Indices\": [indices]},\n attrs=attrs)\n indices.stop_gradient = True\n return values, indices\n\n\ndef searchsorted(sorted_sequence,\n values,\n out_int32=False,\n right=False,\n name=None):\n \"\"\"\n This OP is used to find the index of the corresponding `sorted_sequence` in the innermost dimension based on the given `values`.\n\n Args:\n sorted_sequence(Tensor): An input N-D or 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension. \n values(Tensor): An input N-D tensor value with type int32, int64, float32, float64.\n out_int32(bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.\n right(bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `values`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.\n The default value is False and it shows the lower bounds. \n name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.\n \n Returns:\n Tensor(the same sizes of the `values`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64. \n \n Examples:\n\n .. 
code-block:: python\n \n import paddle\n\n sorted_sequence = paddle.to_tensor([[1, 3, 5, 7, 9, 11],\n [2, 4, 6, 8, 10, 12]], dtype='int32')\n values = paddle.to_tensor([[3, 6, 9, 10], [3, 6, 9, 10]], dtype='int32')\n out1 = paddle.searchsorted(sorted_sequence, values)\n print(out1)\n # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n # [[1, 3, 4, 5],\n # [1, 2, 4, 4]])\n out2 = paddle.searchsorted(sorted_sequence, values, right=True)\n print(out2)\n # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n # [[2, 3, 5, 5],\n # [1, 3, 4, 5]])\n sorted_sequence_1d = paddle.to_tensor([1, 3, 5, 7, 9, 11, 13])\n out3 = paddle.searchsorted(sorted_sequence_1d, values) \n print(out3)\n # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n # [[1, 3, 4, 5],\n # [1, 3, 4, 5]])\n \n \"\"\"\n if in_dygraph_mode():\n return _C_ops.final_state_searchsorted(sorted_sequence, values,\n out_int32, right)\n\n if _in_legacy_dygraph():\n return _C_ops.searchsorted(sorted_sequence, values, \"out_int32\",\n out_int32, \"right\", right)\n\n check_variable_and_dtype(sorted_sequence, 'SortedSequence',\n ['float32', 'float64', 'int32', 'int64'],\n 'paddle.searchsorted')\n check_variable_and_dtype(values, 'Values',\n ['float32', 'float64', 'int32', 'int64'],\n 'paddle.searchsorted')\n\n helper = LayerHelper('searchsorted', **locals())\n out_type = 'int32' if out_int32 else 'int64'\n out = helper.create_variable_for_type_inference(dtype=out_type)\n helper.append_op(\n type='searchsorted',\n inputs={'SortedSequence': sorted_sequence,\n \"Values\": values},\n outputs={'Out': out},\n attrs={\"out_int32\": out_int32,\n \"right\": right})\n\n return out\n\n\ndef kthvalue(x, k, axis=None, keepdim=False, name=None):\n \"\"\"\n This OP is used to find values and indices of the k-th smallest at the axis.\n\n Args:\n x(Tensor): A N-D Tensor with type float32, float64, int32, int64.\n k(int): The k for the k-th smallest number to look for along the axis.\n axis(int, optional): Axis to compute indices along. The effective range\n is [-R, R), where R is x.ndim. when axis < 0, it works the same way\n as axis + R. The default is None. And if the axis is None, it will computed as -1 by default.\n keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.\n name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.\n\n Returns:\n tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.\n \n Examples:\n\n .. 
code-block:: python\n \n import paddle\n \n x = paddle.randn((2,3,2))\n # Tensor(shape=[2, 3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [[[ 0.22954939, -0.01296274],\n # [ 1.17135799, -0.34493217],\n # [-0.19550551, -0.17573971]],\n #\n # [[ 0.15104349, -0.93965352],\n # [ 0.14745511, 0.98209465],\n # [ 0.10732264, -0.55859774]]]) \n y = paddle.kthvalue(x, 2, 1) \n # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,\n # [[ 0.22954939, -0.17573971],\n # [ 0.14745511, -0.55859774]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,\n # [[0, 2],\n # [1, 2]]))\n \"\"\"\n if _non_static_mode():\n if axis is not None:\n if _in_legacy_dygraph():\n return _C_ops.kthvalue(x, 'k', k, \"axis\", axis, \"keepdim\",\n keepdim)\n return _C_ops.final_state_kthvalue(x, k, axis, keepdim)\n else:\n if _in_legacy_dygraph():\n return _C_ops.kthvalue(x, 'k', k, \"keepdim\", keepdim)\n return _C_ops.final_state_kthvalue(x, k, -1, keepdim)\n\n helper = LayerHelper(\"kthvalue\", **locals())\n inputs = {\"X\": [x]}\n attrs = {'k': k}\n if axis is not None:\n attrs['axis'] = axis\n values = helper.create_variable_for_type_inference(dtype=x.dtype)\n indices = helper.create_variable_for_type_inference(dtype=\"int64\")\n\n helper.append_op(\n type=\"kthvalue\",\n inputs=inputs,\n outputs={\"Out\": [values],\n \"Indices\": [indices]},\n attrs=attrs)\n indices.stop_gradient = True\n return values, indices\n" ]
[ [ "numpy.array", "numpy.isscalar" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
liguowang/epage
[ "2ce60ddbcd23f06dc4d635681e8e52b66ba519f9", "2ce60ddbcd23f06dc4d635681e8e52b66ba519f9" ]
[ "bin/gComposite.py", "lib/pacmodule/iMatrix.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 30 10:04:05 2019\n\n@author: m102324\n\nDescription\n-----------\nThis program Calculates the Composite Expression Scores:\n\t* Gene Set Variation Analysis (GSVA). (Hänzelmann et al, 2013)\n\t* Single Sample GSEA (ssGSEA). (Barbie et al, 2009)\n\t* zscore (Lee et al, 2008)\n\t* plage (Tomfohr et al, 2005)\n\"\"\"\nimport sys\n#import numpy as np\n#import pandas as pd\nfrom time import strftime\nimport pandas as pd\nfrom optparse import OptionParser\nfrom pacmodule import iList,iMatrix,iPas,gsva,cpca\n\n__author__ = \"Liguo Wang\"\n__copyright__ = \"Copyleft\"\n__credits__ = []\n__license__ = \"MIT\"\n__version__=\"1.0.0\"\n__maintainer__ = \"Liguo Wang\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n\ndef main():\n\tusage=\"%prog [options]\" + \"\\n\"\n\tparser = OptionParser(usage,version=\"%prog \" + __version__)\n\tparser.add_option(\"-e\",\"--expr_matrix\",action=\"store\",type=\"string\",dest=\"expr_file\",help=\"Tab-separated data matrix file containing gene expression values. The 1st row containing sample/patient IDs and the 1st column containing gene symbols(mut be unique). File can be compressed (.gz, .Z, .z, .bz, .bz2, bzip2).\")\n\tparser.add_option(\"-g\",\"--gene\",action=\"store\",type=\"string\",dest=\"gene_file\",help=\"GMT file. The GMT file format is a tab delimited file format that describes gene sets (Each gene set is described by a name, a description, and the genes in the gene set). In the GMT format, each row represents a gene set. The first column is get set name (must be unique). The second column is brief description (can be 'na').\")\n\tparser.add_option(\"-k\",\"--group\",action=\"store\",type=\"string\",dest=\"group_file\",help=\"Group file (in CSV format). First column is sample ID, second column is group ID\")\n\tparser.add_option(\"-s\",\"--sample\",action=\"store\",type='string', dest=\"sample_file\",default=None, help=\"Sample list file containing sample IDs. Each row can be a single sample ID, a comma-separated sample IDs or a space-separated sample IDs. Sample IDs must match exactly to those in the data matrix file. If omitted, calculated activity scores for *all* the samples. File can be compressed (.gz, .Z, .z, .bz, .bz2, bzip2). default=%default (All samples will be used)\")\n\tparser.add_option(\"-l\",\"--log\",action=\"store_true\",default=False,dest=\"log2\",help=\"If True, will do log2(x+1) transformation for gene experssion values. Must set to 'True' if expressin values are RNA-seq count. default=%default\")\n\tparser.add_option(\"-p\",\"--processor\",action=\"store\", type='int',default=0,dest=\"n_thread\",help=\"Number of processors to use when doing the calculations in parallel. 
default=%default (use all available processors)\")\n\tparser.add_option(\"-o\",\"--output\",action=\"store\",type='string', dest=\"out_file\",help=\"The prefix of the output file.\")\n\t\n\n\t(options,args)=parser.parse_args()\n\t \n\t\n\tif not (options.expr_file):\n\t\tprint (\"-e/--expr_matrix: gene expression file must be specified.\", file=sys.stderr)\n\t\tparser.print_help()\n\t\tsys.exit()\n\tif not (options.gene_file):\n\t\tprint (\"-g/--gene GMT file must be specified.\", file=sys.stderr)\n\t\tparser.print_help()\n\t\tsys.exit()\n\tif not (options.out_file):\n\t\tprint (\"-o/--output: output prefix must be specified.\", file=sys.stderr)\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif not (options.group_file):\n\t\tprint (\"-k/--group: group must be specified.\", file=sys.stderr)\n\t\tparser.print_help()\n\t\tsys.exit()\n\t\t\n\t# read gene set(s)\n\tprint (\"@ \" + strftime(\"%Y-%m-%d %H:%M:%S : \") + \"Read gene list from GMT file \\\"%s\\\" ...\" % options.gene_file)\n\tgene_sets = iList.get_list(options.gene_file)\n\tall_genes = []\t# combine gene sets\n\tprint (\"\\tTotal %d gene sets loaded.\" % len(gene_sets), file=sys.stderr)\n\tfor k in gene_sets:\n\t\tprint (\"\\tGene set '%s': Total genes = %d, Unique genes = %d\" % (k, len(gene_sets[k]), len(set(gene_sets[k]))), file=sys.stderr)\n\t\tfor g in gene_sets[k]:\n\t\t\tprint (\"\\t\" + g)\n\t\tall_genes += gene_sets[k]\n\tall_genes = list(set(all_genes))\n \n \n # read sample list\n\tsample_list = []\n\tif (options.sample_file):\n\t\tprint (\"@ \" + strftime(\"%Y-%m-%d %H:%M:%S : \") + \"Read sample list from \\\"%s\\\" ...\" % options.sample_file)\t\n\t\tsample_list = iList.get_list(options.sample_file)\n\t\tprint (\"\\tTotal %d samples loaded.\" % len(sample_list))\n\t\tiList.print_list(sample_list)\n\telse:\n\t\tprint (\"@ \" + strftime(\"%Y-%m-%d %H:%M:%S : \") + \"Calculate activity score for **all samples** in \\\"%s\\\"\" % options.expr_file)\n\t\n\t# read gene expression matrix\n\tprint (\"@ \" + strftime(\"%Y-%m-%d %H:%M:%S : \") + \"Read gene expression matrix from \\\"%s\\\" ...\" % options.expr_file)\n\tgenes_not_found = iMatrix.read_matrix(infile = options.expr_file, g_list = all_genes, s_list = sample_list, outfile = options.out_file + '.mat.tsv', zfile = None,log = options.log2)\n\t\n\t# run PCA\n\tprint (\"@ \" + strftime(\"%Y-%m-%d %H:%M:%S : \") + \"Calculate the first two principal components (saved to '%s') ...\" % ((options.out_file + '_pca.csv')))\n\tcpca.run_PCA(options.out_file + '.mat.tsv', options.out_file)\n\t\n\t# rebuild GMT file by removing unfound genes\n\tif len(genes_not_found) > 0:\n\t\tprint (\"@ \" + strftime(\"%Y-%m-%d %H:%M:%S : \") + \"Rebuild GMT file as \\\"%s\\\" ...\" % (options.out_file + '.New.gmt'))\n\t\tiList.rebuild_gmt(oldfile = options.gene_file, newfile = options.out_file + '.New.gmt', genes = genes_not_found)\n\t\t\n\t\tprint (\"@ \" + strftime(\"%Y-%m-%d %H:%M:%S : \") + \"Calculate GSVA (saved to '%s'), ssGSEA (saved to '%s'), Z-SCORE (saved to '%s') and PLAGE (saved to '%s') ...\" % ((options.out_file + '_gsva.csv'), (options.out_file + '_ssgsea.csv'), (options.out_file + '_zscore.csv'), (options.out_file + '_plage.csv')))\n\t\tgsva.run_gsva(routfile = options.out_file + '.R', gmtfile = options.out_file + '.New.gmt', expr_file = options.out_file + '.mat.tsv', outfile = options.out_file, n_proc = options.n_thread)\n\n\telse:\n\t\tprint (\"@ \" + strftime(\"%Y-%m-%d %H:%M:%S : \") + \"Calculate GSVA (saved to '%s'), ssGSEA (saved to '%s'), Z-SCORE (saved to '%s') and PLAGE 
(saved to '%s') ...\" % ((options.out_file + '_gsva.csv'), (options.out_file + '_ssgsea.csv'), (options.out_file + '_zscore.csv'), (options.out_file + '_plage.csv')))\n\t\tgsva.run_gsva(routfile = options.out_file + '.R', gmtfile = options.gene_file, expr_file = options.out_file + '.mat.tsv', outfile = options.out_file, n_proc = options.n_thread)\n\t\n\t\n\t\n\t\n\t# combine\n\tdf_group = pd.read_csv(options.group_file,index_col = 0)\n\tdf_gsva = pd.read_csv(options.out_file + '_gsva.csv',index_col = 0)\n\tdf_ssgsea = pd.read_csv(options.out_file + '_ssgsea.csv',index_col = 0)\n\tdf_zscore = pd.read_csv(options.out_file + '_zscore.csv',index_col = 0)\n\tdf_plage = pd.read_csv(options.out_file + '_plage.csv',index_col = 0)\n\tdf_pca = pd.read_csv(options.out_file + '_pca.csv',index_col = 0)\n\t\n\tdata_frames = pd.concat([df_group, df_gsva, df_ssgsea,df_pca, df_zscore, df_plage],axis=1, join='inner')\n\tdata_frames.to_csv(options.out_file + '_combined.tsv', index=True, sep=\"\\t\")\n\t\n\t#data_frames = pd.concat([df_gsva, df_ssgsea,df_zscore, df_plage],axis=1, join='inner')\n\t#data_frames.to_csv(options.out_file + '_combined.tsv', index=True,sep=\"\\t\")\n\t\nif __name__=='__main__':\n\tmain()\n\t\t\t\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 30 11:47:21 2019\n\n@author: m102324\n\"\"\"\nimport sys\nimport math\nfrom pacmodule import iReader\nfrom scipy import stats\n\ndef read_matrix(infile, g_list, s_list, outfile, zfile,log=False):\n '''\n Slice a subset from matrix file.\n \n Parameters\n ----------\n infile : str\n Input matrix file.\n g_list : list\n List containing gene symbols. Symbols not contained in infile will be \n skipped.\n s_list : list\n List containing sample IDs. IDs not contained in infile will be skipped.\n outfile : str\n Output file containing the orignal gene expression scores.\n zfile : str\n Output file containing Z-scores.\n log : bool\n If ture, we will do log2(x+1) transformation for expression values. 
\n '''\n \n OUT = open(outfile, 'w')\n if zfile is not None:\n ZOUT = open(zfile, 'w')\n g_list = set(g_list)\n s_list = set(s_list)\n genes_not_found = []\n\t\n line_num = 0\n genes_found = set()\n for l in iReader.reader(infile):\n l = l.strip()\n line_num += 1\n if line_num == 1:\n all_samples = l.split()[1:]\n \n #output a subset of samples\n if len(s_list) > 0:\n subset_index = []\n for i in range(0,len(all_samples)):\n if all_samples[i] in s_list:\n subset_index.append(i)\n \n subset_samples = [all_samples[i] for i in subset_index]\n print ('sample\\t' + '\\t'.join(subset_samples), file=OUT)\n if zfile is not None:\n print ('sample\\t' + '\\t'.join(subset_samples), file=ZOUT)\n #output all samples\n else:\n print ('sample\\t' + '\\t'.join(all_samples), file=OUT)\n if zfile is not None:\n print ('sample\\t' + '\\t'.join(all_samples), file=ZOUT)\n else: \n tmp = l.split()\n geneID = tmp[0]\n if len(g_list) > 0:\n if geneID not in g_list:\n continue\n genes_found.add(geneID)\n \n #convert str into floats\n try:\n all_scores = list(map(float,tmp[1:]))\n except:\n print (\"Skip line with missing values:\" + l, file=sys.stderr)\n continue\n \n #do log2(x+1) transformation\n if log:\n all_scores = [math.log2(i+1) for i in all_scores]\n \n if len(s_list) > 0:\n subset_scores = [all_scores[i] for i in subset_index]\n print (geneID + '\\t' + '\\t'.join([str(i) for i in subset_scores]), file=OUT)\n if zfile is not None:\n subset_z_scores = stats.zscore([i for i in subset_scores])\n print (geneID + '\\t' + '\\t'.join([str(i) for i in subset_z_scores]), file=ZOUT)\n else:\n print (geneID + '\\t' + '\\t'.join([str(i) for i in all_scores]), file=OUT)\n if zfile is not None:\n all_z_scores = stats.zscore([i for i in all_scores])\n print (geneID + '\\t' + '\\t'.join([str(i) for i in all_z_scores]), file=ZOUT)\n \n if len(g_list) > 0:\n genes_not_found = list(g_list - genes_found)\n if len(genes_not_found) > 0:\n print (\"\\t%d Genes not found:\" % len(genes_not_found), genes_not_found)\n else:\n print (\"\\tAll the genes were found.\")\n OUT.close()\n return genes_not_found\n \n " ]
[ [ "pandas.concat", "pandas.read_csv" ], [ "scipy.stats.zscore" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
lifehouse11amber2/Machine-Learning-A-Z-hands-on-Python-And-R-in-data-Science
[ "d804e7eeace1e5187b156b2fa3e71125b2a3448a", "d804e7eeace1e5187b156b2fa3e71125b2a3448a" ]
[ "Machine Learning A-Z/Part 7 - Natural Language Processing/Section 36 - Natural Language Processing/natural_language_processing.py", "Machine Learning A-Z/keras-fire-detection/pyimagesearch/learningratefinder.py" ]
[ "# Natural Language Processing\n\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\\t', quoting = 3)\n\n# Cleaning the texts\nimport re\nimport nltk\nnltk.download('stopwords')\nfrom nltk.corpus import stopwords\nfrom nltk.stem.porter import PorterStemmer\ncorpus = []\nfor i in range(0, 1000):\n review = re.sub('[^a-zA-Z]', ' ', dataset['Review'][i])\n review = review.lower()\n review = review.split()\n ps = PorterStemmer()\n review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]\n review = ' '.join(review)\n corpus.append(review)\n\n# Creating the Bag of Words model\nfrom sklearn.feature_extraction.text import CountVectorizer\ncv = CountVectorizer(max_features = 1500)\nX = cv.fit_transform(corpus).toarray()\ny = dataset.iloc[:, 1].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n\n# Fitting Naive Bayes to the Training set\nfrom sklearn.naive_bayes import GaussianNB\nclassifier = GaussianNB()\nclassifier.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\n# naive based classifer\n# true positive= 55\n# false positive = 42\n# true Negative = 91\n# false negative = 12\n\n#Accuracy score\nAS=(55+91)/200 #.73\n\n#Precision\nP=54/(55+42) #0.57\n\n#Recall\nR=55/(55+12) # 0.82\n\n#F1 Score\n2*P*R/(P+R) #0.67", "# import the necessary packages\nfrom tensorflow.keras.callbacks import LambdaCallback\nfrom tensorflow.keras import backend as K\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tempfile\n\nclass LearningRateFinder:\n\tdef __init__(self, model, stopFactor=4, beta=0.98):\n\t\t# store the model, stop factor, and beta value (for computing\n\t\t# a smoothed, average loss)\n\t\tself.model = model\n\t\tself.stopFactor = stopFactor\n\t\tself.beta = beta\n\n\t\t# initialize our list of learning rates and losses,\n\t\t# respectively\n\t\tself.lrs = []\n\t\tself.losses = []\n\n\t\t# initialize our learning rate multiplier, average loss, best\n\t\t# loss found thus far, current batch number, and weights file\n\t\tself.lrMult = 1\n\t\tself.avgLoss = 0\n\t\tself.bestLoss = 1e9\n\t\tself.batchNum = 0\n\t\tself.weightsFile = None\n\n\tdef reset(self):\n\t\t# re-initialize all variables from our constructor\n\t\tself.lrs = []\n\t\tself.losses = []\n\t\tself.lrMult = 1\n\t\tself.avgLoss = 0\n\t\tself.bestLoss = 1e9\n\t\tself.batchNum = 0\n\t\tself.weightsFile = None\n\n\tdef is_data_iter(self, data):\n\t\t# define the set of class types we will check for\n\t\titerClasses = [\"NumpyArrayIterator\", \"DirectoryIterator\",\n\t\t\t \"DataFrameIterator\", \"Iterator\", \"Sequence\"]\n\n\t\t# return whether our data is an iterator\n\t\treturn data.__class__.__name__ in iterClasses\n\n\tdef on_batch_end(self, batch, logs):\n\t\t# grab the current learning rate and add log it to the list of\n\t\t# learning rates that we've tried\n\t\tlr = K.get_value(self.model.optimizer.lr)\n\t\tself.lrs.append(lr)\n\n\t\t# grab the loss at the end of this batch, increment the total\n\t\t# number of batches processed, compute the average average\n\t\t# loss, smooth it, and update the losses list with the\n\t\t# 
smoothed value\n\t\tl = logs[\"loss\"]\n\t\tself.batchNum += 1\n\t\tself.avgLoss = (self.beta * self.avgLoss) + ((1 - self.beta) * l)\n\t\tsmooth = self.avgLoss / (1 - (self.beta ** self.batchNum))\n\t\tself.losses.append(smooth)\n\n\t\t# compute the maximum loss stopping factor value\n\t\tstopLoss = self.stopFactor * self.bestLoss\n\n\t\t# check to see whether the loss has grown too large\n\t\tif self.batchNum > 1 and smooth > stopLoss:\n\t\t\t# stop returning and return from the method\n\t\t\tself.model.stop_training = True\n\t\t\treturn\n\n\t\t# check to see if the best loss should be updated\n\t\tif self.batchNum == 1 or smooth < self.bestLoss:\n\t\t\tself.bestLoss = smooth\n\n\t\t# increase the learning rate\n\t\tlr *= self.lrMult\n\t\tK.set_value(self.model.optimizer.lr, lr)\n\n\tdef find(self, trainData, startLR, endLR, epochs=None,\n\t\tstepsPerEpoch=None, batchSize=32, sampleSize=2048,\n\t\tclassWeight=None, verbose=1):\n\t\t# reset our class-specific variables\n\t\tself.reset()\n\n\t\t# determine if we are using a data generator or not\n\t\tuseGen = self.is_data_iter(trainData)\n\n\t\t# if we're using a generator and the steps per epoch is not\n\t\t# supplied, raise an error\n\t\tif useGen and stepsPerEpoch is None:\n\t\t\tmsg = \"Using generator without supplying stepsPerEpoch\"\n\t\t\traise Exception(msg)\n\n\t\t# if we're not using a generator then our entire dataset must\n\t\t# already be in memory\n\t\telif not useGen:\n\t\t\t# grab the number of samples in the training data and\n\t\t\t# then derive the number of steps per epoch\n\t\t\tnumSamples = len(trainData[0])\n\t\t\tstepsPerEpoch = np.ceil(numSamples / float(batchSize))\n\n\t\t# if no number of training epochs are supplied, compute the\n\t\t# training epochs based on a default sample size\n\t\tif epochs is None:\n\t\t\tepochs = int(np.ceil(sampleSize / float(stepsPerEpoch)))\n\n\t\t# compute the total number of batch updates that will take\n\t\t# place while we are attempting to find a good starting\n\t\t# learning rate\n\t\tnumBatchUpdates = epochs * stepsPerEpoch\n\n\t\t# derive the learning rate multiplier based on the ending\n\t\t# learning rate, starting learning rate, and total number of\n\t\t# batch updates\n\t\tself.lrMult = (endLR / startLR) ** (1.0 / numBatchUpdates)\n\n\t\t# create a temporary file path for the model weights and\n\t\t# then save the weights (so we can reset the weights when we\n\t\t# are done)\n\t\tself.weightsFile = tempfile.mkstemp()[1]\n\t\tself.model.save_weights(self.weightsFile)\n\n\t\t# grab the *original* learning rate (so we can reset it\n\t\t# later), and then set the *starting* learning rate\n\t\torigLR = K.get_value(self.model.optimizer.lr)\n\t\tK.set_value(self.model.optimizer.lr, startLR)\n\n\t\t# construct a callback that will be called at the end of each\n\t\t# batch, enabling us to increase our learning rate as training\n\t\t# progresses\n\t\tcallback = LambdaCallback(on_batch_end=lambda batch, logs:\n\t\t\tself.on_batch_end(batch, logs))\n\n\t\t# check to see if we are using a data iterator\n\t\tif useGen:\n\t\t\tself.model.fit_generator(\n\t\t\t\ttrainData,\n\t\t\t\tsteps_per_epoch=stepsPerEpoch,\n\t\t\t\tepochs=epochs,\n\t\t\t\tclass_weight=classWeight,\n\t\t\t\tverbose=verbose,\n\t\t\t\tcallbacks=[callback])\n\n\t\t# otherwise, our entire training data is already in memory\n\t\telse:\n\t\t\t# train our model using Keras' fit method\n\t\t\tself.model.fit(\n\t\t\t\ttrainData[0], 
trainData[1],\n\t\t\t\tbatch_size=batchSize,\n\t\t\t\tepochs=epochs,\n\t\t\t\tclass_weight=classWeight,\n\t\t\t\tcallbacks=[callback],\n\t\t\t\tverbose=verbose)\n\n\t\t# restore the original model weights and learning rate\n\t\tself.model.load_weights(self.weightsFile)\n\t\tK.set_value(self.model.optimizer.lr, origLR)\n\n\tdef plot_loss(self, skipBegin=10, skipEnd=1, title=\"\"):\n\t\t# grab the learning rate and losses values to plot\n\t\tlrs = self.lrs[skipBegin:-skipEnd]\n\t\tlosses = self.losses[skipBegin:-skipEnd]\n\n\t\t# plot the learning rate vs. loss\n\t\tplt.plot(lrs, losses)\n\t\tplt.xscale(\"log\")\n\t\tplt.xlabel(\"Learning Rate (Log Scale)\")\n\t\tplt.ylabel(\"Loss\")\n\n\t\t# if the title is not empty, add it to the plot\n\t\tif title != \"\":\n\t\t\tplt.title(title)" ]
[ [ "pandas.read_csv", "sklearn.naive_bayes.GaussianNB", "sklearn.metrics.confusion_matrix", "sklearn.model_selection.train_test_split", "sklearn.feature_extraction.text.CountVectorizer" ], [ "tensorflow.keras.backend.set_value", "matplotlib.pyplot.title", "tensorflow.keras.backend.get_value", "matplotlib.pyplot.xscale", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
Emmyphung/flexible-input-slu
[ "a2c7fff640b2b4aec830f3ca1b447c28dc506bb4" ]
[ "experiments/experiment_base_original.py" ]
[ "# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nimport torch\nimport numpy as np\nfrom utils.utils import AverageMeter\nfrom tqdm import tqdm\n# from utils.visualize import plot_confusion_matrix\nfrom sklearn.metrics import confusion_matrix\n\n\nclass ExperimentRunnerBase:\n def __init__(self, args):\n # Set the LR Scheduler and Loss Parameters\n if args.scheduler == 'plateau':\n self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,\n factor=0.5,\n patience=3,\n mode='max',\n verbose=True)\n elif args.scheduler == 'cycle':\n self.scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer,\n max_lr=args.learning_rate,\n steps_per_epoch=len(self.train_loader),\n epochs=args.num_epochs)\n self.criterion = torch.nn.CrossEntropyLoss()\n self.visualize = args.visualize\n if self.visualize:\n print(\"if visualize is true this line will run\")\n from torch.utils.tensorboard import SummaryWriter\n self.writer = SummaryWriter()\n\n # Training specific params\n self.args = args\n self.num_epochs = args.num_epochs\n self.print_every = args.print_every\n self.val_every = args.val_every\n self.model_dir = args.model_dir\n self.save_every = args.save_every\n\n def train(self):\n # Setting the variables before starting the training\n avg_train_loss = AverageMeter()\n avg_train_acc = AverageMeter()\n best_val_acc = -np.inf\n\n for epoch in range(self.num_epochs):\n avg_train_loss.reset()\n avg_train_acc.reset()\n\n # Mini batch loop\n for batch_idx, batch in enumerate(tqdm(self.train_loader)):\n step = epoch * len(self.train_loader) + batch_idx\n\n # Get the model output for the batch and update the loss and accuracy meters\n train_loss, train_acc = self.train_step(batch)\n if self.args.scheduler == 'cycle':\n self.scheduler.step()\n avg_train_loss.update([train_loss.item()])\n avg_train_acc.update([train_acc])\n\n # Save the step checkpoint if needed\n # if step % self.save_every == 0:\n # step_chkpt_path = os.path.join(self.model_dir,\n # 'step_chkpt_{}_{}.pth'.format(epoch, step))\n # print(\"Saving the model checkpoint for epoch {} at step {}\".format(epoch, step))\n # torch.save(self.model.state_dict(), step_chkpt_path)\n\n # Logging and validation check\n if step % self.print_every == 0:\n print('Epoch {}, batch {}, step {}, '\n 'loss = {:.4f}, acc = {:.4f}, '\n 'running averages: loss = {:.4f}, acc = {:.4f}'.format(epoch,\n batch_idx,\n step,\n train_loss.item(),\n train_acc,\n avg_train_loss.get(),\n avg_train_acc.get()))\n\n if step % self.val_every == 0:\n val_loss, val_acc = self.val()\n print('Val acc = {:.4f}, Val loss = {:.4f}'.format(val_acc, val_loss))\n if self.visualize:\n self.writer.add_scalar('Val/loss', val_loss, step)\n self.writer.add_scalar('Val/acc', val_acc, step)\n\n # Update the save the best validation checkpoint if needed\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n best_chkpt_path = os.path.join(self.model_dir,\n 'best_ckpt.pth')\n 
torch.save(self.model.state_dict(), best_chkpt_path)\n if self.args.scheduler == 'plateau':\n self.scheduler.step(val_acc)\n\n if self.visualize:\n # Log data to\n self.writer.add_scalar('Train/loss', train_loss.item(), step)\n self.writer.add_scalar('Train/acc', train_acc, step)\n\n def compute_loss(self, batch):\n \"\"\" This function is specific to the kind of model we are training and must be implemented \"\"\"\n raise NotImplementedError\n\n def train_step(self, batch):\n self.model.train()\n self.optimizer.zero_grad()\n metrics = self.compute_loss(batch)\n metrics['loss'].backward()\n self.optimizer.step()\n return metrics['loss'], metrics['accuracy']\n\n def load_model_for_eval(self):\n chkpt_path = os.path.join(self.model_dir, 'best_ckpt.pth') \\\n if self.args.eval_checkpoint_path is None else self.args.eval_checkpoint_path\n self.model.load_state_dict(torch.load(chkpt_path))\n self.model.eval()\n\n @torch.no_grad()\n def val(self):\n print('VALIDATING:')\n avg_val_loss = AverageMeter()\n avg_val_acc = AverageMeter()\n\n self.model.eval()\n for batch_idx, batch in enumerate(tqdm(self.val_loader)):\n metrics = self.compute_loss(batch)\n avg_val_acc.update(metrics['correct'].cpu().numpy())\n avg_val_loss.update([metrics['loss']])\n return avg_val_loss.get(), avg_val_acc.get()\n\n @torch.no_grad()\n def infer(self):\n self.load_model_for_eval()\n avg_test_loss = AverageMeter()\n avg_test_acc = AverageMeter()\n all_true_labels = []\n all_pred_labels = []\n all_audio_embeddings = []\n all_text_embeddings = []\n\n for batch_idx, batch in enumerate(tqdm(self.test_loader)):\n # Get the model output and update the meters\n output = self.compute_loss(batch)\n avg_test_acc.update(output['correct'].cpu().numpy())\n avg_test_loss.update([output['loss']])\n\n # Store the Predictions\n all_true_labels.append(batch['label'].cpu())\n all_pred_labels.append(output['predicted'].cpu())\n all_audio_embeddings.append(output['model_output']['audio_embed'].cpu())\n all_text_embeddings.append(output['model_output']['text_embed'].cpu())\n\n # Collect the predictions and embeddings for the full set\n all_true_labels = torch.cat(all_true_labels).numpy()\n all_pred_labels = torch.cat(all_pred_labels).numpy()\n all_audio_embeddings = torch.cat(all_audio_embeddings).numpy()\n all_text_embeddings = torch.cat(all_text_embeddings).numpy()\n\n # Save the embeddings and plot the confusion matrix\n np.savez_compressed('embeddings.npz',\n audio=all_audio_embeddings,\n text=all_text_embeddings,\n labels=all_true_labels)\n # cm = confusion_matrix(all_true_labels, all_pred_labels)\n # plot_confusion_matrix(cm, self.test_loader.dataset.labels_list(), normalize=True)\n\n print('Final test acc = {:.4f}, test loss = {:.4f}'.format(avg_test_acc.get(), avg_test_loss.get()))\n return avg_test_loss.get(), avg_test_acc.get()\n" ]
[ [ "torch.nn.CrossEntropyLoss", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.load", "torch.cat", "numpy.savez_compressed", "torch.no_grad", "torch.utils.tensorboard.SummaryWriter" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Monster880/pytorch_py
[ "9c5ac5974f48edb5ea3d897a1100a63d488c61d9" ]
[ "load_cifar10.py" ]
[ "import glob\n\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader, Dataset\nimport os\nfrom PIL import Image\nimport numpy as np\n\nlabel_name = [\n \"airplane\",\n \"automobile\",\n \"bird\",\n \"cat\",\n \"deer\",\n \"dog\",\n \"frog\",\n \"horse\",\n \"ship\",\n \"truck\"\n]\n\nlabel_dict = {}\n\nfor idx, name in enumerate(label_name):\n label_dict[name] = idx\n\ndef default_loader(path):\n return Image.open(path).convert(\"RGB\")\n\n# train_transform = transforms.Compose([\n# transforms.RandomResizedCrop((28 , 28)),\n# transforms.RandomHorizontalFlip(),\n# transforms.RandomVerticalFlip(),\n# transforms.RandomRotation(90),\n# transforms.RandomGrayscale(0.1),\n# transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),\n# transforms.ToTensor()\n# ])\n\ntrain_transform = transforms.Compose([\n transforms.RandomCrop(28),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor()\n])\n\ntest_transform = transforms.Compose([\n transforms.Resize((28 , 28)),\n transforms.ToTensor()\n])\n\nclass MyDataSet(Dataset):\n def __init__(self, im_list, transform=None, loader = default_loader):\n super(MyDataSet, self).__init__()\n imgs = []\n\n for im_item in im_list:\n im_label_name = im_item.split(\"/\")[-2]\n imgs.append([im_item, label_dict[im_label_name]])\n\n self.imgs = imgs\n self.transfrom = transform\n self.loader = loader\n\n\n def __getitem__(self, index):\n im_path, im_label = self.imgs[index]\n im_data = self.loader(im_path)\n\n if self.transfrom is not None:\n im_data = self.transfrom(im_data)\n\n return im_data,im_label\n\n def __len__(self):\n return len(self.imgs)\n\nim_train_list = glob.glob(\"/Users/liding/Documents/pytorch_py/train/*/*.png\")\nim_test_list = glob.glob(\"/Users/liding/Documents/pytorch_py/test/*/*.png\")\n\ntrain_dataset = MyDataSet(im_train_list, transform= train_transform)\ntest_dataset = MyDataSet(im_test_list, transform= transforms.ToTensor())\n\ntrain_data_loader = DataLoader(dataset = train_dataset, batch_size=6, shuffle=True, num_workers=4)\ntest_data_loader = DataLoader(dataset = test_dataset, batch_size=6, shuffle=False, num_workers=4)\n\nprint(\"num_of_train\", len(train_dataset))\nprint(\"num_of_test\", len(test_dataset))" ]
[ [ "torch.utils.data.DataLoader" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
svenvanderburg/EEG_age_prediction
[ "958e8d6445bf277a445608e05d779315dbd9b376", "958e8d6445bf277a445608e05d779315dbd9b376", "958e8d6445bf277a445608e05d779315dbd9b376" ]
[ "scripts/DL_final_Encoder_regressor.py", "scripts/DL_final_InceptionTime_regressor.py", "scripts/ML_train_03.py" ]
[ "#!/usr/bin/env python\n\n# ================ IMPORT LIBRARIES ================ #\nimport sys, os, fnmatch, time\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\n\nsys.path.insert(0, os.path.dirname(os.getcwd()))\n\nfrom dataset_generator import DataGenerator\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nfrom tensorflow import keras\nfrom tensorflow.keras import layers, Input, Sequential\nfrom tensorflow.keras.layers import Bidirectional, LSTM, Dropout, BatchNormalization, Dense, Conv1D, LeakyReLU, AveragePooling1D, Flatten, Reshape, MaxPooling1D\nfrom tensorflow.keras.optimizers import Adam, Adadelta, SGD\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n\nn_timesteps = 501\nn_features = 30 \nn_outputs = 1\n\nCOUNT_MODEL = \"FINAL\" # This will be appended to the saved model's name. To make sure to not overwrite models, increase this.\nMAX_QUEUE_SIZE = 5000\nWORKERS = 6\n\ninput_shape = (n_timesteps, n_features)\n\n# Input and output folders\nPATH_DATA_PROCESSED_DL = sys.argv[1]\nPATH_OUTPUT = sys.argv[2]\n\n# ================ INITIAL LOGS ================ #\n\nprint(\"LOGGING: Imported all modules\")\n\n# ================ LOAD PREPROCESSED DATA ================ #\n\n# Step 1: Get all the files in the output folder\nfile_names = os.listdir(PATH_DATA_PROCESSED_DL)\n\n# Step 2: Get the full paths of the files (without extensions)\nfiles = [os.path.splitext(os.path.join(PATH_DATA_PROCESSED_DL, file_name))[0] for file_name in fnmatch.filter(file_names, \"*.zarr\")]\n\n# Step 3: Load all the metadata\nframes = []\n\nfor idx, feature_file in enumerate(files):\n df_metadata = pd.read_csv(feature_file.replace(\"processed_raw_\", \"processed_metadata_\") + \".csv\")\n frames.append(df_metadata)\n\ndf_metadata = pd.concat(frames) \n\n# Step 4: Add missing age information based on the age group the subject is in\ndf_metadata['age_months'].fillna(df_metadata['age_group'], inplace=True)\ndf_metadata['age_days'].fillna(df_metadata['age_group']*30, inplace=True)\ndf_metadata['age_years'].fillna(df_metadata['age_group']/12, inplace=True)\n\n# Step 5: List all the unique subject IDs\nsubject_ids = sorted(list(set(df_metadata[\"code\"].tolist())))\n\n# Step 6: Split the subjects into train, val and test\nIDs_train, IDs_temp = train_test_split(subject_ids, test_size=0.3, random_state=42)\nIDs_test, IDs_val = train_test_split(IDs_temp, test_size=0.5, random_state=42)\n\n# Step 7: Initialize DataGenerators\ntrain_generator_noise = DataGenerator(list_IDs = IDs_train,\n BASE_PATH = PATH_DATA_PROCESSED_DL,\n metadata = df_metadata,\n n_average = 30,\n batch_size = 10,\n gaussian_noise=0.01,\n iter_per_epoch = 30,\n n_timepoints = 501, \n n_channels=30, \n shuffle=True)\n\nval_generator = DataGenerator(list_IDs = IDs_val,\n BASE_PATH = PATH_DATA_PROCESSED_DL,\n metadata = df_metadata,\n n_average = 30,\n batch_size = 10,\n iter_per_epoch = 100,\n n_timepoints = 501,\n n_channels=30,\n shuffle=True)\n\nprint(\"LOGGING: Loaded all data and created generators\")\n\n# ================ Encoder model ================ #\n\ntry:\n def encoder_model():\n \"\"\" Returns the Encoder model from Ismail Fawaz et al. (2019). 
\"\"\"\n input_layer = keras.layers.Input(input_shape)\n\n # conv block -1\n conv1 = keras.layers.Conv1D(filters=128,kernel_size=5,strides=1,padding='same')(input_layer)\n conv1 = tfa.layers.InstanceNormalization()(conv1)\n conv1 = keras.layers.PReLU(shared_axes=[1])(conv1)\n conv1 = keras.layers.Dropout(rate=0.2)(conv1)\n conv1 = keras.layers.MaxPooling1D(pool_size=2)(conv1)\n # conv block -2\n conv2 = keras.layers.Conv1D(filters=256,kernel_size=11,strides=1,padding='same')(conv1)\n conv2 = tfa.layers.InstanceNormalization()(conv2)\n conv2 = keras.layers.PReLU(shared_axes=[1])(conv2)\n conv2 = keras.layers.Dropout(rate=0.2)(conv2)\n conv2 = keras.layers.MaxPooling1D(pool_size=2)(conv2)\n # conv block -3\n conv3 = keras.layers.Conv1D(filters=512,kernel_size=21,strides=1,padding='same')(conv2)\n conv3 = tfa.layers.InstanceNormalization()(conv3)\n conv3 = keras.layers.PReLU(shared_axes=[1])(conv3)\n conv3 = keras.layers.Dropout(rate=0.2)(conv3)\n # split for attention\n attention_data = keras.layers.Lambda(lambda x: x[:,:,:256])(conv3)\n attention_softmax = keras.layers.Lambda(lambda x: x[:,:,256:])(conv3)\n # attention mechanism\n attention_softmax = keras.layers.Softmax()(attention_softmax)\n multiply_layer = keras.layers.Multiply()([attention_softmax,attention_data])\n # last layer\n dense_layer = keras.layers.Dense(units=256,activation='sigmoid')(multiply_layer)\n dense_layer = tfa.layers.InstanceNormalization()(dense_layer)\n # output layer\n flatten_layer = keras.layers.Flatten()(dense_layer)\n output_layer = keras.layers.Dense(1)(flatten_layer)\n\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n\n return model\n\n model = encoder_model()\n\n optimizer = Adam(learning_rate=0.00001) \n \n model.compile(loss='mean_squared_error', \n optimizer=optimizer, \n metrics=[RootMeanSquaredError(), MeanAbsoluteError()])\n\n output_filename = f'Encoder_regressor_{COUNT_MODEL}'\n output_file = os.path.join(PATH_OUTPUT, output_filename)\n\n checkpointer = ModelCheckpoint(filepath = output_file + \".hdf5\", monitor='val_loss', verbose=1, save_best_only=True)\n\n epochs = 500\n\n print(\"LOGGING: Starting Encoder model training\")\n # fit network\n history = model.fit(x=train_generator_noise,\n validation_data=val_generator,\n epochs=epochs, \n verbose=2, \n max_queue_size=MAX_QUEUE_SIZE,\n workers=WORKERS, \n callbacks=[checkpointer])\n print(\"LOGGING: Finished Encoder model training\")\nexcept Exception as e:\n print(\"LOGGING: Failed Encoder model training:\")\n print(e)\n pass\n", "#!/usr/bin/env python\n\n# ================ IMPORT LIBRARIES ================ #\nimport sys, os, fnmatch, time\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\n\nsys.path.insert(0, os.path.dirname(os.getcwd()))\n\nfrom dataset_generator import DataGenerator\n\nimport tensorflow as tf\nimport tensorflow_addons as tfa\nfrom tensorflow import keras\nfrom tensorflow.keras import layers, Input, Sequential\nfrom tensorflow.keras.layers import Bidirectional, LSTM, Dropout, BatchNormalization, Dense, Conv1D, LeakyReLU, AveragePooling1D, Flatten, Reshape, MaxPooling1D\nfrom tensorflow.keras.optimizers import Adam, Adadelta, SGD\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n\nn_timesteps = 501\nn_features = 30 \nn_outputs = 1\n\nCOUNT_MODEL = \"FINAL\" # This will be appended to the saved model's name. 
To make sure to not overwrite models, increase this.\nMAX_QUEUE_SIZE = 5000\nWORKERS = 6\n\ninput_shape = (n_timesteps, n_features)\n\n# Input and output folders\nPATH_DATA_PROCESSED_DL = sys.argv[1]\nPATH_OUTPUT = sys.argv[2]\n\n# ================ INITIAL LOGS ================ #\n\nprint(\"LOGGING: Imported all modules\")\n\n# ================ LOAD PREPROCESSED DATA ================ #\n\n# Step 1: Get all the files in the output folder\nfile_names = os.listdir(PATH_DATA_PROCESSED_DL)\n\n# Step 2: Get the full paths of the files (without extensions)\nfiles = [os.path.splitext(os.path.join(PATH_DATA_PROCESSED_DL, file_name))[0] for file_name in fnmatch.filter(file_names, \"*.zarr\")]\n\n# Step 3: Load all the metadata\nframes = []\n\nfor idx, feature_file in enumerate(files):\n df_metadata = pd.read_csv(feature_file.replace(\"processed_raw_\", \"processed_metadata_\") + \".csv\")\n frames.append(df_metadata)\n\ndf_metadata = pd.concat(frames) \n\n# Step 4: Add missing age information based on the age group the subject is in\ndf_metadata['age_months'].fillna(df_metadata['age_group'], inplace=True)\ndf_metadata['age_days'].fillna(df_metadata['age_group']*30, inplace=True)\ndf_metadata['age_years'].fillna(df_metadata['age_group']/12, inplace=True)\n\n# Step 5: List all the unique subject IDs\nsubject_ids = sorted(list(set(df_metadata[\"code\"].tolist())))\n\n# Step 6: Split the subjects into train, val and test\nIDs_train, IDs_temp = train_test_split(subject_ids, test_size=0.3, random_state=42)\nIDs_test, IDs_val = train_test_split(IDs_temp, test_size=0.5, random_state=42)\n\n# Step 7: Initialize DataGenerators\ntrain_generator_noise = DataGenerator(list_IDs = IDs_train,\n BASE_PATH = PATH_DATA_PROCESSED_DL,\n metadata = df_metadata,\n n_average = 30,\n batch_size = 10,\n gaussian_noise=0.01,\n iter_per_epoch = 30,\n n_timepoints = 501, \n n_channels=30, \n shuffle=True)\n\nval_generator = DataGenerator(list_IDs = IDs_val,\n BASE_PATH = PATH_DATA_PROCESSED_DL,\n metadata = df_metadata,\n n_average = 30,\n batch_size = 10,\n iter_per_epoch = 100,\n n_timepoints = 501,\n n_channels=30,\n shuffle=True)\n\nprint(\"LOGGING: Loaded all data and created generators\")\n\n# ================ InceptionTime model ================ #\n\ntry:\n class Regressor_Inception:\n\n def __init__(self, output_directory, input_shape, verbose=False, build=True, batch_size=64,\n nb_filters=32, use_residual=True, use_bottleneck=True, depth=6, kernel_size=41, nb_epochs=1500):\n\n self.output_directory = output_directory\n\n self.nb_filters = nb_filters\n self.use_residual = use_residual\n self.use_bottleneck = use_bottleneck\n self.depth = depth\n self.kernel_size = kernel_size - 1\n self.callbacks = None\n self.batch_size = batch_size\n self.bottleneck_size = 32\n self.nb_epochs = nb_epochs\n\n if build == True:\n self.model = self.build_model(input_shape)\n if (verbose == True):\n self.model.summary()\n self.verbose = verbose\n self.model.save_weights(self.output_directory + '/inception_model_init.hdf5')\n\n def _inception_module(self, input_tensor, stride=1, activation='linear'):\n\n if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:\n input_inception = tf.keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1,\n padding='same', activation=activation, use_bias=False)(input_tensor)\n else:\n input_inception = input_tensor\n\n # kernel_size_s = [3, 5, 8, 11, 17]\n kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]\n\n conv_list = []\n\n for i in range(len(kernel_size_s)):\n 
conv_list.append(tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i],\n strides=stride, padding='same', activation=activation, use_bias=False)(\n input_inception))\n\n max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=3, strides=stride, padding='same')(input_tensor)\n\n conv_6 = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1,\n padding='same', activation=activation, use_bias=False)(max_pool_1)\n\n conv_list.append(conv_6)\n\n x = tf.keras.layers.Concatenate(axis=2)(conv_list)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation(activation='relu')(x)\n return x\n\n def _shortcut_layer(self, input_tensor, out_tensor):\n shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1,\n padding='same', use_bias=False)(input_tensor)\n shortcut_y = tf.keras.layers.BatchNormalization()(shortcut_y)\n\n x = tf.keras.layers.Add()([shortcut_y, out_tensor])\n x = tf.keras.layers.Activation('relu')(x)\n return x\n\n def build_model(self, input_shape):\n input_layer = tf.keras.layers.Input(input_shape)\n\n x = input_layer\n input_res = input_layer\n\n for d in range(self.depth):\n\n x = self._inception_module(x)\n\n if self.use_residual and d % 3 == 2:\n x = self._shortcut_layer(input_res, x)\n input_res = x\n\n pooling_layer = tf.keras.layers.AveragePooling1D(pool_size=50)(x)\n flat_layer = tf.keras.layers.Flatten()(pooling_layer)\n dense_layer = tf.keras.layers.Dense(128, activation='relu')(flat_layer)\n output_layer = tf.keras.layers.Dense(1)(dense_layer)\n\n model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)\n\n return model\n\n model = Regressor_Inception(PATH_OUTPUT, input_shape, verbose=False).model\n\n optimizer = Adam(learning_rate=0.01) \n \n model.compile(loss='mean_squared_error', \n optimizer=optimizer, \n metrics=[RootMeanSquaredError(), MeanAbsoluteError()])\n\n output_filename = f'Inception_regressor_{COUNT_MODEL}'\n output_file = os.path.join(PATH_OUTPUT, output_filename)\n\n checkpointer = ModelCheckpoint(filepath = output_file + \".hdf5\", monitor='val_loss', verbose=1, save_best_only=True)\n earlystopper = EarlyStopping(monitor='val_loss', patience=100, verbose=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=20, min_lr=0.0001, verbose=1)\n\n epochs = 1500\n\n # fit network\n print(\"LOGGING: Starting InceptionTime model training\")\n history = model.fit(x=train_generator_noise,\n validation_data=val_generator,\n epochs=epochs,\n verbose=2, \n max_queue_size=MAX_QUEUE_SIZE,\n workers=WORKERS, \n callbacks = [checkpointer, earlystopper, reduce_lr])\n print(\"LOGGING: Finished InceptionTime model training\")\nexcept Exception as e:\n print(\"LOGGING: Failed InceptionTime model training:\")\n print(e)\n pass", "import sys, os, fnmatch, csv\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils import shuffle\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.layers import Dropout, Dense, BatchNormalization\nfrom tensorflow.keras.optimizers import Adam, Adadelta\nfrom tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\nfrom tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n\nsys.path.insert(0, os.path.dirname(os.getcwd()))\n\n# Input and output folders\nPATH_DATA_PROCESSED_ML= sys.argv[1]\nPATH_OUTPUT = sys.argv[2]\n\nMAX_QUEUE_SIZE = 5000\nWORKERS = 6\n\n# 
Step 1: Get all the files in the output folder\nfile_names = os.listdir(PATH_DATA_PROCESSED_ML)\n\n# Step 2: Get the full paths of the files (without extensions)\nfiles = [os.path.splitext(os.path.join(PATH_DATA_PROCESSED_ML, file_name))[0] for file_name in fnmatch.filter(file_names, \"*.h5\")]\n\n# Step 3: Load the features\nframes = []\n\nfor idx, feature_file in enumerate(files):\n df_features = pd.read_hdf(feature_file + \".h5\")\n df_metadata = pd.read_csv(feature_file.replace(\"extracted_features_\", \"processed_data_\") + \".csv\")\n \n # Step 4: Assign labels\n df_features['label'] = df_metadata['age_months'][0]\n \n # Step 5: Assign subject code\n df_features['code'] = df_metadata['code'][0]\n frames.append(df_features)\n\ndf = pd.concat(frames) \n\n# Step 6: List all the unique subject IDs\nsubject_ids = sorted(list(set(df[\"code\"].tolist())))\n\nIDs_train, IDs_temp = train_test_split(subject_ids, test_size=0.3, random_state=42)\nIDs_test, IDs_val = train_test_split(IDs_temp, test_size=0.5, random_state=42)\n\n# Step 7: Split the DataFrames into train, validation and test\ndf_train = df[df['code'].isin(IDs_train)]\ndf_val = df[df['code'].isin(IDs_val)]\ndf_test = df[df['code'].isin(IDs_test)]\n\nfeature_names = df.columns.values\n\nX_train = df_train.drop(['label', 'code'], axis=1).reset_index(drop=True)\ny_train = df_train['label'].reset_index(drop=True)\ncodes_train = df_train['code'].reset_index(drop=True)\n\nX_val = df_val.drop(['label', 'code'], axis=1).reset_index(drop=True)\ny_val = df_val['label'].reset_index(drop=True)\ncodes_val = df_val['code'].reset_index(drop=True)\n\nX_test = df_test.drop(['label', 'code'], axis=1).reset_index(drop=True)\ny_test = df_test['label'].reset_index(drop=True)\ncodes_test = df_test['code'].reset_index(drop=True)\n\nscaler = StandardScaler()\n\n# MARK: reducing from 64 bit float to 32 bit float, to reduce memory usage\nX_train = pd.DataFrame(scaler.fit_transform(X_train)).astype('float32')\nX_val = pd.DataFrame(scaler.fit_transform(X_val)).astype('float32')\nX_test = pd.DataFrame(scaler.fit_transform(X_test)).astype('float32')\n\ndel(file_names, files, df, frames, df_features, df_metadata, df_train, df_test, df_val, IDs_train, IDs_val, IDs_test, IDs_temp)\n\ninput_shape=(450, )\n\ntry:\n def fully_connected_model():\n model = keras.Sequential()\n \n model.add(Dense(300, activation='tanh', input_shape=input_shape))\n model.add(BatchNormalization())\n model.add(Dropout(0.3))\n \n model.add(Dense(200, activation='tanh'))\n model.add(BatchNormalization())\n\n model.add(Dense(1))\n \n return model\n \n model = fully_connected_model()\n\n optimizer = Adadelta(learning_rate=0.01) \n model.compile(loss='mean_squared_error', \n optimizer=optimizer, \n metrics=[RootMeanSquaredError(), MeanAbsoluteError()])\n\n output_filename = 'FC_regressor_03'\n output_file = os.path.join(PATH_OUTPUT, output_filename)\n\n checkpointer = ModelCheckpoint(filepath = output_file + \".hdf5\", monitor='val_loss', verbose=1, save_best_only=True)\n earlystopper = EarlyStopping(monitor='val_loss', patience=1000, verbose=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=50, min_lr=0.0001, verbose=1)\n\n epochs = 5000\n\n print(\"LOGGING: Starting FC_regressor_03 training\")\n\n # fit network\n history = model.fit(x=X_train,\n y=y_train,\n validation_data=(X_val, y_val),\n epochs=epochs,\n verbose=2,\n max_queue_size=MAX_QUEUE_SIZE,\n workers=WORKERS, \n callbacks = [checkpointer, earlystopper, reduce_lr])\nexcept Exception as e:\n print(\"LOGGING: 
Failed FC_regressor_03 training:\")\n print(e)\n pass\n" ]
[ [ "tensorflow.keras.callbacks.ModelCheckpoint", "pandas.concat", "tensorflow.keras.layers.Lambda", "tensorflow.keras.models.Model", "tensorflow.keras.layers.PReLU", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv1D", "sklearn.model_selection.train_test_split", "tensorflow.keras.layers.MaxPooling1D", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Multiply", "tensorflow.keras.metrics.MeanAbsoluteError", "tensorflow.keras.metrics.RootMeanSquaredError", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Softmax", "tensorflow.keras.layers.Input" ], [ "tensorflow.keras.layers.Concatenate", "tensorflow.keras.layers.Add", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.layers.Flatten", "tensorflow.keras.callbacks.ModelCheckpoint", "pandas.concat", "tensorflow.keras.layers.AveragePooling1D", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.ReduceLROnPlateau", "sklearn.model_selection.train_test_split", "tensorflow.keras.layers.Activation", "tensorflow.keras.layers.MaxPool1D", "tensorflow.keras.layers.Conv1D", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.metrics.MeanAbsoluteError", "tensorflow.keras.metrics.RootMeanSquaredError", "tensorflow.keras.layers.Input" ], [ "tensorflow.keras.callbacks.ModelCheckpoint", "pandas.concat", "pandas.read_hdf", "tensorflow.keras.layers.Dense", "tensorflow.keras.callbacks.ReduceLROnPlateau", "tensorflow.keras.Sequential", "sklearn.model_selection.train_test_split", "tensorflow.keras.optimizers.Adadelta", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.metrics.MeanAbsoluteError", "tensorflow.keras.metrics.RootMeanSquaredError", "tensorflow.keras.layers.Dropout", "sklearn.preprocessing.StandardScaler", "tensorflow.keras.callbacks.EarlyStopping" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [ "2.7", "2.2", "2.3", "2.4", "2.5", "2.6" ] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
qw85639229/hardest
[ "ef86536dbbe1089248e34afbbb7bb513f97f58f1", "ef86536dbbe1089248e34afbbb7bb513f97f58f1", "ef86536dbbe1089248e34afbbb7bb513f97f58f1" ]
[ "torchreid/data/datasets/video/mars.py", "torchreid/models/inceptionresnetv2.py", "torchreid/models/squeezenet.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport sys\nimport os\nimport os.path as osp\nfrom scipy.io import loadmat\nimport warnings\n\nfrom torchreid.data.datasets import VideoDataset\n\n\nclass Mars(VideoDataset):\n \"\"\"MARS.\n\n Reference:\n Zheng et al. MARS: A Video Benchmark for Large-Scale Person Re-identification. ECCV 2016.\n\n URL: `<http://www.liangzheng.com.cn/Project/project_mars.html>`_\n \n Dataset statistics:\n - identities: 1261.\n - tracklets: 8298 (train) + 1980 (query) + 9330 (gallery).\n - cameras: 6.\n \"\"\"\n dataset_dir = 'mars'\n dataset_url = None\n\n def __init__(self, root='', **kwargs):\n self.root = osp.abspath(osp.expanduser(root))\n self.dataset_dir = osp.join(self.root, self.dataset_dir)\n self.download_dataset(self.dataset_dir, self.dataset_url)\n\n self.train_name_path = osp.join(self.dataset_dir, 'info/train_name.txt')\n self.test_name_path = osp.join(self.dataset_dir, 'info/test_name.txt')\n self.track_train_info_path = osp.join(self.dataset_dir, 'info/tracks_train_info.mat')\n self.track_test_info_path = osp.join(self.dataset_dir, 'info/tracks_test_info.mat')\n self.query_IDX_path = osp.join(self.dataset_dir, 'info/query_IDX.mat')\n\n required_files = [\n self.dataset_dir,\n self.train_name_path,\n self.test_name_path,\n self.track_train_info_path,\n self.track_test_info_path,\n self.query_IDX_path\n ]\n self.check_before_run(required_files)\n\n train_names = self.get_names(self.train_name_path)\n test_names = self.get_names(self.test_name_path)\n track_train = loadmat(self.track_train_info_path)['track_train_info'] # numpy.ndarray (8298, 4)\n track_test = loadmat(self.track_test_info_path)['track_test_info'] # numpy.ndarray (12180, 4)\n query_IDX = loadmat(self.query_IDX_path)['query_IDX'].squeeze() # numpy.ndarray (1980,)\n query_IDX -= 1 # index from 0\n track_query = track_test[query_IDX,:]\n gallery_IDX = [i for i in range(track_test.shape[0]) if i not in query_IDX]\n track_gallery = track_test[gallery_IDX,:]\n\n train = self.process_data(train_names, track_train, home_dir='bbox_train', relabel=True)\n query = self.process_data(test_names, track_query, home_dir='bbox_test', relabel=False)\n gallery = self.process_data(test_names, track_gallery, home_dir='bbox_test', relabel=False)\n\n super(Mars, self).__init__(train, query, gallery, **kwargs)\n\n def get_names(self, fpath):\n names = []\n with open(fpath, 'r') as f:\n for line in f:\n new_line = line.rstrip()\n names.append(new_line)\n return names\n\n def process_data(self, names, meta_data, home_dir=None, relabel=False, min_seq_len=0):\n assert home_dir in ['bbox_train', 'bbox_test']\n num_tracklets = meta_data.shape[0]\n pid_list = list(set(meta_data[:,2].tolist()))\n num_pids = len(pid_list)\n\n if relabel: pid2label = {pid:label for label, pid in enumerate(pid_list)}\n tracklets = []\n\n for tracklet_idx in range(num_tracklets):\n data = meta_data[tracklet_idx,...]\n start_index, end_index, pid, camid = data\n if pid == -1:\n continue # junk images are just ignored\n assert 1 <= camid <= 6\n if relabel: pid = pid2label[pid]\n camid -= 1 # index starts from 0\n img_names = names[start_index - 1:end_index]\n\n # make sure image names correspond to the same person\n pnames = [img_name[:4] for img_name in img_names]\n assert len(set(pnames)) == 1, 'Error: a single tracklet contains different person images'\n\n # make sure all images are captured under the same camera\n camnames = [img_name[5] for img_name in 
img_names]\n assert len(set(camnames)) == 1, 'Error: images are captured under different cameras!'\n\n # append image names with directory information\n img_paths = [osp.join(self.dataset_dir, home_dir, img_name[:4], img_name) for img_name in img_names]\n if len(img_paths) >= min_seq_len:\n img_paths = tuple(img_paths)\n tracklets.append((img_paths, pid, camid))\n\n return tracklets\n\n def combine_all(self):\n warnings.warn('Some query IDs do not appear in gallery. Therefore, combineall '\n 'does not make any difference to Mars')", "from __future__ import absolute_import\nfrom __future__ import division\n\n__all__ = ['inceptionresnetv2']\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\nimport torch.utils.model_zoo as model_zoo\nimport os\nimport sys\n\n\n\"\"\"\nCode imported from https://github.com/Cadene/pretrained-models.pytorch\n\"\"\"\n\n\npretrained_settings = {\n 'inceptionresnetv2': {\n 'imagenet': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 299, 299],\n 'input_range': [0, 1],\n 'mean': [0.5, 0.5, 0.5],\n 'std': [0.5, 0.5, 0.5],\n 'num_classes': 1000\n },\n 'imagenet+background': {\n 'url': 'http://data.lip6.fr/cadene/pretrainedmodels/inceptionresnetv2-520b38e4.pth',\n 'input_space': 'RGB',\n 'input_size': [3, 299, 299],\n 'input_range': [0, 1],\n 'mean': [0.5, 0.5, 0.5],\n 'std': [0.5, 0.5, 0.5],\n 'num_classes': 1001\n }\n }\n}\n\n\nclass BasicConv2d(nn.Module):\n\n def __init__(self, in_planes, out_planes, kernel_size, stride, padding=0):\n super(BasicConv2d, self).__init__()\n self.conv = nn.Conv2d(in_planes, out_planes,\n kernel_size=kernel_size, stride=stride,\n padding=padding, bias=False) # verify bias false\n self.bn = nn.BatchNorm2d(out_planes,\n eps=0.001, # value found in tensorflow\n momentum=0.1, # default pytorch value\n affine=True)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n x = self.relu(x)\n return x\n\n\nclass Mixed_5b(nn.Module):\n\n def __init__(self):\n super(Mixed_5b, self).__init__()\n\n self.branch0 = BasicConv2d(192, 96, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(192, 48, kernel_size=1, stride=1),\n BasicConv2d(48, 64, kernel_size=5, stride=1, padding=2)\n ) \n\n self.branch2 = nn.Sequential(\n BasicConv2d(192, 64, kernel_size=1, stride=1),\n BasicConv2d(64, 96, kernel_size=3, stride=1, padding=1),\n BasicConv2d(96, 96, kernel_size=3, stride=1, padding=1)\n )\n\n self.branch3 = nn.Sequential(\n nn.AvgPool2d(3, stride=1, padding=1, count_include_pad=False),\n BasicConv2d(192, 64, kernel_size=1, stride=1)\n )\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n out = torch.cat((x0, x1, x2, x3), 1)\n return out\n\n\nclass Block35(nn.Module):\n\n def __init__(self, scale=1.0):\n super(Block35, self).__init__()\n\n self.scale = scale\n\n self.branch0 = BasicConv2d(320, 32, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(320, 32, kernel_size=1, stride=1),\n BasicConv2d(32, 32, kernel_size=3, stride=1, padding=1)\n )\n\n self.branch2 = nn.Sequential(\n BasicConv2d(320, 32, kernel_size=1, stride=1),\n BasicConv2d(32, 48, kernel_size=3, stride=1, padding=1),\n BasicConv2d(48, 64, kernel_size=3, stride=1, padding=1)\n )\n\n self.conv2d = nn.Conv2d(128, 320, kernel_size=1, stride=1)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = 
self.branch1(x)\n x2 = self.branch2(x)\n out = torch.cat((x0, x1, x2), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n out = self.relu(out)\n return out\n\n\nclass Mixed_6a(nn.Module):\n\n def __init__(self):\n super(Mixed_6a, self).__init__()\n \n self.branch0 = BasicConv2d(320, 384, kernel_size=3, stride=2)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(320, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 256, kernel_size=3, stride=1, padding=1),\n BasicConv2d(256, 384, kernel_size=3, stride=2)\n )\n\n self.branch2 = nn.MaxPool2d(3, stride=2)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n out = torch.cat((x0, x1, x2), 1)\n return out\n\n\nclass Block17(nn.Module):\n\n def __init__(self, scale=1.0):\n super(Block17, self).__init__()\n\n self.scale = scale\n\n self.branch0 = BasicConv2d(1088, 192, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(1088, 128, kernel_size=1, stride=1),\n BasicConv2d(128, 160, kernel_size=(1,7), stride=1, padding=(0,3)),\n BasicConv2d(160, 192, kernel_size=(7,1), stride=1, padding=(3,0))\n )\n\n self.conv2d = nn.Conv2d(384, 1088, kernel_size=1, stride=1)\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n out = torch.cat((x0, x1), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n out = self.relu(out)\n return out\n\n\nclass Mixed_7a(nn.Module):\n\n def __init__(self):\n super(Mixed_7a, self).__init__()\n \n self.branch0 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 384, kernel_size=3, stride=2)\n )\n\n self.branch1 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 288, kernel_size=3, stride=2)\n )\n\n self.branch2 = nn.Sequential(\n BasicConv2d(1088, 256, kernel_size=1, stride=1),\n BasicConv2d(256, 288, kernel_size=3, stride=1, padding=1),\n BasicConv2d(288, 320, kernel_size=3, stride=2)\n )\n\n self.branch3 = nn.MaxPool2d(3, stride=2)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x3 = self.branch3(x)\n out = torch.cat((x0, x1, x2, x3), 1)\n return out\n\n\nclass Block8(nn.Module):\n\n def __init__(self, scale=1.0, noReLU=False):\n super(Block8, self).__init__()\n\n self.scale = scale\n self.noReLU = noReLU\n\n self.branch0 = BasicConv2d(2080, 192, kernel_size=1, stride=1)\n\n self.branch1 = nn.Sequential(\n BasicConv2d(2080, 192, kernel_size=1, stride=1),\n BasicConv2d(192, 224, kernel_size=(1,3), stride=1, padding=(0,1)),\n BasicConv2d(224, 256, kernel_size=(3,1), stride=1, padding=(1,0))\n )\n\n self.conv2d = nn.Conv2d(448, 2080, kernel_size=1, stride=1)\n if not self.noReLU:\n self.relu = nn.ReLU(inplace=False)\n\n def forward(self, x):\n x0 = self.branch0(x)\n x1 = self.branch1(x)\n out = torch.cat((x0, x1), 1)\n out = self.conv2d(out)\n out = out * self.scale + x\n if not self.noReLU:\n out = self.relu(out)\n return out\n\n\ndef inceptionresnetv2(num_classes=1000, pretrained='imagenet'):\n r\"\"\"InceptionResNetV2 model architecture from the\n `\"InceptionV4, Inception-ResNet...\" <https://arxiv.org/abs/1602.07261>`_ paper.\n \"\"\"\n if pretrained:\n settings = pretrained_settings['inceptionresnetv2'][pretrained]\n assert num_classes == settings['num_classes'], \\\n 'num_classes should be {}, but is {}'.format(settings['num_classes'], num_classes)\n\n # both 'imagenet'&'imagenet+background' are loaded from same parameters\n model = InceptionResNetV2(num_classes=1001)\n 
model.load_state_dict(model_zoo.load_url(settings['url']))\n \n if pretrained == 'imagenet':\n new_last_linear = nn.Linear(1536, 1000)\n new_last_linear.weight.data = model.last_linear.weight.data[1:]\n new_last_linear.bias.data = model.last_linear.bias.data[1:]\n model.last_linear = new_last_linear\n \n model.input_space = settings['input_space']\n model.input_size = settings['input_size']\n model.input_range = settings['input_range']\n \n model.mean = settings['mean']\n model.std = settings['std']\n else:\n model = InceptionResNetV2(num_classes=num_classes)\n return model\n\n\n##################### Model Definition #########################\n\n\nclass InceptionResNetV2(nn.Module):\n \"\"\"Inception-ResNet-V2.\n\n Reference:\n Szegedy et al. Inception-v4, Inception-ResNet and the Impact of Residual\n Connections on Learning. AAAI 2017.\n\n Public keys:\n - ``inceptionresnetv2``: Inception-ResNet-V2.\n \"\"\"\n \n def __init__(self, num_classes, loss='softmax', **kwargs):\n super(InceptionResNetV2, self).__init__()\n self.loss = loss\n \n # Modules\n self.conv2d_1a = BasicConv2d(3, 32, kernel_size=3, stride=2)\n self.conv2d_2a = BasicConv2d(32, 32, kernel_size=3, stride=1)\n self.conv2d_2b = BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1)\n self.maxpool_3a = nn.MaxPool2d(3, stride=2)\n self.conv2d_3b = BasicConv2d(64, 80, kernel_size=1, stride=1)\n self.conv2d_4a = BasicConv2d(80, 192, kernel_size=3, stride=1)\n self.maxpool_5a = nn.MaxPool2d(3, stride=2)\n self.mixed_5b = Mixed_5b()\n self.repeat = nn.Sequential(\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17),\n Block35(scale=0.17)\n )\n self.mixed_6a = Mixed_6a()\n self.repeat_1 = nn.Sequential(\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10),\n Block17(scale=0.10)\n )\n self.mixed_7a = Mixed_7a()\n self.repeat_2 = nn.Sequential(\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20),\n Block8(scale=0.20)\n )\n \n self.block8 = Block8(noReLU=True)\n self.conv2d_7b = BasicConv2d(2080, 1536, kernel_size=1, stride=1)\n self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n self.classifier = nn.Linear(1536, num_classes)\n\n def load_imagenet_weights(self):\n settings = pretrained_settings['inceptionresnetv2']['imagenet']\n pretrain_dict = model_zoo.load_url(settings['url'])\n model_dict = self.state_dict()\n pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}\n model_dict.update(pretrain_dict)\n self.load_state_dict(model_dict)\n\n def featuremaps(self, x):\n x = self.conv2d_1a(x)\n x = self.conv2d_2a(x)\n x = self.conv2d_2b(x)\n x = self.maxpool_3a(x)\n x = self.conv2d_3b(x)\n x = self.conv2d_4a(x)\n x = self.maxpool_5a(x)\n x = self.mixed_5b(x)\n x = self.repeat(x)\n x = self.mixed_6a(x)\n x = self.repeat_1(x)\n x = self.mixed_7a(x)\n x = self.repeat_2(x)\n x = self.block8(x)\n x = self.conv2d_7b(x)\n return x\n\n def 
forward(self, x):\n f = self.featuremaps(x)\n v = self.global_avgpool(f)\n v = v.view(v.size(0), -1)\n\n if not self.training:\n return v\n\n y = self.classifier(v)\n\n if self.loss == 'softmax':\n return y\n elif self.loss == 'triplet':\n return y, v\n else:\n raise KeyError('Unsupported loss: {}'.format(self.loss))\n\n\ndef inceptionresnetv2(num_classes, loss='softmax', pretrained=True, **kwargs):\n model = InceptionResNetV2(\n num_classes=num_classes,\n loss=loss,\n **kwargs\n )\n if pretrained:\n model.load_imagenet_weights()\n return model", "\"\"\"\nCode source: https://github.com/pytorch/vision\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n\n__all__ = [\n 'squeezenet1_0',\n 'squeezenet1_1',\n 'squeezenet1_0_fc512'\n]\n\nfrom collections import OrderedDict\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils import model_zoo\nfrom torch.nn import functional as F\nimport torch.nn.init as init\nimport torchvision\nimport torch.utils.model_zoo as model_zoo\n\n\nmodel_urls = {\n 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-a815701f.pth',\n 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-f364aa15.pth',\n}\n\n\nclass Fire(nn.Module):\n\n def __init__(self, inplanes, squeeze_planes,\n expand1x1_planes, expand3x3_planes):\n super(Fire, self).__init__()\n self.inplanes = inplanes\n self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)\n self.squeeze_activation = nn.ReLU(inplace=True)\n self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes,\n kernel_size=1)\n self.expand1x1_activation = nn.ReLU(inplace=True)\n self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes,\n kernel_size=3, padding=1)\n self.expand3x3_activation = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.squeeze_activation(self.squeeze(x))\n return torch.cat([\n self.expand1x1_activation(self.expand1x1(x)),\n self.expand3x3_activation(self.expand3x3(x))\n ], 1)\n\n\nclass SqueezeNet(nn.Module):\n \"\"\"SqueezeNet.\n\n Reference:\n Iandola et al. SqueezeNet: AlexNet-level accuracy with 50x fewer parameters\n and< 0.5 MB model size. 
arXiv:1602.07360.\n\n Public keys:\n - ``squeezenet1_0``: SqueezeNet (version=1.0).\n - ``squeezenet1_1``: SqueezeNet (version=1.1).\n - ``squeezenet1_0_fc512``: SqueezeNet (version=1.0) + FC.\n \"\"\"\n \n def __init__(self, num_classes, loss, version=1.0, fc_dims=None, dropout_p=None, **kwargs):\n super(SqueezeNet, self).__init__()\n self.loss = loss\n self.feature_dim = 512\n\n if version not in [1.0, 1.1]:\n raise ValueError('Unsupported SqueezeNet version {version}:'\n '1.0 or 1.1 expected'.format(version=version))\n\n if version == 1.0:\n self.features = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=7, stride=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(96, 16, 64, 64),\n Fire(128, 16, 64, 64),\n Fire(128, 32, 128, 128),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(256, 32, 128, 128),\n Fire(256, 48, 192, 192),\n Fire(384, 48, 192, 192),\n Fire(384, 64, 256, 256),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(512, 64, 256, 256),\n )\n else:\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, stride=2),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(64, 16, 64, 64),\n Fire(128, 16, 64, 64),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(128, 32, 128, 128),\n Fire(256, 32, 128, 128),\n nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),\n Fire(256, 48, 192, 192),\n Fire(384, 48, 192, 192),\n Fire(384, 64, 256, 256),\n Fire(512, 64, 256, 256),\n )\n\n self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = self._construct_fc_layer(fc_dims, 512, dropout_p)\n self.classifier = nn.Linear(self.feature_dim, num_classes)\n\n self._init_params()\n\n def _construct_fc_layer(self, fc_dims, input_dim, dropout_p=None):\n \"\"\"Constructs fully connected layer\n\n Args:\n fc_dims (list or tuple): dimensions of fc layers, if None, no fc layers are constructed\n input_dim (int): input dimension\n dropout_p (float): dropout probability, if None, dropout is unused\n \"\"\"\n if fc_dims is None:\n self.feature_dim = input_dim\n return None\n \n assert isinstance(fc_dims, (list, tuple)), 'fc_dims must be either list or tuple, but got {}'.format(type(fc_dims))\n \n layers = []\n for dim in fc_dims:\n layers.append(nn.Linear(input_dim, dim))\n layers.append(nn.BatchNorm1d(dim))\n layers.append(nn.ReLU(inplace=True))\n if dropout_p is not None:\n layers.append(nn.Dropout(p=dropout_p))\n input_dim = dim\n \n self.feature_dim = fc_dims[-1]\n \n return nn.Sequential(*layers)\n\n def _init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def forward(self, x):\n f = self.features(x)\n v = self.global_avgpool(f)\n v = v.view(v.size(0), -1)\n\n if self.fc is not None:\n v = self.fc(v)\n\n if not self.training:\n return v\n\n y = self.classifier(v)\n\n if self.loss == 'softmax':\n return y\n elif self.loss == 'triplet':\n return y, v\n else:\n raise KeyError('Unsupported loss: {}'.format(self.loss))\n\n\ndef init_pretrained_weights(model, model_url):\n \"\"\"Initializes model 
with pretrained weights.\n \n Layers that don't match with pretrained layers in name or size are kept unchanged.\n \"\"\"\n pretrain_dict = model_zoo.load_url(model_url, map_location=None)\n model_dict = model.state_dict()\n pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}\n model_dict.update(pretrain_dict)\n model.load_state_dict(model_dict)\n\n\ndef squeezenet1_0(num_classes, loss='softmax', pretrained=True, **kwargs):\n model = SqueezeNet(\n num_classes,\n loss,\n version=1.0,\n fc_dims=None,\n dropout_p=None,\n **kwargs\n )\n if pretrained:\n init_pretrained_weights(model, model_urls['squeezenet1_0'])\n return model\n\n\ndef squeezenet1_0_fc512(num_classes, loss='softmax', pretrained=True, **kwargs):\n model = SqueezeNet(\n num_classes,\n loss,\n version=1.0,\n fc_dims=[512],\n dropout_p=None,\n **kwargs\n )\n if pretrained:\n init_pretrained_weights(model, model_urls['squeezenet1_0'])\n return model\n\n\ndef squeezenet1_1(num_classes, loss='softmax', pretrained=True, **kwargs):\n model = SqueezeNet(\n num_classes,\n loss,\n version=1.1,\n fc_dims=None,\n dropout_p=None,\n **kwargs\n )\n if pretrained:\n init_pretrained_weights(model, model_urls['squeezenet1_1'])\n return model" ]
[ [ "scipy.io.loadmat" ], [ "torch.cat", "torch.nn.Conv2d", "torch.nn.MaxPool2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.ReLU", "torch.utils.model_zoo.load_url" ], [ "torch.nn.Sequential", "torch.nn.BatchNorm1d", "torch.nn.Dropout", "torch.nn.init.constant_", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.AdaptiveAvgPool2d", "torch.nn.init.normal_", "torch.nn.ReLU", "torch.utils.model_zoo.load_url", "torch.nn.init.kaiming_normal_" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
paperclip/tf2-eager-yolo3
[ "7f6b137c50525f91ed5026c6cb1d556e9b6d9bed" ]
[ "predRoC.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport tensorflow as tf\nimport argparse\nimport cv2\nimport matplotlib.pyplot as plt\nimport glob\nimport json\nimport sys\nimport os\n\nfrom yolo.utils.box import visualize_boxes\nfrom yolo.config import ConfigParser\n\nif tf.executing_eagerly():\n print(\"Executing eargerly\")\nelse:\n print(\"Executing lazily\")\n\ntf.enable_eager_execution()\n\n\nargparser = argparse.ArgumentParser(\n description='test yolov3 network with coco weights')\n\nargparser.add_argument(\n '-c',\n '--config',\n default=\"configs/predict_coco.json\",\n help='config file')\n\nargparser.add_argument(\n 'images',\n nargs='+',\n help='path to image files')\n\nCAT = []\nNOT_CAT = []\n\ndef predictImage(image_path, detector, class_labels):\n\n if \"*\" in image_path:\n images = glob.glob(image_path)\n for i in images:\n predictImage(i, detector, class_labels)\n return\n\n global CAT\n global NOT_CAT\n # 2. Load image\n image = cv2.imread(image_path)\n image = image[:,:,::-1]\n\n # 3. Run detection\n boxes, labels, probs = detector.detect(image, 0.05)\n\n # print(list(zip(labels, probs)))\n\n cat = 0.0\n\n if len(labels) == 0:\n print(image_path, \"nothing found\")\n\n for (l, p) in zip(labels, probs):\n print(image_path, class_labels[l], p)\n if class_labels[l] == \"cat\":\n cat = max(cat, p)\n\n is_cat = \"not_cat\" not in image_path\n\n if is_cat:\n CAT.append(cat)\n else:\n NOT_CAT.append(cat)\n\n # # 4. draw detected boxes\n # visualize_boxes(image, boxes, labels, probs, config_parser.get_labels())\n #\n # # 5. plot\n # plt.imshow(image)\n # plt.show()\n\ndef saveResults():\n global CAT\n global NOT_CAT\n CAT.sort()\n NOT_CAT.sort()\n if len(CAT) == 0:\n print(\"No cats found\")\n return\n if len(NOT_CAT) == 0:\n print(\"No non-cats found\")\n return\n\n sys.path.append(\n os.path.join(\n os.path.dirname(os.getcwd()),\n \"camera\"\n )\n )\n\n import tensorflow1.generate_roc_data\n results = tensorflow1.generate_roc_data.generate_roc_data(CAT, NOT_CAT)\n import json\n open(\"roc.json\",\"w\").write(json.dumps(results))\n\ndef main():\n args = argparser.parse_args()\n\n # 1. create yolo model & load weights\n config_parser = ConfigParser(args.config)\n model = config_parser.create_model(skip_detect_layer=False)\n detector = config_parser.create_detector(model)\n labels = config_parser.get_labels()\n\n for image in args.images:\n predictImage(image, detector, labels)\n\n saveResults()\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main())\n" ]
[ [ "tensorflow.enable_eager_execution", "tensorflow.executing_eagerly" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.13", "1.7", "1.10", "1.12" ] } ]
fol21/domain-adaptation-in-deforestation
[ "ae1c37b1634f54230f1d2217c209dabd6780568a", "ae1c37b1634f54230f1d2217c209dabd6780568a" ]
[ "src/ADDA/Networks.py", "src/CycleGAN/CycleGAN_DN/util/remote_sensing_visualizer.py" ]
[ "import os\nimport numpy as np\nimport tensorflow as tf\n\n\n\nclass Networks():\n def __init__(self, args):\n super(Networks, self).__init__()\n self.args = args\n \n # Wittich design\n def VNET_16L(self, I, is_train, reuse_unet=False, reuse_ada=False, adaption_net=False):\n\n def encoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train, dropout=0.0, stddev=-1.0, slope=0.00,\n use_bias=True):\n \n with tf.variable_scope(name) as scope:\n if scale > 1:\n X = self.conv(name + '_downsample', X, filter, scale, scale, (not norm) and use_bias, \"VALID\", stddev)\n else:\n X = self.conv(name + '_conf', X, filter, f_size, 1, (not norm) and use_bias, \"VALID\", stddev)\n if norm == 'I':\n X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse)\n elif norm == 'B':\n X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name)\n elif norm == 'G':\n X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)\n\n if dropout > 0.0:\n X = tf.layers.dropout(X, dropout, training=is_train)\n if slope < 1.0:\n X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)\n return X\n\n def decoder_conf(name, X, filter, f_size, scale, norm, reuse, is_train, dropout=0.0, stddev=-1.0, slope=0.00,\n use_bias=True):\n with tf.variable_scope(name) as scope:\n if scale > 1:\n X = self.t_conv(name + '_upsample', X, filter, scale, scale, (not norm) and use_bias, \"VALID\", stddev)\n else:\n X = self.t_conv(name + '_deconf', X, filter, f_size, 1, (not norm) and use_bias, \"VALID\", stddev)\n if norm == 'I':\n X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse)\n elif norm == 'B':\n X = tf.layers.batch_normalization(X, reuse=reuse, training=is_train, name=name)\n elif norm == 'G':\n X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)\n if dropout > 0.0:\n X = tf.layers.dropout(X, dropout, training=is_train)\n if slope < 1.0:\n X = tf.nn.leaky_relu(X, slope) if slope > 0.0 else tf.nn.relu(X)\n return X\n\n F = 3\n norm = self.args.norm\n # print('norm', norm)\n # print('skip cons', self.args.skip_connections)\n # print('VNET In:', I.get_shape().as_list())\n\n if adaption_net:\n # print('ada scope T/R', is_train, reuse_ada)\n encoderscope = 'ada_enc'\n decoderscope = 'ada_dec'\n reuse_encoder = reuse_ada\n reuse_decoder = reuse_ada\n else:\n # print('vnet scope T/R', is_train, reuse_unet)\n encoderscope = 'unet_enc'\n decoderscope = 'unet_dec'\n reuse_encoder = reuse_unet\n reuse_decoder = reuse_unet\n \n print([encoderscope, ' ', decoderscope])\n\n # ===============================================================================ENCODER\n with tf.variable_scope(encoderscope) as scope:\n if reuse_encoder: scope.reuse_variables()\n\n with tf.variable_scope('color_encoder'):\n X = encoder_conf('eI', I[:, :, :, :-1], 96, 5, 1, norm, reuse_encoder, is_train, self.args.dropout) # 128 > 124\n X0 = encoder_conf('d0', X, 96, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 124 > 62 @2\n X = encoder_conf('e1', X0, 128, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 62 > 60\n X_EARLY = X\n X1 = encoder_conf('d1', X, 128, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 60 > 30 @4\n X = encoder_conf('e2', X1, 256, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 30 > 28\n X2 = encoder_conf('d2', X, 256, 2, 2, norm, reuse_encoder, is_train, self.args.dropout) # 28 > 14 @8\n X = encoder_conf('e3', X2, 512, 3, 1, norm, reuse_encoder, is_train, self.args.dropout) # 14 > 12\n X_MIDDLE = X\n\n # 
===============================================================================DECODER\n with tf.variable_scope(decoderscope) as scope:\n if reuse_decoder: scope.reuse_variables()\n # print('vnet scope', is_train, reuse_unet)\n # print('VNET Latent:', X.get_shape().as_list())\n\n with tf.variable_scope('decoder'):\n X = decoder_conf('d3', X, 512, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 12 > 14\n if self.args.skip_connections: X = tf.concat((X, X2), axis=-1)\n X = decoder_conf('u4', X, 256, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 14 > 28\n X = decoder_conf('d4', X, 256, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 28 > 30\n if self.args.skip_connections: X = tf.concat((X, X1), axis=-1)\n X = decoder_conf('u5', X, 128, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 30 > 60\n X_LATE = X\n X = decoder_conf('d5', X, 128, F, 1, norm, reuse_decoder, is_train, self.args.dropout) # 60 > 62\n if self.args.skip_connections: X = tf.concat((X, X0), axis=-1)\n X = decoder_conf('u6', X, 64, F, 2, norm, reuse_decoder, is_train, self.args.dropout) # 62 > 124\n X = decoder_conf('d6', X, 64, 5, 1, norm, reuse_decoder, is_train, self.args.dropout) # 124 > 128\n\n X = decoder_conf('out', X, self.args.num_classes, 1, 1, '', reuse_decoder, is_train, slope=1.0, stddev=0.02,\n use_bias=False)\n prediction = tf.nn.softmax(X, name = 'softmax') \n\n # ============================================================================OUT\n # print('VNET Out:', X.get_shape().as_list())\n\n # if self.args.mode == 'adapt':\n return X, X_EARLY, X_MIDDLE, X_LATE, prediction\n # else:\n # return X, prediction\n\n def D_4(self, X, reuse):\n def discrim_conv(name, X, out_channels, filtersize, stride=1, norm='', nonlin=True, init_stddev=-1):\n with tf.variable_scope(name) as scope:\n if init_stddev <= 0.0:\n init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)\n else:\n init = tf.truncated_normal_initializer(stddev=init_stddev)\n X = tf.layers.conv2d(X, out_channels, kernel_size=filtersize, strides=(stride, stride), padding=\"valid\",\n kernel_initializer=init)\n if norm == 'I':\n X = tf.contrib.layers.instance_norm(X, scope=scope, reuse=reuse, epsilon=0.001)\n elif norm == 'B':\n X = tf.layers.batch_normalization(X, reuse=reuse, training=True)\n elif norm == 'G':\n X = tf.contrib.layers.group_norm(X, groups=16, scope=scope, reuse=reuse)\n if nonlin:\n X = tf.nn.leaky_relu(X, 0.2)\n return X\n\n with tf.variable_scope('discriminator') as scope:\n if reuse:\n scope.reuse_variables()\n\n print('D in:', X.get_shape().as_list())\n\n X = self.conv('DZ1', X, 512, 1, 1)\n X = tf.nn.leaky_relu(X, 0.2)\n X = self.conv('DZ2', X, 512, 1, 1)\n X = tf.nn.leaky_relu(X, 0.2)\n X = self.conv('DZ3', X, 512, 1, 1)\n X = tf.nn.leaky_relu(X, 0.2)\n X = self.conv('DZ4', X, 512, 1, 1)\n X = tf.nn.leaky_relu(X, 0.2)\n\n X = discrim_conv('d_out', X, 1, 1, norm=False, nonlin=False, init_stddev=0.02)\n\n print('D out:', X.get_shape().as_list())\n\n return X\n\n\n def atrous_discriminator(self, X, reuse):\n\n def atrous_convs(net, scope, rate=None, depth=256, reuse=None):\n \"\"\"\n ASPP layer 1×1 convolution and three 3×3 atrous convolutions\n \"\"\"\n with tf.variable_scope(scope, reuse=reuse):\n \n pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding=\"SAME\")\n pyram_3x3_1 = self.conv('_3x3', net, depth, size=3, stride=1, padding=\"SAME\")\n pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth, size=3, stride=1, padding=\"SAME\", dilation=rate[0])\n pyram_3x3_3 = 
self.conv('_atr_3x3_2', net, depth, size=3, stride=1, padding=\"SAME\", dilation=rate[1])\n # pyram_3x3_4 = self.z_conv('_atr_3x3_3', net, depth/2, size=3, stride=1, padding=\"SAME\", dilation=rate[2])\n \n net = tf.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3), axis=3, name=\"concat\")\n\n net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding=\"SAME\")\n\n # pyram_1x1_0 = self.conv('_1x1', net, depth, size=1, stride=1, padding=\"SAME\")\n # pyram_3x3_1 = self.conv('_3x3', net, depth/2, size=3, stride=1, padding=\"SAME\")\n # pyram_3x3_2 = self.conv('_atr_3x3_1', net, depth/2, size=3, stride=1, padding=\"SAME\", dilation=rate[0])\n # pyram_3x3_3 = self.conv('_atr_3x3_2', net, depth/2, size=3, stride=1, padding=\"SAME\", dilation=rate[1])\n # # pyram_3x3_4 = self.conv('_atr_3x3_3', net, depth/2, size=3, stride=1, padding=\"SAME\", dilation=rate[2])\n \n # net = tf.concat((pyram_1x1_0, pyram_3x3_1, pyram_3x3_2, pyram_3x3_3), axis=3, name=\"concat\")\n\n # net = self.conv('_1x1_output', net, depth, size=1, stride=1, padding=\"SAME\")\n\n return net\n \n with tf.variable_scope('discriminator') as scope:\n if reuse:\n scope.reuse_variables()\n\n print('D in:', X.get_shape().as_list())\n\n rate = [2, 3, 4]\n X = atrous_convs(X, \"d_atrous_0\", rate = rate, depth=256, reuse=reuse)\n X = tf.nn.leaky_relu(X, 0.2)\n X = self.conv('d_1', X, 512, size=1, stride=1, padding=\"SAME\")\n X = tf.nn.leaky_relu(X, 0.2)\n X = self.conv('d_2', X, 512, size=1, stride=1, padding=\"SAME\")\n X = tf.nn.leaky_relu(X, 0.2)\n X = self.conv('d_3', X, 512, size=1, stride=1, padding=\"SAME\")\n X = tf.nn.leaky_relu(X, 0.2)\n \n X = self.conv('d_out', X, 1, size=1, stride=1, padding=\"SAME\")\n print('D out:', X.get_shape().as_list())\n\n return X\n\n def conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding=\"SAME\", init_stddev=-1.0, dilation=1):\n\n assert padding in [\"SAME\", \"VALID\", \"REFLECT\", \"PARTIAL\"], 'valid paddings: \"SAME\", \"VALID\", \"REFLECT\", \"PARTIAL\"'\n if type(size) == int: size = [size, size]\n if init_stddev <= 0.0:\n init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)\n else:\n init = tf.truncated_normal_initializer(stddev=init_stddev)\n\n if padding == \"PARTIAL\":\n with tf.variable_scope('mask'):\n _, h, w, _ = input.get_shape().as_list()\n\n slide_window = size[0] * size[1]\n mask = tf.ones(shape=[1, h, w, 1])\n update_mask = tf.layers.conv2d(mask, filters=1, dilation_rate=(dilation, dilation), name='mask' + id,\n kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),\n strides=stride, padding=\"SAME\", use_bias=False, trainable=False)\n mask_ratio = slide_window / (update_mask + 1e-8)\n update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)\n mask_ratio = mask_ratio * update_mask\n\n with tf.variable_scope('parconv'):\n x = tf.layers.conv2d(input, filters=channels, name='conv' + id, kernel_size=size, kernel_initializer=init,\n strides=stride, padding=\"SAME\", use_bias=False)\n x = x * mask_ratio\n if use_bias:\n bias = tf.get_variable(\"bias\" + id, [channels], initializer=tf.constant_initializer(0.0))\n x = tf.nn.bias_add(x, bias)\n return x * update_mask\n\n if padding == \"REFLECT\":\n assert size[0] % 2 == 1 and size[1] % 2 == 1, \"REFLECTION PAD ONLY WORKING FOR ODD FILTER SIZE.. 
\" + str(size)\n pad_x = size[0] // 2\n pad_y = size[1] // 2\n input = tf.pad(input, [[0, 0], [pad_x, pad_x], [pad_y, pad_y], [0, 0]], \"REFLECT\")\n padding = \"VALID\"\n \n return tf.layers.conv2d(input, channels, kernel_size=size, strides=[stride, stride],\n padding=padding, kernel_initializer=init, name='conv' + id,\n use_bias=use_bias, dilation_rate=(dilation, dilation))\n\n def z_conv(self, id, input, channels, size, stride=1, padding=\"SAME\", use_bias=False, dilation=1):\n # zero mean conv\n if type(size) == int: size = [size, size]\n in_ch = input.get_shape().as_list()[-1]\n # init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)\n init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)\n filters = tf.get_variable('zero_conv_weights' + id, initializer=init, shape=[size[0], size[1], in_ch, channels])\n filters = filters - tf.reduce_mean(filters, axis=[0, 1, 2], keepdims=True)\n\n if padding == \"PARTIAL\":\n with tf.variable_scope('mask'):\n _, h, w, _ = input.get_shape().as_list()\n\n slide_window = size[0] * size[1]\n mask = tf.ones(shape=[1, h, w, 1])\n update_mask = tf.layers.conv2d(mask, filters=1, name='mask' + id,\n kernel_size=size, kernel_initializer=tf.constant_initializer(1.0),\n strides=stride, padding=\"SAME\", use_bias=False, trainable=False,\n dilation_rate=(dilation, dilation))\n mask_ratio = slide_window / (update_mask + 1e-8)\n update_mask = tf.clip_by_value(update_mask, 0.0, 1.0)\n mask_ratio = mask_ratio * update_mask\n\n with tf.variable_scope('parconv'):\n x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding=\"SAME\", name='zero-conv_' + id,\n dilations=(1, dilation, dilation, 1))\n x = x * mask_ratio\n if use_bias:\n bias = tf.get_variable(\"bias\" + id, [channels], initializer=tf.constant_initializer(0.0))\n x = tf.nn.bias_add(x, bias)\n return x * update_mask\n\n x = tf.nn.conv2d(input, filters, strides=[1, stride, stride, 1], padding=padding, name='zero-conv_' + id,\n dilations=(1, dilation, dilation, 1))\n if use_bias:\n bias = tf.get_variable(\"bias\", [channels], initializer=tf.constant_initializer(0.0))\n x = tf.nn.bias_add(x, bias)\n return x\n \n def t_conv(self, id, input, channels, size=3, stride=1, use_bias=True, padding=\"SAME\", init_stddev=-1.0):\n # good old t-conv. 
I love it!\n\n assert padding in [\"SAME\", \"VALID\"], 'valid paddings are \"SAME\", \"VALID\"'\n if type(size) == int:\n size = [size, size]\n if init_stddev <= 0.0:\n init = tf.contrib.layers.variance_scaling_initializer(dtype=tf.float32)\n else:\n init = tf.truncated_normal_initializer(stddev=init_stddev)\n return tf.layers.conv2d_transpose(input, channels, kernel_size=size, strides=[stride, stride],\n padding=padding, kernel_initializer=init, name='tr_conv' + id, use_bias=use_bias)\n\n\n # Traditional U-Net\n def build_Unet_Arch(self, input_data, name=\"Unet_Arch\"):\n self.base_number_of_features = 32\n with tf.variable_scope(name):\n # Encoder definition\n o_c1 = self.general_conv2d(input_data, self.base_number_of_features, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_1')\n o_mp1 = tf.layers.max_pooling2d(o_c1, 2, 2, name = name + '_maxpooling_1')\n o_c2 = self.general_conv2d(o_mp1, self.base_number_of_features * 2, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_2')\n o_mp2 = tf.layers.max_pooling2d(o_c2, 2, 2, name = name + '_maxpooling_2')\n o_c3 = self.general_conv2d(o_mp2, self.base_number_of_features * 4, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_3')\n o_mp3 = tf.layers.max_pooling2d(o_c3, 2, 2, name = name + '_maxpooling_3')\n o_c4 = self.general_conv2d(o_mp3, self.base_number_of_features * 8, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_4')\n o_mp4 = tf.layers.max_pooling2d(o_c4, 2, 2, name = name + '_maxpooling_4')\n o_c5 = self.general_conv2d(o_mp4, self.base_number_of_features * 16, 3, stride = 1, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_conv2d_5')\n \n # Decoder definition\n o_d1 = self.general_deconv2d(o_c5, self.base_number_of_features * 8, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_1')\n o_me1 = tf.concat([o_d1, o_c4], 3) # Skip connection\n o_d2 = self.general_deconv2d(o_me1, self.base_number_of_features * 4, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_2')\n o_me2 = tf.concat([o_d2, o_c3], 3) # Skip connection\n o_d3 = self.general_deconv2d(o_me2, self.base_number_of_features * 2, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_3')\n o_me3 = tf.concat([o_d3, o_c2], 3) # Skip connection\n o_d4 = self.general_deconv2d(o_me3, self.base_number_of_features, 3, stride = 2, padding = 'SAME', activation_function = 'relu', do_norm = False, name = name + '_deconv2d_4')\n o_me4 = tf.concat([o_d4, o_c1], 3) # Skip connection\n logits = tf.layers.conv2d(o_me4, self.args.num_classes, 1, 1, 'SAME', activation = None)\n prediction = tf.nn.softmax(logits, name = name + '_softmax')\n \n return logits, prediction\n def general_conv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = \"relu\", padding = \"VALID\", do_norm=True, relu_factor = 0, name=\"conv2d\"):\n with tf.variable_scope(name):\n conv = tf.layers.conv2d(input_data, filters, kernel_size, stride, padding, activation=None)\n \n if do_norm:\n conv = tf.layers.batch_normalization(conv, momentum=0.9)\n \n if activation_function == \"relu\":\n conv = tf.nn.relu(conv, name = 'relu')\n if activation_function == \"leakyrelu\":\n conv = 
tf.nn.leaky_relu(conv, alpha=relu_factor)\n if activation_function == \"elu\":\n conv = tf.nn.elu(conv, name = 'elu')\n \n return conv\n def general_deconv2d(self, input_data, filters = 64, kernel_size = 7, stride = 1, stddev = 0.02, activation_function = \"relu\", padding = \"VALID\", do_norm = True, relu_factor = 0, name=\"deconv2d\"):\n with tf.variable_scope(name):\n deconv = tf.layers.conv2d_transpose(input_data, filters, kernel_size, (stride, stride), padding, activation = None)\n \n if do_norm:\n deconv = tf.layers.batch_normalization(deconv, momentum = 0.9)\n \n if activation_function == \"relu\":\n deconv = tf.nn.relu(deconv, name = 'relu')\n if activation_function == \"leakyrelu\":\n deconv = tf.nn.leaky_relu(deconv, alpha=relu_factor)\n if activation_function == \"elu\":\n deconv = tf.nn.elu(deconv, name = 'elu')\n \n return deconv\n\n\n", "import numpy as np\nimport scipy.io as sio\nimport os\nimport sys\nimport ntpath\nimport time\nfrom . import util, html\nfrom subprocess import Popen, PIPE\n\n\nif sys.version_info[0] == 2:\n VisdomExceptionBase = Exception\nelse:\n VisdomExceptionBase = ConnectionError\n\n\n\n\nclass RemoteSensingVisualizer():\n \"\"\"This class includes several functions that can display/save images and print/save logging information.\n\n It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.\n \"\"\"\n\n def __init__(self, scalers, opt):\n \"\"\"Initialize the Visualizer class\n\n Parameters:\n opt -- stores all the experiment flags; needs to be a subclass of BaseOptions\n Step 1: Cache the training/test options\n Step 2: connect to a visdom server\n Step 3: create an HTML object for saveing HTML filters\n Step 4: create a logging file to store training losses\n \"\"\"\n self.opt = opt # cache the option\n self.scalers = scalers\n self.display_id = opt.display_id\n #self.use_html = opt.isTrain and not opt.no_html\n self.win_size = opt.display_winsize\n self.name = opt.name\n self.port = opt.display_port\n self.saved = False\n if self.display_id > 0: # connect to a visdom server given <display_port> and <display_server>\n import visdom\n self.ncols = opt.display_ncols\n self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port, env=opt.display_env)\n if not self.vis.check_connection():\n self.create_visdom_connections()\n\n # if self.use_html: # create an HTML object at <checkpoints_dir>/web/; images will be saved under <checkpoints_dir>/web/images/\n self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')\n self.img_dir = os.path.join(self.web_dir, 'images')\n print('create web directory %s...' % self.web_dir)\n util.mkdirs([self.web_dir, self.img_dir])\n # create a logging file to store training losses\n self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')\n with open(self.log_name, \"a\") as log_file:\n now = time.strftime(\"%c\")\n log_file.write('================ Training Loss (%s) ================\\n' % now)\n\n def reset(self):\n \"\"\"Reset the self.saved status\"\"\"\n self.saved = False\n\n def create_visdom_connections(self):\n \"\"\"If the program could not connect to Visdom server, this function will start a new server at port < self.port > \"\"\"\n cmd = sys.executable + ' -m visdom.server -p %d &>/dev/null &' % self.port\n print('\\n\\nCould not connect to Visdom server. 
\\n Trying to start a server....')\n print('Command: %s' % cmd)\n Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)\n\n def display_current_results(self, visuals, epoch, save_result):\n \"\"\"Display current results on visdom; save current results to an HTML file.\n\n Parameters:\n visuals (OrderedDict) - - dictionary of images to display or save\n epoch (int) - - the current epoch\n save_result (bool) - - if save the current results to an HTML file\n \"\"\"\n #if self.use_html and (save_result or not self.saved): # save images to an HTML file if they haven't been saved.\n # self.saved = True\n # save images to the disk\n for label, image in visuals.items():\n image_tensor = image.data\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = np.transpose(image_numpy,(1, 2, 0))\n img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.mat' % (epoch, label))\n image_reshaped = image_numpy.reshape((image_numpy.shape[0] * image_numpy.shape[1], image_numpy.shape[2]))\n if 'A' in label:\n if 'diff' not in label:\n # Taking back the scalers of the T1 combination\n scaler_1 = self.scalers[0]\n image_inv = scaler_1.inverse_transform(image_reshaped)\n if 'diff' in label:\n scaler_3 = self.scalers[2]\n image_inv = scaler_3.inverse_transform(image_reshaped)\n if 'B' in label:\n if 'diff' not in label:\n # Taking back the scalers of the T2 combination\n scaler_2 = self.scalers[1]\n image_inv = scaler_2.inverse_transform(image_reshaped)\n if 'diff' in label:\n scaler_4 = self.scalers[3]\n image_inv = scaler_4.inverse_transform(image_reshaped)\n \n image = image_inv.reshape((image_numpy.shape[0], image_numpy.shape[1], image_numpy.shape[2]))\n sio.savemat(img_path, {label: image})\n\n def plot_current_losses(self, epoch, counter_ratio, losses):\n \"\"\"display the current losses on visdom display: dictionary of error labels and values\n\n Parameters:\n epoch (int) -- current epoch\n counter_ratio (float) -- progress (percentage) in the current epoch, between 0 to 1\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n \"\"\"\n if not hasattr(self, 'plot_data'):\n self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}\n self.plot_data['X'].append(epoch + counter_ratio)\n self.plot_data['Y'].append([losses[k] for k in self.plot_data['legend']])\n try:\n self.vis.line(\n X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),\n Y=np.array(self.plot_data['Y']),\n opts={\n 'title': self.name + ' loss over time',\n 'legend': self.plot_data['legend'],\n 'xlabel': 'epoch',\n 'ylabel': 'loss'},\n win=self.display_id)\n except VisdomExceptionBase:\n self.create_visdom_connections()\n\n # losses: same format as |losses| of plot_current_losses\n def print_current_losses(self, epoch, iters, losses, t_comp, t_data):\n \"\"\"print current losses on console; also save the losses to the disk\n\n Parameters:\n epoch (int) -- current epoch\n iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)\n losses (OrderedDict) -- training losses stored in the format of (name, float) pairs\n t_comp (float) -- computational time per data point (normalized by batch_size)\n t_data (float) -- data loading time per data point (normalized by batch_size)\n \"\"\"\n message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)\n for k, v in losses.items():\n message += '%s: %.3f ' % (k, v)\n\n print(message) # print the message\n with open(self.log_name, \"a\") as log_file:\n 
log_file.write('%s\\n' % message) # save the message\n\n\nclass RemoteSensingPatchesContainer():\n \n def __init__(self, scalers, opt):\n \n self.opt = opt\n self.scalers = scalers\n self.save_path = self.opt.results_dir + self.opt.name + '/images/'\n # Creating the directories\n if not os.path.exists(self.save_path):\n os.makedirs(self.save_path) \n #Computing the coordinates and prepare the container for the patches\n self.overlap_s = round(self.opt.crop_size * self.opt.overlap_porcent_s)\n self.overlap_s -= self.overlap_s % 2\n self.stride_s = self.opt.crop_size - self.overlap_s\n \n self.overlap_t = round(self.opt.crop_size * self.opt.overlap_porcent_t)\n self.overlap_t -= self.overlap_t % 2\n self.stride_t = self.opt.crop_size - self.overlap_t\n \n self.step_row_s = (self.stride_s - self.opt.rows_size_s % self.stride_s) % self.stride_s\n self.step_col_s = (self.stride_s - self.opt.cols_size_s % self.stride_s) % self.stride_s\n \n self.step_row_t = (self.stride_t - self.opt.rows_size_t % self.stride_t) % self.stride_t\n self.step_col_t = (self.stride_t - self.opt.cols_size_t % self.stride_t) % self.stride_t\n \n \n self.k1_s, self.k2_s = (self.opt.rows_size_s + self.step_row_s)//self.stride_s, (self.opt.cols_size_s + self.step_col_s)//self.stride_s\n self.coordinates_s = np.zeros((self.k1_s * self.k2_s , 4))\n \n self.k1_t, self.k2_t = (self.opt.rows_size_t + self.step_row_t)//self.stride_t, (self.opt.cols_size_t + self.step_col_t)//self.stride_t\n self.coordinates_t = np.zeros((self.k1_t * self.k2_t , 4))\n \n if self.opt.save_real:\n self.patchcontainer_real_A = np.zeros((self.k1_s * self.stride_s, self.k2_s * self.stride_s, self.opt.output_nc))\n self.patchcontainer_real_B = np.zeros((self.k1_t * self.stride_t, self.k2_t * self.stride_t, self.opt.output_nc))\n \n self.patchcontainer_fake_A = np.zeros((self.k1_t * self.stride_t, self.k2_t * self.stride_t, self.opt.output_nc))\n self.patchcontainer_fake_B = np.zeros((self.k1_s * self.stride_s, self.k2_s * self.stride_s, self.opt.output_nc))\n \n counter = 0\n for i in range(self.k1_s):\n for j in range(self.k2_s):\n self.coordinates_s[counter, 0] = i * self.stride_s\n self.coordinates_s[counter, 1] = j * self.stride_s\n self.coordinates_s[counter, 2] = i * self.stride_s + self.opt.crop_size\n self.coordinates_s[counter, 3] = j * self.stride_s + self.opt.crop_size\n counter += 1\n \n counter = 0\n for i in range(self.k1_t):\n for j in range(self.k2_t):\n self.coordinates_t[counter, 0] = i * self.stride_t\n self.coordinates_t[counter, 1] = j * self.stride_t\n self.coordinates_t[counter, 2] = i * self.stride_t + self.opt.crop_size\n self.coordinates_t[counter, 3] = j * self.stride_t + self.opt.crop_size\n counter += 1\n \n def store_current_visuals(self, visuals, index):\n\n for label, image in visuals.items():\n image_tensor = image.data\n image_numpy = image_tensor[0].cpu().float().numpy()\n image_numpy = np.transpose(image_numpy,(1, 2, 0))\n \n if label == 'fake_A':\n if index < self.opt.size_t:\n self.patchcontainer_fake_A[int(self.coordinates_t[index, 0]) : int(self.coordinates_t[index, 0]) + int(self.stride_t), \n int(self.coordinates_t[index, 1]) : int(self.coordinates_t[index, 1]) + int(self.stride_t), :] = image_numpy[int(self.overlap_t//2) : int(self.overlap_t//2) + int(self.stride_t),\n int(self.overlap_t//2) : int(self.overlap_t//2) + int(self.stride_t),:]\n if label == 'fake_B':\n if index < self.opt.size_s:\n self.patchcontainer_fake_B[int(self.coordinates_s[index, 0]) : int(self.coordinates_s[index, 0]) + 
int(self.stride_s), \n int(self.coordinates_s[index, 1]) : int(self.coordinates_s[index, 1]) + int(self.stride_s), :] = image_numpy[int(self.overlap_s//2) : int(self.overlap_s//2) + int(self.stride_s),\n int(self.overlap_s//2) : int(self.overlap_s//2) + int(self.stride_s),:]\n if self.opt.save_real:\n if label == 'real_A':\n if index < self.opt.size_s:\n self.patchcontainer_real_A[int(self.coordinates_s[index, 0]) : int(self.coordinates_s[index, 0]) + int(self.stride_s), \n int(self.coordinates_s[index, 1]) : int(self.coordinates_s[index, 1]) + int(self.stride_s), :] = image_numpy[int(self.overlap_s//2) : int(self.overlap_s//2) + int(self.stride_s),\n int(self.overlap_s//2) : int(self.overlap_s//2) + int(self.stride_s),:]\n if label == 'real_B':\n if index < self.opt.size_t:\n self.patchcontainer_real_B[int(self.coordinates_t[index, 0]) : int(self.coordinates_t[index, 0]) + int(self.stride_t), \n int(self.coordinates_t[index, 1]) : int(self.coordinates_t[index, 1]) + int(self.stride_t), :] = image_numpy[int(self.overlap_t//2) : int(self.overlap_t//2) + int(self.stride_t),\n int(self.overlap_t//2) : int(self.overlap_t//2) + int(self.stride_t),:]\n \n def save_images(self):\n \n fake_img_A = self.patchcontainer_fake_A[:self.k1_t*self.stride_t - self.step_row_t, :self.k2_t*self.stride_t - self.step_col_t, :]\n fake_img_B = self.patchcontainer_fake_B[:self.k1_s*self.stride_s - self.step_row_s, :self.k2_s*self.stride_s - self.step_col_s, :]\n # Applaying the normalizers back\n scaler_1 = self.scalers[0]\n scaler_2 = self.scalers[1]\n \n fake_img_A_reshaped = fake_img_A.reshape((fake_img_A.shape[0] * fake_img_A.shape[1], fake_img_A.shape[2]))\n fake_img_B_reshaped = fake_img_B.reshape((fake_img_B.shape[0] * fake_img_B.shape[1], fake_img_B.shape[2]))\n \n fake_img_inv_A = scaler_1.inverse_transform(fake_img_A_reshaped)\n fake_img_inv_B = scaler_2.inverse_transform(fake_img_B_reshaped)\n \n fake_img_norm_A = fake_img_inv_A.reshape((fake_img_A.shape[0], fake_img_A.shape[1], fake_img_A.shape[2]))\n fake_img_norm_B = fake_img_inv_B.reshape((fake_img_B.shape[0], fake_img_B.shape[1], fake_img_B.shape[2]))\n # Saving the fake images\n #saving generated images in .npy to use their in the classifier evaluation\n np.save(self.save_path + 'Adapted_Target', fake_img_norm_A)\n np.save(self.save_path + 'Adapted_Source', fake_img_norm_B)\n #saving generated images in .mat to use in visualization purposes\n sio.savemat(self.save_path + 'Adapted_Target.mat', {'fake_A': fake_img_norm_A})\n sio.savemat(self.save_path + 'Adapted_Source.mat', {'fake_B': fake_img_norm_B})\n if self.opt.save_real:\n real_img_A = self.patchcontainer_real_A[:self.k1_s*self.stride_s - self.step_row_s, :self.k2_s*self.stride_s - self.step_col_s, :]\n real_img_B = self.patchcontainer_real_B[:self.k1_t*self.stride_t - self.step_row_t, :self.k2_t*self.stride_t - self.step_col_t, :]\n real_img_A_reshaped = real_img_A.reshape((real_img_A.shape[0] * real_img_A.shape[1], real_img_A.shape[2]))\n real_img_B_reshaped = real_img_B.reshape((real_img_B.shape[0] * real_img_B.shape[1], real_img_B.shape[2]))\n real_img_inv_A = scaler_1.inverse_transform(real_img_A_reshaped)\n real_img_inv_B = scaler_2.inverse_transform(real_img_B_reshaped)\n \n real_img_norm_A = real_img_inv_A.reshape((real_img_A.shape[0], real_img_A.shape[1], real_img_A.shape[2]))\n real_img_norm_B = real_img_inv_B.reshape((real_img_B.shape[0], real_img_B.shape[1], real_img_B.shape[2]))\n #saving generated images in .mat to use in visualization purposes\n sio.savemat(self.save_path + 
'Real_Source.mat', {'real_A': real_img_norm_A})\n sio.savemat(self.save_path + 'Real_Target.mat', {'real_B': real_img_norm_B})\n \n \n \n \n " ]
[ [ "tensorflow.get_variable", "tensorflow.concat", "tensorflow.layers.dropout", "tensorflow.layers.conv2d_transpose", "tensorflow.pad", "tensorflow.contrib.layers.group_norm", "tensorflow.nn.conv2d", "tensorflow.layers.batch_normalization", "tensorflow.contrib.layers.variance_scaling_initializer", "tensorflow.truncated_normal_initializer", "tensorflow.layers.conv2d", "tensorflow.nn.elu", "tensorflow.contrib.layers.instance_norm", "tensorflow.nn.leaky_relu", "tensorflow.nn.bias_add", "tensorflow.clip_by_value", "tensorflow.nn.relu", "tensorflow.nn.softmax", "tensorflow.reduce_mean", "tensorflow.layers.max_pooling2d", "tensorflow.ones", "tensorflow.constant_initializer", "tensorflow.variable_scope" ], [ "numpy.save", "numpy.transpose", "scipy.io.savemat", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
KazMorita/pydefect
[ "31e5ad774845f436554ef15000b8eba3b168a65c" ]
[ "pydefect/tests/cli/vasp/test_main_function.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright (c) 2020. Distributed under the terms of the MIT License.\nfrom argparse import Namespace\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\nfrom monty.serialization import loadfn\nfrom pydefect.analyzer.band_edge_states import EdgeCharacters, BandEdgeStates\nfrom pydefect.analyzer.calc_results import CalcResults\nfrom pydefect.analyzer.unitcell import Unitcell\nfrom pydefect.cli.vasp.main_function import make_supercell, make_defect_set, \\\n make_defect_entries, make_unitcell, make_competing_phase_dirs, \\\n make_chem_pot_diag, make_calc_results, print_file, \\\n make_efnv_correction_from_vasp, make_defect_formation_energy, \\\n make_defect_eigenvalues, make_edge_characters, \\\n append_interstitial_to_supercell_info, pop_interstitial_from_supercell_info, \\\n plot_chem_pot_diag, make_gkfo_correction_from_vasp\nfrom pydefect.corrections.efnv_correction import \\\n ExtendedFnvCorrection\nfrom pydefect.defaults import defaults\nfrom pydefect.input_maker.defect import SimpleDefect\nfrom pydefect.input_maker.defect_entry import DefectEntry\nfrom pydefect.input_maker.defect_set import DefectSet\nfrom pymatgen import IStructure, Composition, Structure, Lattice, Element\nfrom pymatgen.io.vasp import Vasprun, Outcar\n\n\ndef test_print():\n args = Namespace(obj=\"a\")\n print_file(args)\n\n\ndef test_make_unitcell(mocker):\n vasprun_band_mock = mocker.Mock(spec=Vasprun, autospec=True)\n outcar_band_mock = mocker.Mock(spec=Outcar, autospec=True)\n outcar_dielectric_mock = mocker.Mock(spec=Outcar, autospec=True)\n args = Namespace(vasprun_band=vasprun_band_mock,\n outcar_band=outcar_band_mock,\n outcar_dielectric_clamped=outcar_dielectric_mock,\n outcar_dielectric_ionic=outcar_dielectric_mock)\n\n mock = mocker.patch(\"pydefect.cli.vasp.main_function.make_unitcell_from_vasp\")\n mock.return_value = Unitcell(vbm=1.0,\n cbm=2.0,\n ele_dielectric_const=np.eye(3),\n ion_dielectric_const=np.eye(3))\n make_unitcell(args)\n mock.assert_called_once_with(vasprun_band=vasprun_band_mock,\n outcar_band=outcar_band_mock,\n outcar_dielectric_clamped=outcar_dielectric_mock,\n outcar_dielectric_ionic=outcar_dielectric_mock)\n\n\ndef test_make_competing_phase_dirs(mocker):\n args = Namespace(elements=[\"Mg\", \"O\"],\n e_above_hull=0.1)\n mock = mocker.patch(\"pydefect.cli.vasp.main_function.MpQuery\")\n mock_make = mocker.patch(\"pydefect.cli.vasp.main_function.make_poscars_from_query\")\n make_competing_phase_dirs(args)\n mock.assert_called_once_with(element_list=args.elements,\n e_above_hull=args.e_above_hull)\n mock_make.assert_called_once_with(materials_query=mock.return_value.materials, path=Path.cwd())\n\n\ndef test_make_chem_pot_diag(mocker, tmpdir):\n def side_effect(key):\n mock_vasprun = mocker.Mock()\n if key == Path(\"Mg\") / defaults.vasprun:\n mock_vasprun.final_structure.composition = Composition(\"Mg2\")\n mock_vasprun.final_energy = -10\n elif key == Path(\"O\") / defaults.vasprun:\n mock_vasprun.final_structure.composition = Composition(\"O2\")\n mock_vasprun.final_energy = -20\n elif key == Path(\"MgO\") / defaults.vasprun:\n mock_vasprun.final_structure.composition = Composition(\"MgO\")\n mock_vasprun.final_energy = -30\n elif key == Path(\"Al\") / defaults.vasprun:\n mock_vasprun.final_structure.composition = Composition(\"Al\")\n mock_vasprun.final_energy = 0\n elif key == Path(\"MgAl2O4\") / defaults.vasprun:\n mock_vasprun.final_structure.composition = Composition(\"MgAl2O4\")\n mock_vasprun.final_energy = -70\n else:\n raise 
ValueError\n return mock_vasprun\n\n tmpdir.chdir()\n mock = mocker.patch(\"pydefect.cli.vasp.main_function.Vasprun\", side_effect=side_effect)\n args_1 = Namespace(elements=None, functional=None, yaml=\"cpd.yaml\", update=False,\n dirs=[Path(\"Mg\"), Path(\"MgO\"), Path(\"O\"), Path(\"Al\"), Path(\"MgAl2O4\")],\n target=Composition(\"MgO\"))\n make_chem_pot_diag(args_1)\n\n args = Namespace(yaml=\"cpd.yaml\")\n plot_chem_pot_diag(args)\n\n args_2 = Namespace(elements=None, functional=None, yaml=\"cpd.yaml\", update=False,\n dirs=[Path(\"Mg\"), Path(\"MgO\"), Path(\"O\"), Path(\"Al\"), Path(\"MgAl2O4\")],\n target=Composition(\"MgAl2O4\"))\n make_chem_pot_diag(args_2)\n\n args = Namespace(yaml=\"cpd.yaml\")\n plot_chem_pot_diag(args)\n\n\ndef test_make_supercell_from_matrix(simple_cubic, simple_cubic_2x1x1, tmpdir):\n matrix = [2, 1, 1]\n args = Namespace(unitcell=simple_cubic, matrix=matrix, min_num_atoms=None, max_num_atoms=None)\n\n tmpdir.chdir()\n make_supercell(args)\n info = loadfn(\"supercell_info.json\")\n assert IStructure.from_file(\"SPOSCAR\") == simple_cubic_2x1x1\n assert info.structure == simple_cubic_2x1x1\n assert info.transformation_matrix == [[2, 0, 0], [0, 1, 0], [0, 0, 1]]\n\n\ndef test_make_recommended_supercell(simple_cubic, simple_cubic_2x2x2, tmpdir):\n args = Namespace(unitcell=simple_cubic, matrix=None, min_num_atoms=8, max_num_atoms=8)\n\n tmpdir.chdir()\n make_supercell(args)\n info = loadfn(\"supercell_info.json\")\n assert IStructure.from_file(\"SPOSCAR\") == simple_cubic_2x2x2\n assert info.structure == simple_cubic_2x2x2\n assert info.transformation_matrix == [[2, 0, 0], [0, 2, 0], [0, 0, 2]]\n\n\ndef test_add_interstitials(mocker):\n mock_1 = mocker.Mock()\n mock_2 = mocker.Mock()\n mock_3 = mocker.Mock()\n args = Namespace(supercell_info=mock_1, base_structure=mock_2, frac_coords=mock_3)\n mock = mocker.patch(\"pydefect.cli.vasp.main_function.append_interstitial\")\n\n append_interstitial_to_supercell_info(args)\n mock.assert_called_once_with(mock_1, mock_2, mock_3)\n mock.return_value.to_json_file.assert_called_once_with()\n\n\ndef test_pop_interstitials(mocker):\n mock_si = mocker.MagicMock()\n args = Namespace(supercell_info=mock_si, index=1000)\n\n pop_interstitial_from_supercell_info(args)\n mock_si.interstitials.pop.assert_called_once_with(999)\n mock_si.to_json_file.assert_called_once_with()\n\n\[email protected](\"oxi_states,he_vacancy_charge\",\n ([None, [0]], [[\"He\", 1, \"Li\", 1], [-1, 0, 1]]))\ndef test_make_defect_set(oxi_states, he_vacancy_charge, tmpdir, supercell_info):\n tmpdir.chdir()\n supercell_info.to_json_file()\n args = Namespace(oxi_states=oxi_states, dopants=[\"Li\"], kwargs=[\"Li_H1\", \"Va_He1\", \"Va_H1_-1\"])\n make_defect_set(args)\n\n simple_defects = {SimpleDefect(None, \"He1\", he_vacancy_charge),\n SimpleDefect(None, \"H1\", [-1]),\n SimpleDefect(\"Li\", \"H1\", [0])}\n\n DefectSet(defects=simple_defects).to_yaml(\"expected.yaml\")\n assert Path(\"defect_in.yaml\").read_text() == Path(\"expected.yaml\").read_text()\n\n\ndef test_make_defect_entries(tmpdir, supercell_info):\n tmpdir.chdir()\n supercell_info.to_json_file()\n defect_set = DefectSet({SimpleDefect(None, \"He1\", [-1, 0])})\n defect_set.to_yaml()\n args = Namespace()\n make_defect_entries(args)\n names = {str(name) for name in Path(\".\").glob(\"*\")}\n assert names == {'Va_He1_-1', 'defect_in.yaml', 'perfect', 'Va_He1_0', 'supercell_info.json'}\n\n perfect_structure = Structure.from_file(Path(\"perfect\") / \"POSCAR\")\n assert perfect_structure == 
supercell_info.structure\n\n file_names = {str(file_name.name) for file_name in Path(\"Va_He1_-1\").glob(\"*\")}\n assert file_names == {\"POSCAR\", \"defect_entry.json\", \"prior_info.yaml\"}\n\n expected = \"\"\"charge: -1\n\"\"\"\n assert Path(\"Va_He1_-1/prior_info.yaml\").read_text() == expected\n\n\ndef test_make_calc_results(tmpdir, mocker):\n tmpdir.chdir()\n mock = mocker.patch(\"pydefect.cli.vasp.main_function.make_calc_results_from_vasp\")\n mock_vasprun = mocker.patch(\"pydefect.cli.vasp.main_function.Vasprun\")\n mock_outcar = mocker.patch(\"pydefect.cli.vasp.main_function.Outcar\")\n mock_calc_results = mocker.Mock(spec=CalcResults)\n mock.return_value = mock_calc_results\n args = Namespace(dirs=[Path(\"a\")])\n make_calc_results(args)\n\n mock_vasprun.assert_called_with(Path(\"a\") / defaults.vasprun)\n mock_outcar.assert_called_with(Path(\"a\") / defaults.outcar)\n mock.assert_called_with(vasprun=mock_vasprun.return_value, outcar=mock_outcar.return_value)\n mock_calc_results.to_json_file.assert_called_with(filename=Path(\"a\") / \"calc_results.json\")\n\n\ndef test_make_efnv_correction_from_vasp(tmpdir, mocker):\n mock_defect_entry = mocker.Mock(spec=DefectEntry, autospec=True)\n mock_calc_results = mocker.Mock(spec=CalcResults, autospec=True)\n\n def side_effect(key):\n if str(key) == \"Va_O1_2/defect_entry.json\":\n mock_defect_entry.full_name = \"Va_O1_2\"\n mock_defect_entry.charge = 2\n return mock_defect_entry\n elif str(key) == \"Va_O1_2/calc_results.json\":\n return mock_calc_results\n else:\n raise ValueError\n\n mock_perfect_calc_results = mocker.Mock(spec=CalcResults)\n mock_loadfn = mocker.patch(\"pydefect.cli.vasp.main_function.loadfn\", side_effect=side_effect)\n mock_unitcell = mocker.Mock(spec=Unitcell)\n mock_make_efnv = mocker.patch(\"pydefect.cli.vasp.main_function.make_efnv_correction\")\n mock_efnv = mocker.Mock(spec=ExtendedFnvCorrection, autospec=True)\n mock_make_efnv.return_value = mock_efnv\n\n mock_pot_plotter = mocker.patch(\"pydefect.cli.vasp.main_function.SitePotentialMplPlotter\")\n\n args = Namespace(dirs=[Path(\"Va_O1_2\")],\n perfect_calc_results=mock_perfect_calc_results,\n unitcell=mock_unitcell)\n\n make_efnv_correction_from_vasp(args)\n mock_loadfn.assert_any_call(Path(\"Va_O1_2\") / \"defect_entry.json\")\n mock_loadfn.assert_any_call(Path(\"Va_O1_2\") / \"calc_results.json\")\n\n mock_make_efnv.assert_called_with(mock_defect_entry.charge, mock_calc_results, mock_perfect_calc_results, mock_unitcell.dielectric_constant)\n mock_efnv.to_json_file.assert_called_with(Path(\"Va_O1_2\") / \"correction.json\")\n\n mock_pot_plotter.from_efnv_corr.assert_called_with(title=\"Va_O1_2\", efnv_correction=mock_efnv)\n mock_pot_plotter.from_efnv_corr.return_value.construct_plot.assert_called_once_with()\n mock_pot_plotter.from_efnv_corr.return_value.plt.savefig.assert_called_once_with(fname=Path(\"Va_O1_2\") / \"correction.pdf\")\n\n\ndef test_make_gkfo_correction_from_vasp(tmpdir, mocker):\n mock_i_correction = mocker.Mock(spec=ExtendedFnvCorrection, autospec=True)\n mock_i_calc_results = mocker.Mock(spec=CalcResults, autospec=True)\n mock_f_calc_results = mocker.Mock(spec=CalcResults, autospec=True)\n mock_unitcell = mocker.Mock()\n\n mock_pot_plotter = mocker.patch(\"pydefect.cli.vasp.main_function.SitePotentialMplPlotter\")\n mock_make_gkfo = mocker.patch(\"pydefect.cli.vasp.main_function.make_gkfo_correction\")\n\n args = Namespace(\n initial_efnv_correction=mock_i_correction,\n initial_calc_results=mock_i_calc_results,\n 
final_calc_results=mock_f_calc_results,\n charge_diff=1,\n unitcell=mock_unitcell)\n\n make_gkfo_correction_from_vasp(args)\n mock_make_gkfo.assert_called_with(\n efnv_correction=mock_i_correction,\n additional_charge=1,\n final_calc_results=mock_f_calc_results,\n initial_calc_results=mock_i_calc_results,\n diele_tensor=mock_unitcell.dielectric_constant,\n ion_clamped_diele_tensor=mock_unitcell.ele_dielectric_const)\n\n\ndef test_make_defect_eigenvalues(mocker):\n mock_vasprun = mocker.patch(\"pydefect.cli.vasp.main_function.Vasprun\")\n\n mock_make_eigvals = mocker.patch(\n \"pydefect.cli.vasp.main_function.make_band_edge_eigenvalues\")\n\n mock_perfect_calc_results = mocker.Mock(spec=CalcResults)\n mock_perfect_calc_results.vbm = 10\n mock_perfect_calc_results.cbm = 20\n\n mock_defect_entry = mocker.Mock(spec=DefectEntry, autospec=True)\n\n mock_eigval_plotter = mocker.patch(\n \"pydefect.cli.vasp.main_function.EigenvalueMplPlotter\")\n\n def side_effect(key):\n if str(key) == \"Va_O1_2/defect_entry.json\":\n mock_defect_entry.name = \"Va_O1\"\n mock_defect_entry.charge = 2\n return mock_defect_entry\n else:\n raise ValueError\n\n mock_loadfn = mocker.patch(\"pydefect.cli.vasp.main_function.loadfn\",\n side_effect=side_effect)\n\n args = Namespace(dirs=[Path(\"Va_O1_2\")],\n perfect_calc_results=mock_perfect_calc_results)\n make_defect_eigenvalues(args)\n\n mock_vasprun.assert_called_with(Path(\"Va_O1_2\") / defaults.vasprun)\n mock_make_eigvals.assert_called_with(mock_vasprun.return_value, 10, 20)\n mock_make_eigvals.return_value.to_json_file.assert_called_with(\n Path(\"Va_O1_2\") / \"band_edge_eigenvalues.json\")\n mock_loadfn.assert_any_call(Path(\"Va_O1_2\") / \"defect_entry.json\")\n mock_eigval_plotter.assert_called_with(\n title=\"Va_O1\",\n band_edge_eigenvalues=mock_make_eigvals.return_value,\n supercell_vbm=10,\n supercell_cbm=20)\n\n\ndef test_make_edge_characters(mocker):\n mock_vasprun = mocker.patch(\"pydefect.cli.vasp.main_function.Vasprun\")\n mock_procar = mocker.patch(\"pydefect.cli.vasp.main_function.Procar\")\n mock_outcar = mocker.patch(\"pydefect.cli.vasp.main_function.Outcar\")\n\n mock_perfect_calc_results = mocker.Mock(spec=CalcResults, autospec=True)\n mock_perfect_calc_results.structure = mocker.Mock(spec=Structure)\n mock_calc_results = mocker.Mock(spec=CalcResults, autospec=True)\n\n mock_analyzer = mocker.patch(\n \"pydefect.cli.vasp.main_function.DefectStructureAnalyzer\")\n mock_characters = mocker.patch(\n \"pydefect.cli.vasp.main_function.MakeEdgeCharacters\")\n\n def side_effect(key):\n if str(key) == \"Va_O1_2/calc_results.json\":\n mock_calc_results.structure = mocker.Mock(spec=Structure, autospec=True)\n return mock_calc_results\n else:\n raise ValueError\n mocker.patch(\"pydefect.cli.vasp.main_function.loadfn\", side_effect=side_effect)\n\n args = Namespace(dirs=[Path(\"Va_O1_2\")],\n perfect_calc_results=mock_perfect_calc_results)\n make_edge_characters(args)\n\n mock_vasprun.assert_called_with(Path(\"Va_O1_2\") / defaults.vasprun)\n mock_procar.assert_called_with(Path(\"Va_O1_2\") / defaults.procar)\n mock_outcar.assert_called_with(Path(\"Va_O1_2\") / defaults.outcar)\n mock_analyzer.assert_called_with(mock_calc_results.structure,\n mock_perfect_calc_results.structure)\n mock_characters.assert_called_with(mock_procar.return_value,\n mock_vasprun.return_value,\n mock_outcar.return_value,\n mock_analyzer.return_value.neighboring_atom_indices)\n\n\ndef test_make_edge_state(mocker):\n mock_perf_edge_chars = mocker.Mock(spec=EdgeCharacters, 
autospec=True)\n args = Namespace(dirs=[Path(\"Va_O1_2\")],\n perfect_edge_characters=mock_perf_edge_chars)\n\n mock_edge_chars = mocker.Mock(spec=EdgeCharacters, autospec=True)\n def side_effect(key):\n if str(key) == \"Va_O1_2/edge_characters.json\":\n return mock_edge_chars\n else:\n raise ValueError\n mocker.patch(\"pydefect.cli.vasp.main_function.loadfn\", side_effect=side_effect)\n\n mocker.patch(\"pydefect.cli.vasp.main_function.make_band_edge_state\")\n mocker.patch(\"pydefect.cli.vasp.main_function.BandEdgeStates\")\n\n\[email protected](\"skip_shallow\", [False, True])\ndef test_make_defect_formation_energy(skip_shallow, tmpdir, mocker):\n tmpdir.chdir()\n mock_perfect_calc_results = mocker.Mock(spec=CalcResults)\n mock_perfect_calc_results.structure = Structure(Lattice.cubic(1), species=[\"H\"] * 2, coords=[[0]*3] * 2)\n mock_perfect_calc_results.energy = 10\n mock_perfect_calc_results.vbm = 10\n mock_perfect_calc_results.cbm = 20\n\n mock_chem_pot_diag = mocker.patch(\"pydefect.cli.vasp.main_function.ChemPotDiag\")\n mock_chem_pot_diag.from_yaml.return_value.abs_chem_pot_dict.return_value = {Element.H: 0}\n\n mock_defect_entry = mocker.Mock(spec=DefectEntry, autospec=True)\n mock_calc_results = mocker.Mock(spec=CalcResults, autospec=True)\n mock_correction = mocker.Mock(spec=ExtendedFnvCorrection, autospec=True)\n\n def side_effect(key):\n if str(key) == \"Va_O1_2/defect_entry.json\":\n mock_defect_entry.name = \"Va_H1\"\n mock_defect_entry.charge = 2\n return mock_defect_entry\n elif str(key) == \"Va_O1_2/calc_results.json\":\n mock_calc_results.structure = Structure(Lattice.cubic(1), species=[\"H\"], coords=[[0]*3])\n mock_calc_results.energy = 10\n return mock_calc_results\n elif str(key) == \"Va_O1_2/correction.json\":\n mock_correction.correction_energy = 20\n return mock_correction\n elif str(key) == \"Va_O1_2/band_edge_states.json\":\n mock_band_edge_states = mocker.Mock(spec=BandEdgeStates, autospec=True)\n mock_band_edge_states.is_shallow = True\n return mock_band_edge_states\n else:\n raise ValueError\n\n mock_loadfn = mocker.patch(\"pydefect.cli.vasp.main_function.loadfn\", side_effect=side_effect)\n\n mock_unitcell = mocker.Mock(spec=Unitcell)\n mock_unitcell.vbm = 11\n mock_unitcell.cbm = 19\n\n args = Namespace(dirs=[Path(\"Va_O1_2\")],\n perfect_calc_results=mock_perfect_calc_results,\n unitcell=mock_unitcell,\n chem_pot_diag=\"cpd.yaml\",\n label=\"A\",\n y_range=[-100, 100],\n skip_shallow=skip_shallow,\n print=True)\n\n make_defect_formation_energy(args)\n\n if skip_shallow is True:\n mock_loadfn.assert_any_call(Path(\"Va_O1_2\") / \"band_edge_states.json\")\n else:\n mock_loadfn.assert_any_call(Path(\"Va_O1_2\") / \"defect_entry.json\")\n mock_loadfn.assert_any_call(Path(\"Va_O1_2\") / \"calc_results.json\")\n mock_loadfn.assert_any_call(Path(\"Va_O1_2\") / \"correction.json\")\n\n\n\n" ]
[ [ "numpy.eye" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lxh5147/Text-Classification-Pytorch
[ "51f9189aad62051127c5a537c72ab3a8b0f97c60" ]
[ "models/LSTM_Attn.py" ]
[ "# _*_ coding: utf-8 _*_\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\n\nclass AttentionModel(torch.nn.Module):\n def __init__(self, batch_size, output_size, hidden_size, vocab_size, embedding_length, weights):\n super(AttentionModel, self).__init__()\n\n \"\"\"\n Arguments\n ---------\n batch_size : Size of the batch which is same as the batch_size of the data returned by the TorchText BucketIterator\n output_size : 2 = (pos, neg)\n hidden_sie : Size of the hidden_state of the LSTM\n vocab_size : Size of the vocabulary containing unique words\n embedding_length : Embeddding dimension of GloVe word embeddings\n weights : Pre-trained GloVe word_embeddings which we will use to create our word_embedding look-up table \n \n --------\n \n \"\"\"\n\n self.batch_size = batch_size\n self.output_size = output_size\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embedding_length = embedding_length\n\n self.word_embeddings = nn.Embedding(vocab_size, embedding_length)\n self.word_embeddings.weights = nn.Parameter(weights, requires_grad=False)\n self.lstm = nn.LSTM(embedding_length, hidden_size)\n self.label = nn.Linear(hidden_size, output_size)\n\n # self.attn_fc_layer = nn.Linear()\n\n def attention_net(self, lstm_output, final_state):\n\n \"\"\"\n Now we will incorporate Attention mechanism in our LSTM model. In this new model, we will use attention to compute soft alignment score corresponding\n between each of the hidden_state and the last hidden_state of the LSTM. We will be using torch.bmm for the batch matrix multiplication.\n\n Arguments\n ---------\n\n lstm_output : Final output of the LSTM which contains hidden layer outputs for each sequence.\n final_state : Final time-step hidden state (h_n) of the LSTM\n\n ---------\n\n Returns : It performs attention mechanism by first computing weights for each of the sequence present in lstm_output and and then finally computing the\n new hidden state.\n\n Tensor Size :\n hidden.size() = (batch_size, hidden_size)\n attn_weights.size() = (batch_size, num_seq)\n soft_attn_weights.size() = (batch_size, num_seq)\n new_hidden_state.size() = (batch_size, hidden_size)\n\n \"\"\"\n\n hidden = final_state.squeeze(0)\n attn_weights = torch.bmm(lstm_output, hidden.unsqueeze(2)).squeeze(2)\n soft_attn_weights = F.softmax(attn_weights, 1)\n new_hidden_state = torch.bmm(lstm_output.transpose(1, 2), soft_attn_weights.unsqueeze(2)).squeeze(2)\n\n return new_hidden_state\n\n def forward(self, input_sentences, batch_size=None):\n\n \"\"\"\n Parameters\n ----------\n input_sentence: input_sentence of shape = (batch_size, num_sequences)\n batch_size : default = None. 
Used only for prediction on a single sentence after training (batch_size = 1)\n\n Returns\n -------\n Output of the linear layer containing logits for pos & neg class which receives its input as the new_hidden_state which is basically the output of the Attention network.\n final_output.shape = (batch_size, output_size)\n\n \"\"\"\n\n input = self.word_embeddings(input_sentences)\n input = input.permute(1, 0, 2)\n if batch_size is None:\n h_0 = torch.zeros(1, self.batch_size, self.hidden_size)\n c_0 = torch.zeros(1, self.batch_size, self.hidden_size)\n else:\n h_0 = torch.zeros(1, batch_size, self.hidden_size)\n c_0 = torch.zeros(1, batch_size, self.hidden_size)\n\n if torch.cuda.is_available():\n h_0 = h_0.cuda()\n c_0 = c_0.cuda()\n\n output, (final_hidden_state, final_cell_state) = self.lstm(input, (\n h_0, c_0)) # final_hidden_state.size() = (1, batch_size, hidden_size)\n output = output.permute(1, 0, 2) # output.size() = (batch_size, num_seq, hidden_size)\n\n attn_output = self.attention_net(output, final_hidden_state)\n logits = self.label(attn_output)\n\n return logits\n" ]
[ [ "torch.nn.functional.softmax", "torch.nn.Parameter", "torch.nn.LSTM", "torch.zeros", "torch.nn.Embedding", "torch.nn.Linear", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MinnDevelopment/robosat.pink
[ "0e4b88a7b1fc91e2a20e5e3bf0c4f742be9ea2c5" ]
[ "robosat_pink/tools/rasterize.py" ]
[ "import os\nimport sys\nimport json\nimport collections\n\nimport numpy as np\nfrom tqdm import tqdm\n\nimport mercantile\nfrom rasterio.crs import CRS\nfrom rasterio.transform import from_bounds\nfrom rasterio.features import rasterize\nfrom rasterio.warp import transform\nfrom supermercado import burntiles\n\nimport psycopg2\n\nfrom robosat_pink.core import load_config, check_classes, make_palette, web_ui, Logs\nfrom robosat_pink.tiles import tiles_from_csv, tile_label_to_file, tile_bbox\n\n\ndef add_parser(subparser, formatter_class):\n parser = subparser.add_parser(\n \"rasterize\", help=\"Rasterize GeoJSON or PostGIS features to tiles\", formatter_class=formatter_class\n )\n\n inp = parser.add_argument_group(\"Inputs [either --postgis or --geojson is required]\")\n inp.add_argument(\"--cover\", type=str, help=\"path to csv tiles cover file [required]\")\n inp.add_argument(\"--config\", type=str, help=\"path to config file [required]\")\n inp.add_argument(\"--type\", type=str, required=True, help=\"type of feature to rasterize (e.g Building, Road) [required]\")\n inp.add_argument(\"--pg\", type=str, help=\"PostgreSQL dsn using psycopg2 syntax (e.g 'dbname=db user=postgres')\")\n inp.add_argument(\"--sql\", type=str, help=\"SQL to retrieve geometry features [e.g SELECT geom FROM your_table]\")\n inp.add_argument(\"--geojson\", type=str, nargs=\"+\", help=\"path to GeoJSON features files\")\n\n out = parser.add_argument_group(\"Outputs\")\n out.add_argument(\"out\", type=str, help=\"output directory path [required]\")\n out.add_argument(\"--ts\", type=int, default=512, help=\"output tile size [default: 512]\")\n\n ui = parser.add_argument_group(\"Web UI\")\n ui.add_argument(\"--web_ui_base_url\", type=str, help=\"alternate Web UI base URL\")\n ui.add_argument(\"--web_ui_template\", type=str, help=\"alternate Web UI template path\")\n ui.add_argument(\"--no_web_ui\", action=\"store_true\", help=\"desactivate Web UI output\")\n\n parser.set_defaults(func=main)\n\n\ndef geojson_reproject(feature, srid_in, srid_out):\n \"\"\"Reproject GeoJSON Polygon feature coords\n Inspired by: https://gist.github.com/dnomadb/5cbc116aacc352c7126e779c29ab7abe\n \"\"\"\n\n if feature[\"geometry\"][\"type\"] == \"Polygon\":\n xys = (zip(*ring) for ring in feature[\"geometry\"][\"coordinates\"])\n xys = (list(zip(*transform(CRS.from_epsg(srid_in), CRS.from_epsg(srid_out), *xy))) for xy in xys)\n\n yield {\"coordinates\": list(xys), \"type\": \"Polygon\"}\n\n\ndef geojson_tile_burn(tile, features, srid, ts, burn_value=1):\n \"\"\"Burn tile with GeoJSON features.\"\"\"\n\n shapes = ((geometry, burn_value) for feature in features for geometry in geojson_reproject(feature, srid, 3857))\n\n bounds = tile_bbox(tile, mercator=True)\n transform = from_bounds(*bounds, ts, ts)\n\n try:\n return rasterize(shapes, out_shape=(ts, ts), transform=transform)\n except:\n return None\n\n\ndef main(args):\n\n if args.pg:\n if not args.sql:\n sys.exit(\"ERROR: With PostgreSQL db, --sql must be provided\")\n\n if (args.sql and args.geojson) or (args.sql and not args.pg):\n sys.exit(\"ERROR: You can use either --pg or --geojson inputs, but only one at once.\")\n\n config = load_config(args.config)\n check_classes(config)\n palette = make_palette(*[classe[\"color\"] for classe in config[\"classes\"]], complementary=True)\n burn_value = next(config[\"classes\"].index(classe) for classe in config[\"classes\"] if classe[\"title\"] == args.type)\n if \"burn_value\" not in locals():\n sys.exit(\"ERROR: asked type to rasterize is not 
contains in your config file classes.\")\n\n args.out = os.path.expanduser(args.out)\n os.makedirs(args.out, exist_ok=True)\n log = Logs(os.path.join(args.out, \"log\"), out=sys.stderr)\n\n def geojson_parse_polygon(zoom, srid, feature_map, polygon, i):\n\n try:\n if srid != 4326:\n polygon = [xy for xy in geojson_reproject({\"type\": \"feature\", \"geometry\": polygon}, srid, 4326)][0]\n\n for i, ring in enumerate(polygon[\"coordinates\"]): # GeoJSON coordinates could be N dimensionals\n polygon[\"coordinates\"][i] = [[x, y] for point in ring for x, y in zip([point[0]], [point[1]])]\n\n if polygon[\"coordinates\"]:\n for tile in burntiles.burn([{\"type\": \"feature\", \"geometry\": polygon}], zoom=zoom):\n feature_map[mercantile.Tile(*tile)].append({\"type\": \"feature\", \"geometry\": polygon})\n\n except ValueError:\n log.log(\"Warning: invalid feature {}, skipping\".format(i))\n\n return feature_map\n\n def geojson_parse_geometry(zoom, srid, feature_map, geometry, i):\n\n if geometry[\"type\"] == \"Polygon\":\n feature_map = geojson_parse_polygon(zoom, srid, feature_map, geometry, i)\n\n elif geometry[\"type\"] == \"MultiPolygon\":\n for polygon in geometry[\"coordinates\"]:\n feature_map = geojson_parse_polygon(zoom, srid, feature_map, {\"type\": \"Polygon\", \"coordinates\": polygon}, i)\n else:\n log.log(\"Notice: {} is a non surfacic geometry type, skipping feature {}\".format(geometry[\"type\"], i))\n\n return feature_map\n\n if args.geojson:\n\n tiles = [tile for tile in tiles_from_csv(os.path.expanduser(args.cover))]\n assert tiles, \"Empty cover\"\n\n zoom = tiles[0].z\n assert not [tile for tile in tiles if tile.z != zoom], \"Unsupported zoom mixed cover. Use PostGIS instead\"\n\n feature_map = collections.defaultdict(list)\n\n log.log(\"RoboSat.pink - rasterize - Compute spatial index\")\n for geojson_file in args.geojson:\n\n with open(os.path.expanduser(geojson_file)) as geojson:\n feature_collection = json.load(geojson)\n\n try:\n crs_mapping = {\"CRS84\": \"4326\", \"900913\": \"3857\"}\n srid = feature_collection[\"crs\"][\"properties\"][\"name\"].split(\":\")[-1]\n srid = int(srid) if srid not in crs_mapping else int(crs_mapping[srid])\n except:\n srid = int(4326)\n\n for i, feature in enumerate(tqdm(feature_collection[\"features\"], ascii=True, unit=\"feature\")):\n\n if feature[\"geometry\"][\"type\"] == \"GeometryCollection\":\n for geometry in feature[\"geometry\"][\"geometries\"]:\n feature_map = geojson_parse_geometry(zoom, srid, feature_map, geometry, i)\n else:\n feature_map = geojson_parse_geometry(zoom, srid, feature_map, feature[\"geometry\"], i)\n features = args.geojson\n\n if args.pg:\n\n conn = psycopg2.connect(args.pg)\n db = conn.cursor()\n\n assert \"limit\" not in args.sql.lower(), \"LIMIT is not supported\"\n db.execute(\"SELECT ST_Srid(geom) AS srid FROM ({} LIMIT 1) AS sub\".format(args.sql))\n srid = db.fetchone()[0]\n assert srid, \"Unable to retrieve geometry SRID.\"\n\n if \"where\" not in args.sql.lower(): # TODO: Find a more reliable way to handle feature filtering\n args.sql += \" WHERE ST_Intersects(tile.geom, geom)\"\n else:\n args.sql += \" AND ST_Intersects(tile.geom, geom)\"\n features = args.sql\n\n log.log(\"RoboSat.pink - rasterize - rasterizing {} from {} on cover {}\".format(args.type, features, args.cover))\n with open(os.path.join(os.path.expanduser(args.out), \"instances.cover\"), mode=\"w\") as cover:\n\n for tile in tqdm(list(tiles_from_csv(os.path.expanduser(args.cover))), ascii=True, unit=\"tile\"):\n\n geojson = None\n\n if 
args.pg:\n\n w, s, e, n = tile_bbox(tile)\n\n query = \"\"\"\n WITH\n tile AS (SELECT ST_Transform(ST_MakeEnvelope({},{},{},{}, 4326), {}) AS geom),\n geom AS (SELECT ST_Intersection(tile.geom, sql.geom) AS geom FROM tile CROSS JOIN LATERAL ({}) sql),\n json AS (SELECT '{{\"type\": \"Feature\", \"geometry\": '\n || ST_AsGeoJSON((ST_Dump(ST_Transform(ST_Force2D(geom.geom), 4326))).geom, 6)\n || '}}' AS features\n FROM geom)\n SELECT '{{\"type\": \"FeatureCollection\", \"features\": [' || Array_To_String(array_agg(features), ',') || ']}}'\n FROM json\n \"\"\".format(\n w, s, e, n, srid, args.sql\n )\n\n db.execute(query)\n row = db.fetchone()\n try:\n geojson = json.loads(row[0])[\"features\"] if row and row[0] else None\n except Exception:\n log.log(\"Warning: Invalid geometries, skipping {}\".format(tile))\n conn = psycopg2.connect(args.pg)\n db = conn.cursor()\n\n if args.geojson:\n geojson = feature_map[tile] if tile in feature_map else None\n\n if geojson:\n num = len(geojson)\n out = geojson_tile_burn(tile, geojson, 4326, args.ts, burn_value)\n\n if not geojson or out is None:\n num = 0\n out = np.zeros(shape=(args.ts, args.ts), dtype=np.uint8)\n\n tile_label_to_file(args.out, tile, palette, out)\n cover.write(\"{},{},{} {}{}\".format(tile.x, tile.y, tile.z, num, os.linesep))\n\n if not args.no_web_ui:\n template = \"leaflet.html\" if not args.web_ui_template else args.web_ui_template\n base_url = args.web_ui_base_url if args.web_ui_base_url else \"./\"\n tiles = [tile for tile in tiles_from_csv(args.cover)]\n web_ui(args.out, base_url, tiles, tiles, \"png\", template)\n" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CCH852573130/3DPrinting10
[ "ca843d728bd7501f332a7946976c40d86b362930" ]
[ "app/src/main/python/image.py" ]
[ "# coding=utf-8\nimport numpy\nimport time\nimport struct\nfrom PIL import Image\nfrom MeshBuilder import MeshBuilder\nfrom Vector import Vector\nfrom CuraSceneNode import CuraSceneNode as SceneNode\n\ndef generateSceneNode(file_name, xz_size, peak_height, base_height, blur_iterations, max_size,lighter_is_higher,file):\n scene_node = SceneNode()\n\n mesh = MeshBuilder()#初始化\n\n # img = QImage(file_name)\n im= Image.open(file_name)\n# if im.isNull():\n # Logger.log(\"e\", \"Image is corrupt.\")\n # return None\n\n # width = max(img.width(), 2)\n # height = max(img.height(), 2)\n width = max(im.size[0], 2)\n height = max(im.size[1], 2)\n aspect = height / width\n\n# if im.width() < 2 or im.height() < 2:\n # img = img.scaled(width, height, Qt.IgnoreAspectRatio)\n # im = im.resize(width, height, Image.ANTIALIAS)\n base_height = max(base_height, 0)\n peak_height = max(peak_height, -base_height)\n\n xz_size = max(xz_size, blur_iterations)\n scale_vector = Vector(xz_size, peak_height, xz_size)\n\n if width > height:\n scale_vector = scale_vector.set(z=scale_vector.z * aspect)\n elif height > width:\n scale_vector = scale_vector.set(x=scale_vector.x / aspect)\n\n if width > max_size or height > max_size:\n scale_factor = max_size / width\n if height > width:\n scale_factor = max_size / height\n\n width = int(max(round(width * scale_factor), 2))\n height = int(max(round(height * scale_factor), 2))\n # img = img.scaled(width, height, Qt.IgnoreAspectRatio)\n im = im.resize((width, height), Image.ANTIALIAS)\n width_minus_one = width - 1\n height_minus_one = height - 1\n\n #Job.yieldThread()\n\n texel_width = 1.0 / (width_minus_one) * scale_vector.x\n texel_height = 1.0 / (height_minus_one) * scale_vector.z\n\n height_data = numpy.zeros((height, width), dtype=numpy.float32)\n\n for x in range(0, width):\n for y in range(0, height):\n # qrgb = img.pixel(x, y)\n qrgb = im.getpixel((x, y))\n R=qrgb[0]\n G=qrgb[1]\n B=qrgb[2]\n avg=float(R+G+B)/(3*255)\n # qR=qRed(qrgb)\n # qG=qGreen(qrgb)\n # qB=qBlue(qrgb)\n # avg=float(qR+qG+qB)/(3 * 255)\n # avg = float(qRed(qrgb) + qGreen(qrgb) + qBlue(qrgb)) / (3 * 255)\n height_data[y, x] = avg\n\n #Job.yieldThread()\n\n if not lighter_is_higher:\n height_data = 1 - height_data\n\n for _ in range(0,blur_iterations):\n copy = numpy.pad(height_data, ((1, 1), (1, 1)), mode=\"edge\")\n\n height_data += copy[1:-1, 2:]\n height_data += copy[1:-1, :-2]\n height_data += copy[2:, 1:-1]\n height_data += copy[:-2, 1:-1]\n\n height_data += copy[2:, 2:]\n height_data += copy[:-2, 2:]\n height_data += copy[2:, :-2]\n height_data += copy[:-2, :-2]\n\n height_data /= 9\n\n # Job.yieldThread()\n\n height_data *= scale_vector.y\n height_data += base_height\n\n heightmap_face_count = 2 * height_minus_one * width_minus_one\n total_face_count = heightmap_face_count + (width_minus_one * 2) * (height_minus_one * 2) + 2\n\n mesh.reserveFaceCount(total_face_count)\n\n # initialize to texel space vertex offsets.\n # 6 is for 6 vertices for each texel quad.\n heightmap_vertices = numpy.zeros((width_minus_one * height_minus_one, 6, 3), dtype=numpy.float32)\n heightmap_vertices = heightmap_vertices + numpy.array([[\n [0, base_height, 0],\n [0, base_height, texel_height],\n [texel_width, base_height, texel_height],\n [texel_width, base_height, texel_height],\n [texel_width, base_height, 0],\n [0, base_height, 0]\n ]], dtype=numpy.float32)\n\n offsetsz, offsetsx = numpy.mgrid[0: height_minus_one, 0: width - 1]\n offsetsx = numpy.array(offsetsx, numpy.float32).reshape(-1, 1) * texel_width\n 
offsetsz = numpy.array(offsetsz, numpy.float32).reshape(-1, 1) * texel_height\n\n # offsets for each texel quad\n heightmap_vertex_offsets = numpy.concatenate(\n [offsetsx, numpy.zeros((offsetsx.shape[0], offsetsx.shape[1]), dtype=numpy.float32), offsetsz], 1)\n heightmap_vertices += heightmap_vertex_offsets.repeat(6, 0).reshape(-1, 6, 3)\n\n # apply height data to y values\n heightmap_vertices[:, 0, 1] = heightmap_vertices[:, 5, 1] = height_data[:-1, :-1].reshape(-1)\n heightmap_vertices[:, 1, 1] = height_data[1:, :-1].reshape(-1)\n heightmap_vertices[:, 2, 1] = heightmap_vertices[:, 3, 1] = height_data[1:, 1:].reshape(-1)\n heightmap_vertices[:, 4, 1] = height_data[:-1, 1:].reshape(-1)\n\n heightmap_indices = numpy.array(numpy.mgrid[0:heightmap_face_count * 3], dtype=numpy.int32).reshape(-1, 3)\n\n mesh._vertices[0:(heightmap_vertices.size // 3), :] = heightmap_vertices.reshape(-1, 3)\n mesh._indices[0:(heightmap_indices.size // 3), :] = heightmap_indices\n\n mesh._vertex_count = heightmap_vertices.size // 3\n mesh._face_count = heightmap_indices.size // 3\n\n geo_width = width_minus_one * texel_width\n geo_height = height_minus_one * texel_height\n\n # bottom\n mesh.addFaceByPoints(0, 0, 0, 0, 0, geo_height, geo_width, 0, geo_height)\n mesh.addFaceByPoints(geo_width, 0, geo_height, geo_width, 0, 0, 0, 0, 0)\n\n # north and south walls\n for n in range(0, width_minus_one):\n x = n * texel_width\n nx = (n + 1) * texel_width\n\n hn0 = height_data[0, n]\n hn1 = height_data[0, n + 1]\n\n hs0 = height_data[height_minus_one, n]\n hs1 = height_data[height_minus_one, n + 1]\n\n mesh.addFaceByPoints(x, 0, 0, nx, 0, 0, nx, hn1, 0)\n mesh.addFaceByPoints(nx, hn1, 0, x, hn0, 0, x, 0, 0)\n\n mesh.addFaceByPoints(x, 0, geo_height, nx, 0, geo_height, nx, hs1, geo_height)\n mesh.addFaceByPoints(nx, hs1, geo_height, x, hs0, geo_height, x, 0, geo_height)\n\n # west and east walls\n for n in range(0, height_minus_one):\n y = n * texel_height\n ny = (n + 1) * texel_height\n\n hw0 = height_data[n, 0]\n hw1 = height_data[n + 1, 0]\n\n he0 = height_data[n, width_minus_one]\n he1 = height_data[n + 1, width_minus_one]\n\n mesh.addFaceByPoints(0, 0, y, 0, 0, ny, 0, hw1, ny)\n mesh.addFaceByPoints(0, hw1, ny, 0, hw0, y, 0, 0, y)\n\n mesh.addFaceByPoints(geo_width, 0, y, geo_width, 0, ny, geo_width, he1, ny)\n mesh.addFaceByPoints(geo_width, he1, ny, geo_width, he0, y, geo_width, 0, y)\n\n mesh.calculateNormals(fast=True)\n\n scene_node.setMeshData(mesh.build())\n saveScene(file, scene_node)\n # return scene_node\n\ndef saveScene(filename, object):\n f = open(filename, 'wb')\n _writeBinary(f, object)\n f.close()\n\ndef _writeBinary(stream, node):\n stream.write(\"Uranium STLWriter {0}\".format(time.strftime(\"%a %d %b %Y %H:%M:%S\")).encode().ljust(80, b\"\\000\"))\n\n face_count = 0\n# nodes = list(node)\n # for node in nodes:\n if node.getMeshData().hasIndices():\n face_count += node.getMeshData().getFaceCount()\n else:\n face_count += node.getMeshData().getVertexCount() / 3\n\n stream.write(struct.pack(\"<I\", int(face_count))) # Write number of faces to STL\n\n# for node in node:\n mesh_data = node.getMeshData().getTransformed(node.getWorldTransformation())\n\n if mesh_data.hasIndices():\n verts = mesh_data.getVertices()\n for face in mesh_data.getIndices():\n v1 = verts[face[0]]\n v2 = verts[face[1]]\n v3 = verts[face[2]]\n stream.write(struct.pack(\"<fff\", 0.0, 0.0, 0.0))\n stream.write(struct.pack(\"<fff\", v1[0], -v1[2], v1[1]))\n stream.write(struct.pack(\"<fff\", v2[0], -v2[2], v2[1]))\n 
stream.write(struct.pack(\"<fff\", v3[0], -v3[2], v3[1]))\n stream.write(struct.pack(\"<H\", 0))\n else:\n num_verts = mesh_data.getVertexCount()\n verts = mesh_data.getVertices()\n for index in range(0, num_verts - 1, 3):\n v1 = verts[index]\n v2 = verts[index + 1]\n v3 = verts[index + 2]\n stream.write(struct.pack(\"<fff\", 0.0, 0.0, 0.0))\n stream.write(struct.pack(\"<fff\", v1[0], -v1[2], v1[1]))\n stream.write(struct.pack(\"<fff\", v2[0], -v2[2], v2[1]))\n stream.write(struct.pack(\"<fff\", v3[0], -v3[2], v3[1]))\n stream.write(struct.pack(\"<H\", 0))\n\n\n#file = \"/sdcard/Android/data/com.android.browser/files/yushengnan4.stl\"\n#xz_size = 120.0\n#peak_height = 20\n#base_height = 0.4\n#blur_iterations= 1\n#max_size = 512\n#lighter_is_higher = False\n\n#object = generateSceneNode(file_name, xz_size, peak_height, base_height, blur_iterations, max_size, lighter_is_higher,file)\n#generateSceneNode(file_name, 120, 20, 0.4, 1, 512, False)\n#file = 'F:\\\\111\\\\img5.stl'\n#saveScene(file,object)\n" ]
[ [ "numpy.array", "numpy.zeros", "numpy.pad" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sdevenes/M05_MiniProject
[ "38f81c6cc0b0d8f777c51609118b160c010c5590" ]
[ "rr/experiment/database.py" ]
[ "import numpy as np\nimport csv\nfrom sklearn.model_selection import train_test_split\n\nPROTOCOLS = {\n \"proto1\": {\"train\": 0.8, \"test\": 0.2, \"random\": 1},\n \"proto2\": {\"train\": 0.8, \"test\": 0.2, \"random\": 2},\n}\n\nSUBSETS = [\"train\", \"validation\", \"test\"]\n\nCLASSES = [\n \"Other_Activity\",\n \"Watch_TV\",\n \"Sleep_Out_Of_Bed\",\n \"Bathe\",\n \"Cook_Breakfast\",\n \"Dress\",\n \"Toilet\",\n \"Personal_Hygiene\",\n \"Sleep\",\n \"Read\",\n \"Relax\",\n \"Cook_Dinner\",\n \"Drink\",\n \"Eat_Breakfast\",\n \"Morning_Meds\",\n \"Evening_Meds\",\n \"Wash_Breakfast_Dishes\",\n \"Cook_Lunch\",\n \"Wash_Dishes\",\n \"Leave_Home\",\n \"Cook\",\n \"Enter_Home\",\n \"Entertain_Guests\",\n \"Wash_Dinner_Dishes\",\n \"Phone\",\n \"Groom\",\n \"Step_Out\",\n \"Eat_Dinner\",\n \"Eat_Lunch\",\n \"Wash_Lunch_Dishes\",\n \"Bed_Toilet_Transition\",\n \"Eat\",\n \"Go_To_Sleep\",\n \"Wake_Up\",\n \"Work_At_Table\",\n]\n\nVARIABLES = [\n \"lastSensorEventHours\",\n \"lastSensorEventSeconds\",\n \"lastSensorDayOfWeek\",\n \"windowDuration\",\n \"timeSinceLastSensorEvent\",\n \"prevDominantSensor1\",\n \"prevDominantSensor2\",\n \"lastSensorID\",\n \"lastSensorLocation\",\n \"lastMotionLocation\",\n \"complexity\",\n \"activityChange\",\n \"areaTransitions\",\n \"numDistinctSensors\",\n \"sensorCount-Bathroom\",\n \"sensorCount-Bedroom\",\n \"sensorCount-Chair\",\n \"sensorCount-DiningRoom\",\n \"sensorCount-Hall\",\n \"sensorCount-Ignore\",\n \"sensorCount-Kitchen\",\n \"sensorCount-LivingRoom\",\n \"sensorCount-Office\",\n \"sensorCount-OutsideDoor\",\n \"sensorCount-WorkArea\",\n \"sensorElTime-Bathroom\",\n \"sensorElTime-Bedroom\",\n \"sensorElTime-Chair\",\n \"sensorElTime-DiningRoom\",\n \"sensorElTime-Hall\",\n \"sensorElTime-Ignore\",\n \"sensorElTime-Kitchen\",\n \"sensorElTime-LivingRoom\",\n \"sensorElTime-Office\",\n \"sensorElTime-OutsideDoor\",\n \"sensorElTime-WorkArea\",\n]\n\n\ndef load(filepath=\"./data/csh101/csh101.ann.features.csv\"):\n \"\"\"Loads the dataset\n\n Args:\n filepath (str): path to the file containing the dataset to load\n Returns:\n x (numpy.ndarray):A NxM 2D-array where each row corresponds to a sample and each column to a feature\n y (numpy.ndarray): A 1D-array of length N, where each element corresponds to a sample label\n Raises:\n None\n \"\"\"\n x = []\n y = []\n with open(filepath, \"rt\") as f:\n reader = csv.reader(f, delimiter=\",\")\n for k, row in enumerate(reader):\n if not k:\n continue\n x.append(row[:-1])\n y.append(row[-1])\n return np.array(x), np.array(y)\n\n\ndef split_data(x, y, subset, splits):\n \"\"\"Splits the data set\n\n Args:\n x (numpy.ndarray):A NxM 2D-array where each row corresponds to a sample and each column to a feature\n y (numpy.ndarray): A 1D-array of length N, where each element corresponds to a sample label\n subset (str): subset to extract (train or test)\n splits (dict): a dictionary mapping the subsets to their dataset proportion and the random state to use for splitting\n Returns:\n x_split (numpy.ndarray):A PxM 2D-array containing only a subset of samples\n y_split (numpy.ndarray): A 1D-array of length P containing only the labels corresponding to the subset x_split\n Raises:\n None\n \"\"\"\n x_train, x_test, y_train, y_test = train_test_split(\n x,\n y,\n test_size=splits[\"test\"],\n train_size=splits[\"train\"],\n random_state=splits[\"random\"],\n stratify=y,\n )\n (x_split, y_split) = (x_train, y_train) if subset == \"train\" else (x_test, y_test)\n return x_split, y_split\n\n\ndef get(\n 
protocol,\n subset,\n classes=CLASSES,\n variables=VARIABLES,\n filepath=\"./data/csh101/csh101.ann.features.csv\",\n):\n \"\"\"Get the desired subset\n\n Args:\n protocol (str): protocol to use\n subset (str): subset to extract (train or test)\n classes (list): list of desired classes\n variables (list): list of desired variables (features)\n filepath (str): path to the file containing the dataset to load\n Returns:\n ret_x (numpy.ndarray):A PxQ 2D-array containing only the desired subset of samples with the Q desired features\n ret_y (numpy.ndarray): A 1D-array of length P containing only the labels corresponding to the subset ret_x\n Raises:\n None\n \"\"\"\n x, y = load(filepath)\n x_split, y_split = split_data(x, y, subset, PROTOCOLS[protocol])\n var_index = [VARIABLES.index(k) for k in variables]\n classes_condition = np.isin(y_split, classes)\n ret_x = x_split[classes_condition][:, var_index]\n ret_y = y_split[classes_condition]\n return ret_x, ret_y\n" ]
[ [ "numpy.array", "sklearn.model_selection.train_test_split", "numpy.isin" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
IvanaEscobar/xgcm
[ "95f4f33d72d2add00136e27f6b3bedecb97d4d77" ]
[ "xgcm/test/test_autogenerate.py" ]
[ "import numpy as np\nimport pytest\nimport xarray as xr\nfrom xarray.testing import assert_allclose, assert_equal\n\nfrom xgcm.autogenerate import (\n _fill_attrs,\n _parse_boundary_params,\n _parse_position,\n _position_to_relative,\n generate_axis,\n generate_grid_ds,\n)\n\n# create test datasets\npad_value = 1000\ndx = 5.0\ndy = 2.5\ndz = 0.5\na = np.random.rand(int(180 / dy), int(360 / dx), int(10 / dz))\nx = np.arange(-180 + dx, 180 + dx, dx)\ny = np.arange(-90 + dy, 90 + dy, dy)\nz = np.arange(0 + dz, 10 + dz, dz)\nx_outer = np.arange(-180 + dx / 2, 180 + dx, dx)\ny_outer = np.arange(-90 + dy / 2, 90 + dy, dy)\nz_outer = np.arange(0 + dz / 2, 10 + dz, dz)\nz_1D = z[0:3]\nz_1D_padded = np.hstack([z[0:2] + (dz / 2.0), (z[2] + pad_value) / 2])\n\nxx, yy = np.meshgrid(x, y)\n_, _, zz = np.meshgrid(x, y, z)\n\nxx_outer, _ = np.meshgrid(x_outer, y)\n_, yy_outer = np.meshgrid(x, y_outer)\n_, _, zz_outer = np.meshgrid(x, y, z_outer)\n\nds_original = xr.Dataset(\n {\"somedata\": ([\"lat\", \"lon\", \"z\"], a)},\n coords={\n \"lon\": ([\"lon\"], x + (dx / 2.0)),\n \"lat\": ([\"lat\"], y + (dy / 2.0)),\n \"z\": ([\"z\"], z + (dz / 2.0)),\n \"llon\": ([\"lat\", \"lon\"], xx + (dx / 2.0)),\n \"llat\": ([\"lat\", \"lon\"], yy + (dy / 2.0)),\n \"zz\": ([\"lat\", \"lon\", \"z\"], zz + (dz / 2.0)),\n },\n)\n\nds_original_left = xr.Dataset(\n {\"somedata\": ([\"lat\", \"lon\", \"z\"], a)},\n coords={\n \"lon\": ([\"lon\"], x),\n \"lat\": ([\"lat\"], y),\n \"z\": ([\"z\"], z),\n \"llon\": ([\"lat\", \"lon\"], xx),\n \"llat\": ([\"lat\", \"lon\"], yy),\n \"zz\": ([\"lat\", \"lon\", \"z\"], zz),\n },\n)\n\nds_original_1D = xr.Dataset(\n {\"somedata\": ([\"z\"], np.array([1, 2, 3]))}, coords={\"z\": ([\"z\"], z_1D)}\n)\nds_original_1D_padded = xr.Dataset(\n {\"somedata\": ([\"z\"], np.array([1, 2, 3]))},\n coords={\"z\": ([\"z\"], z[0:3]), \"test\": ([\"test\"], z_1D_padded)},\n)\nds_out_left = xr.Dataset(\n {\"somedata\": ([\"lat\", \"lon\", \"z\"], a)},\n coords={\n \"lon\": ([\"lon\"], x + (dx / 2.0), {\"axis\": \"X\"}),\n \"lat\": ([\"lat\"], y + (dy / 2.0), {\"axis\": \"Y\"}),\n \"z\": ([\"z\"], z + (dz / 2.0), {\"axis\": \"Z\"}),\n \"llon\": ([\"lat\", \"lon\"], xx + (dx / 2.0), {\"axis\": \"X\"}),\n \"llat\": ([\"lat\", \"lon\"], yy + (dy / 2.0), {\"axis\": \"Y\"}),\n \"zz\": ([\"lat\", \"lon\", \"z\"], zz + (dz / 2.0), {\"axis\": \"Z\"}),\n \"lon_left\": ([\"lon_left\"], x, {\"axis\": \"X\", \"c_grid_axis_shift\": -0.5}),\n \"lat_left\": ([\"lat_left\"], y, {\"axis\": \"Y\", \"c_grid_axis_shift\": -0.5}),\n \"z_left\": ([\"z_left\"], z, {\"axis\": \"Z\", \"c_grid_axis_shift\": -0.5}),\n \"llon_left\": (\n [\"lat\", \"lon_left\"],\n xx,\n {\"axis\": \"X\", \"c_grid_axis_shift\": -0.5},\n ),\n \"llat_left\": (\n [\"lat_left\", \"lon\"],\n yy,\n {\"axis\": \"Y\", \"c_grid_axis_shift\": -0.5},\n ),\n \"zz_left\": (\n [\"lat\", \"lon\", \"z_left\"],\n zz,\n {\"axis\": \"Z\", \"c_grid_axis_shift\": -0.5},\n ),\n },\n)\n\nds_out_right = xr.Dataset(\n {\"somedata\": ([\"lat\", \"lon\", \"z\"], a)},\n coords={\n \"lon\": ([\"lon\"], x + (dx / 2.0), {\"axis\": \"X\", \"c_grid_axis_shift\": -0.5}),\n \"lat\": ([\"lat\"], y + (dy / 2.0), {\"axis\": \"Y\", \"c_grid_axis_shift\": -0.5}),\n \"z\": ([\"z\"], z + (dz / 2.0), {\"axis\": \"Z\", \"c_grid_axis_shift\": -0.5}),\n \"llon\": (\n [\"lat\", \"lon\"],\n xx + (dx / 2.0),\n {\"axis\": \"X\", \"c_grid_axis_shift\": -0.5},\n ),\n \"llat\": (\n [\"lat\", \"lon\"],\n yy + (dy / 2.0),\n {\"axis\": \"Y\", \"c_grid_axis_shift\": -0.5},\n ),\n \"zz\": (\n [\"lat\", 
\"lon\", \"z\"],\n zz + (dz / 2.0),\n {\"axis\": \"Z\", \"c_grid_axis_shift\": -0.5},\n ),\n \"lon_right\": ([\"lon_right\"], x + dx, {\"axis\": \"X\"}),\n \"lat_right\": ([\"lat_right\"], y + dy, {\"axis\": \"Y\"}),\n \"z_right\": ([\"z_right\"], z + dz, {\"axis\": \"Z\"}),\n \"llon_right\": ([\"lat\", \"lon_right\"], xx + dx, {\"axis\": \"X\"}),\n \"llat_right\": ([\"lat_right\", \"lon\"], yy + dy, {\"axis\": \"Y\"}),\n \"zz_right\": ([\"lat\", \"lon\", \"z_right\"], zz + dz, {\"axis\": \"Z\"}),\n },\n)\n\nds_out_center = xr.Dataset(\n {\"somedata\": ([\"lat\", \"lon\", \"z\"], a)},\n coords={\n \"lon\": ([\"lon\"], x, {\"axis\": \"X\", \"c_grid_axis_shift\": -0.5}),\n \"lat\": ([\"lat\"], y, {\"axis\": \"Y\", \"c_grid_axis_shift\": -0.5}),\n \"z\": ([\"z\"], z, {\"axis\": \"Z\", \"c_grid_axis_shift\": -0.5}),\n \"llon\": ([\"lat\", \"lon\"], xx, {\"axis\": \"X\", \"c_grid_axis_shift\": -0.5}),\n \"llat\": ([\"lat\", \"lon\"], yy, {\"axis\": \"Y\", \"c_grid_axis_shift\": -0.5}),\n \"zz\": ([\"lat\", \"lon\", \"z\"], zz, {\"axis\": \"Z\", \"c_grid_axis_shift\": -0.5}),\n \"lon_center\": ([\"lon_center\"], x + (dx / 2.0), {\"axis\": \"X\"}),\n \"lat_center\": ([\"lat_center\"], y + (dy / 2.0), {\"axis\": \"Y\"}),\n \"z_center\": ([\"z_center\"], z + (dz / 2.0), {\"axis\": \"Z\"}),\n \"llon_center\": ([\"lat\", \"lon_center\"], xx + (dx / 2.0), {\"axis\": \"X\"}),\n \"llat_center\": ([\"lat_center\", \"lon\"], yy + (dy / 2.0), {\"axis\": \"Y\"}),\n \"zz_center\": ([\"lat\", \"lon\", \"z_center\"], zz + (dz / 2.0), {\"axis\": \"Z\"}),\n },\n)\n\nds_out_outer = xr.Dataset(\n {\"somedata\": ([\"lat\", \"lon\", \"z\"], a)},\n coords={\n \"lon\": ([\"lon\"], x, {\"axis\": \"X\"}),\n \"lat\": ([\"lat\"], y, {\"axis\": \"Y\"}),\n \"z\": ([\"z\"], z, {\"axis\": \"Z\"}),\n \"llon\": ([\"lat\", \"lon\"], xx, {\"axis\": \"X\"}),\n \"llat\": ([\"lat\", \"lon\"], yy, {\"axis\": \"Y\"}),\n \"zz\": ([\"lat\", \"lon\", \"z\"], zz, {\"axis\": \"Z\"}),\n \"lon_outer\": ([\"lon_outer\"], x_outer, {\"axis\": \"X\", \"c_grid_axis_shift\": -0.5}),\n \"lat_outer\": ([\"lat_outer\"], y_outer, {\"axis\": \"Y\", \"c_grid_axis_shift\": -0.5}),\n \"z_outer\": ([\"z_outer\"], z_outer, {\"axis\": \"Z\", \"c_grid_axis_shift\": -0.5}),\n \"llon_outer\": (\n [\"lat\", \"lon_outer\"],\n xx_outer,\n {\"axis\": \"X\", \"c_grid_axis_shift\": -0.5},\n ),\n \"llat_outer\": (\n [\"lat_outer\", \"lon\"],\n yy_outer,\n {\"axis\": \"Y\", \"c_grid_axis_shift\": -0.5},\n ),\n \"zz_outer\": (\n [\"lat\", \"lon\", \"z_outer\"],\n zz_outer,\n {\"axis\": \"Z\", \"c_grid_axis_shift\": -0.5},\n ),\n },\n)\n\n\ndef test_generate_axis():\n a = generate_axis(\n ds_original,\n \"X\",\n \"lon\",\n \"lon\",\n pos_from=\"center\",\n pos_to=\"right\",\n pad=None,\n boundary_discontinuity=360,\n )\n b = generate_axis(\n ds_original,\n \"Y\",\n \"lat\",\n \"lat\",\n pos_from=\"center\",\n pos_to=\"left\",\n pad=None,\n boundary_discontinuity=180,\n )\n c = generate_axis(\n ds_original, \"Z\", \"z\", \"z\", pos_from=\"center\", pos_to=\"left\", pad=\"auto\"\n )\n d = generate_axis(\n ds_original_1D,\n \"Z\",\n \"z\",\n \"z\",\n pos_from=\"left\",\n pos_to=\"center\",\n pad=pad_value,\n new_name=\"test\",\n )\n e = generate_axis(\n ds_original_left, \"Z\", \"z\", \"z\", pos_from=\"left\", pos_to=\"center\", pad=\"auto\"\n )\n f = generate_axis(\n ds_original_left, \"Z\", \"z\", \"z\", pos_from=\"center\", pos_to=\"outer\", pad=\"auto\"\n )\n g = generate_axis(\n ds_original_left,\n \"X\",\n \"lon\",\n \"lon\",\n pos_from=\"center\",\n 
pos_to=\"outer\",\n pad=None,\n boundary_discontinuity=360,\n )\n\n assert_allclose(a[\"lon_right\"], ds_out_right[\"lon_right\"])\n assert_allclose(b[\"lat_left\"], ds_out_left[\"lat_left\"])\n assert_allclose(c[\"z_left\"], ds_out_left[\"z_left\"])\n assert_allclose(d[\"test\"], ds_original_1D_padded[\"test\"])\n assert_allclose(e[\"z_center\"], ds_out_center[\"z_center\"])\n assert_allclose(f[\"z_outer\"], ds_out_outer[\"z_outer\"])\n assert_allclose(g[\"lon_outer\"], ds_out_outer[\"lon_outer\"])\n\n # Mulitdim cases\n aa = generate_axis(\n a,\n \"X\",\n \"llon\",\n \"lon\",\n pos_from=\"center\",\n pos_to=\"right\",\n pad=None,\n boundary_discontinuity=360,\n attrs_from_scratch=False,\n )\n bb = generate_axis(\n b,\n \"Y\",\n \"llat\",\n \"lat\",\n pos_from=\"center\",\n pos_to=\"left\",\n pad=None,\n boundary_discontinuity=180,\n attrs_from_scratch=False,\n )\n ee = generate_axis(\n e,\n \"Z\",\n \"zz\",\n \"z\",\n pos_from=\"left\",\n pos_to=\"center\",\n pad=\"auto\",\n attrs_from_scratch=False,\n )\n ff = generate_axis(\n f,\n \"Z\",\n \"zz\",\n \"z\",\n pos_from=\"center\",\n pos_to=\"outer\",\n pad=\"auto\",\n attrs_from_scratch=False,\n )\n\n gg = generate_axis(\n g,\n \"X\",\n \"llon\",\n \"lon\",\n pos_from=\"center\",\n pos_to=\"outer\",\n pad=None,\n boundary_discontinuity=360,\n attrs_from_scratch=False,\n )\n\n assert_allclose(aa[\"llon_right\"], ds_out_right[\"llon_right\"])\n assert_allclose(bb[\"llat_left\"], ds_out_left[\"llat_left\"])\n assert_allclose(ee[\"zz_center\"], ds_out_center[\"zz_center\"])\n assert_allclose(ff[\"zz_outer\"], ds_out_outer[\"zz_outer\"])\n assert_allclose(gg[\"llon_outer\"], ds_out_outer[\"llon_outer\"])\n\n with pytest.raises(ValueError):\n # Check if generate axis fails when a DataArray is passed instead of\n # Dataset\n generate_axis(\n c[\"somedata\"],\n \"Z\",\n \"zz\",\n \"z\",\n pos_from=\"left\",\n pos_to=\"center\",\n pad=\"auto\",\n attrs_from_scratch=False,\n )\n with pytest.raises(ValueError):\n generate_axis(c, \"Z\", \"zz\", \"z\", pad=\"auto\", boundary_discontinuity=360)\n with pytest.raises(ValueError):\n generate_axis(c, \"Z\", \"zz\", \"z\", pad=None, boundary_discontinuity=None)\n\n\ndef test_generate_grid_ds():\n # This needs more cases\n axis_dims = {\"X\": \"lon\", \"Y\": \"lat\", \"Z\": \"z\"}\n axis_coords = {\"X\": \"llon\", \"Y\": \"llat\", \"Z\": \"zz\"}\n position = (\"center\", \"outer\")\n boundary_discontinuity = {\"lon\": 360, \"llon\": 360, \"lat\": 180, \"llat\": 180}\n pad = {\"z\": \"auto\", \"zz\": \"auto\"}\n ds = generate_grid_ds(\n ds_original_left, axis_dims, axis_coords, position, boundary_discontinuity, pad\n )\n assert_equal(ds, ds_out_outer)\n\n\ndef test_parse_boundary_params():\n assert _parse_boundary_params(360, \"anything\") == 360\n assert _parse_boundary_params({\"something\": 360}, \"something\") == 360\n assert _parse_boundary_params({\"something\": 360}, \"something_else\") is None\n\n\[email protected](\n \"p_f, p_t\",\n [(\"left\", \"center\"), (\"center\", \"left\"), (\"center\", \"right\"), (\"right\", \"center\")],\n)\ndef test_parse_position(p_f, p_t):\n default = (\"center\", \"left\")\n assert _parse_position((p_f, p_t), \"anything\") == (p_f, p_t)\n assert _parse_position({\"a\": (p_f, p_t)}, \"a\") == (p_f, p_t)\n assert _parse_position({\"a\": (p_f, p_t)}, \"b\") == default\n assert _parse_position({\"a\": (p_f, p_t), \"b\": (p_t, p_f)}, \"a\") == (p_f, p_t)\n assert _parse_position({\"a\": (p_f, p_t), \"b\": (p_t, p_f)}, \"b\") == (p_t, p_f)\n\n\[email protected](\n \"p, 
relative\",\n [\n ((\"left\", \"center\"), \"right\"),\n ((\"center\", \"left\"), \"left\"),\n ((\"center\", \"right\"), \"right\"),\n ((\"right\", \"center\"), \"left\"),\n ((\"center\", \"outer\"), \"outer\"),\n ((\"center\", \"inner\"), \"inner\"),\n ],\n)\ndef test_position_to_relative(p, relative):\n assert _position_to_relative(p[0], p[1]) == relative\n error_test = [\n (\"left\", \"right\"),\n (\"inner\", \"outer\"),\n (\"left\", \"outer\"),\n (\"right\", \"outer\"),\n (\"left\", \"inner\"),\n (\"right\", \"inner\"),\n ]\n for et in error_test:\n with pytest.raises(RuntimeError):\n _position_to_relative(et[0], et[1])\n with pytest.raises(RuntimeError):\n _position_to_relative(et[1], et[0])\n\n\ndef test_fill_attrs():\n a = _fill_attrs(ds_out_right[\"lon\"], \"right\", \"X\")\n assert a.attrs[\"axis\"] == \"X\"\n assert a.attrs[\"c_grid_axis_shift\"] == 0.5\n\n a = _fill_attrs(ds_out_right[\"lon\"], \"left\", \"Z\")\n assert a.attrs[\"axis\"] == \"Z\"\n assert a.attrs[\"c_grid_axis_shift\"] == -0.5\n\n a = _fill_attrs(ds_out_right[\"lon\"], \"center\", \"Y\")\n assert a.attrs[\"axis\"] == \"Y\"\n assert \"c_grid_axis_shift\" not in a.attrs.keys()\n" ]
[ [ "numpy.arange", "numpy.array", "numpy.meshgrid", "numpy.hstack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ignacia-fp/bempp-cl
[ "a65232558826e51e624b1a4f649b6a0ed5a7f551" ]
[ "bempp/api/external/fenicsx.py" ]
[ "\"\"\"Interface to DOLFINX for FEM-BEM coupling.\"\"\"\n\n\ndef boundary_grid_from_fenics_mesh(fenics_mesh):\n \"\"\"\n Create a Bempp boundary grid from a FEniCS Mesh.\n\n Return the Bempp grid and a map from the node numberings of the FEniCS\n mesh to the node numbers of the boundary grid.\n \"\"\"\n import bempp.api\n import numpy as np\n from dolfinx.cpp.mesh import entities_to_geometry, exterior_facet_indices\n\n boundary = entities_to_geometry(\n fenics_mesh,\n fenics_mesh.topology.dim - 1,\n exterior_facet_indices(fenics_mesh),\n True,\n )\n\n bm_nodes = set()\n for tri in boundary:\n for node in tri:\n bm_nodes.add(node)\n bm_nodes = list(bm_nodes)\n bm_cells = np.array([[bm_nodes.index(i) for i in tri] for tri in boundary])\n bm_coords = fenics_mesh.geometry.x[bm_nodes]\n\n bempp_boundary_grid = bempp.api.Grid(bm_coords.transpose(), bm_cells.transpose())\n\n return bempp_boundary_grid, bm_nodes\n\n\ndef fenics_to_bempp_trace_data(fenics_space):\n \"\"\"Return tuple (space,trace_matrix).\"\"\"\n family, degree = fenics_space_info(fenics_space)\n\n if family == \"Lagrange\":\n if degree == 1:\n return p1_trace(fenics_space)\n else:\n raise NotImplementedError()\n\n\ndef fenics_space_info(fenics_space):\n \"\"\"Return tuple (family,degree) containing information about a FEniCS space.\"\"\"\n element = fenics_space.ufl_element()\n family = element.family()\n degree = element.degree()\n return (family, degree)\n\n\n# pylint: disable=too-many-locals\ndef p1_trace(fenics_space):\n \"\"\"\n Return the P1 trace operator.\n\n This function returns a pair (space, trace_matrix),\n where space is a Bempp space object and trace_matrix is the corresponding\n matrix that maps the coefficients of a FEniCS function to its boundary\n trace coefficients in the corresponding Bempp space.\n \"\"\"\n import bempp.api\n from scipy.sparse import coo_matrix\n import numpy as np\n\n family, degree = fenics_space_info(fenics_space)\n if not (family == \"Lagrange\" and degree == 1):\n raise ValueError(\"fenics_space must be a p1 Lagrange space\")\n\n fenics_mesh = fenics_space.mesh\n bempp_boundary_grid, bm_nodes = boundary_grid_from_fenics_mesh(fenics_mesh)\n\n # First get trace space\n space = bempp.api.function_space(bempp_boundary_grid, \"P\", 1)\n\n num_fenics_vertices = fenics_mesh.topology.connectivity(0, 0).num_nodes\n\n # FEniCS vertices to bempp dofs\n b_vertices_from_vertices = coo_matrix(\n (np.ones(len(bm_nodes)), (np.arange(len(bm_nodes)), bm_nodes)),\n shape=(len(bm_nodes), num_fenics_vertices),\n dtype=\"float64\",\n ).tocsc()\n\n # Finally FEniCS dofs to vertices.\n dof_to_vertex_map = np.zeros(num_fenics_vertices, dtype=np.int64)\n tets = fenics_mesh.geometry.dofmap\n for tet in range(tets.num_nodes):\n cell_dofs = fenics_space.dofmap.cell_dofs(tet)\n cell_verts = tets.links(tet)\n for v in range(4):\n vertex_n = cell_verts[v]\n dof = cell_dofs[fenics_space.dofmap.dof_layout.entity_dofs(0, v)[0]]\n dof_to_vertex_map[dof] = vertex_n\n\n vertices_from_fenics_dofs = coo_matrix(\n (\n np.ones(num_fenics_vertices),\n (dof_to_vertex_map, np.arange(num_fenics_vertices)),\n ),\n shape=(num_fenics_vertices, num_fenics_vertices),\n dtype=\"float64\",\n ).tocsc()\n\n # Get trace matrix by multiplication\n trace_matrix = b_vertices_from_vertices @ vertices_from_fenics_dofs\n\n # Now return everything\n return space, trace_matrix\n\n\nclass FenicsOperator(object):\n \"\"\"Wrap a FEniCS-X Operator into a Bempp operator.\"\"\"\n\n def __init__(self, fenics_weak_form):\n \"\"\"Construct an operator from 
a weak form in FEniCS.\"\"\"\n self._fenics_weak_form = fenics_weak_form\n self._sparse_mat = None\n\n def weak_form(self):\n \"\"\"Return the weak form.\"\"\"\n from bempp.api.assembly.discrete_boundary_operator import (\n SparseDiscreteBoundaryOperator,\n )\n from dolfinx.fem import assemble_matrix, form\n from scipy.sparse import csr_matrix\n\n if self._sparse_mat is None:\n mat = assemble_matrix(form(self._fenics_weak_form))\n mat.finalize()\n shape = tuple(\n i._ufl_function_space.dofmap.index_map.size_global\n for i in self._fenics_weak_form.arguments()\n )\n self._sparse_mat = csr_matrix(\n (mat.data, mat.indices, mat.indptr), shape=shape\n )\n\n return SparseDiscreteBoundaryOperator(self._sparse_mat)\n" ]
[ [ "numpy.arange", "numpy.zeros", "scipy.sparse.csr_matrix", "numpy.ones" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] } ]
jostl/masters-thesis
[ "211e1f12a07428d37507e2bddc808f6da1149efb", "211e1f12a07428d37507e2bddc808f6da1149efb" ]
[ "bird_view/models/birdview.py", "perception/training/train_depth.py" ]
[ "import cv2\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nfrom . import common\nfrom .agent import Agent\nfrom .controller import PIDController, CustomController\nfrom .controller import ls_circle\n\n\nSTEPS = 5\nSPEED_STEPS = 3\nCOMMANDS = 4\nDT = 0.1\nCROP_SIZE = 192\nPIXELS_PER_METER = 5\n\n\ndef regression_base():\n return nn.Sequential(\n nn.ConvTranspose2d(640,256,4,2,1,0),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.ConvTranspose2d(256,128,4,2,1,0),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n nn.ConvTranspose2d(128,64,4,2,1,0),\n nn.BatchNorm2d(64),\n nn.ReLU(True))\n\n\ndef spatial_softmax_base():\n return nn.Sequential(\n nn.BatchNorm2d(640),\n nn.ConvTranspose2d(640,256,3,2,1,1),\n nn.ReLU(True),\n nn.BatchNorm2d(256),\n nn.ConvTranspose2d(256,128,3,2,1,1),\n nn.ReLU(True),\n nn.BatchNorm2d(128),\n nn.ConvTranspose2d(128,64,3,2,1,1),\n nn.ReLU(True))\n\n\nclass BirdViewPolicyModelSS(common.ResnetBase):\n def __init__(self, backbone='resnet18', input_channel=7, n_step=5, all_branch=False, **kwargs):\n super().__init__(backbone=backbone, input_channel=input_channel, bias_first=False)\n\n self.deconv = spatial_softmax_base()\n self.location_pred = nn.ModuleList([\n nn.Sequential(\n nn.BatchNorm2d(64),\n nn.Conv2d(64,STEPS,1,1,0),\n common.SpatialSoftmax(48,48,STEPS)) for i in range(COMMANDS)\n ])\n \n self.all_branch = all_branch\n\n def forward(self, bird_view, velocity, command):\n h = self.conv(bird_view)\n b, c, kh, kw = h.size()\n\n # Late fusion for velocity\n velocity = velocity[...,None,None,None].repeat((1,128,kh,kw))\n\n h = torch.cat((h, velocity), dim=1)\n h = self.deconv(h)\n\n location_preds = [location_pred(h) for location_pred in self.location_pred]\n location_preds = torch.stack(location_preds, dim=1)\n \n location_pred = common.select_branch(location_preds, command)\n \n if self.all_branch:\n return location_pred, location_preds\n \n return location_pred\n\n\nclass BirdViewAgent(Agent):\n def __init__(self, steer_points=None, pid=None, gap=5, **kwargs):\n super().__init__(**kwargs)\n\n self.speed_control = PIDController(K_P=1.0, K_I=0.1, K_D=2.5)\n\n if steer_points is None:\n steer_points = {\"1\": 3, \"2\": 2, \"3\": 2, \"4\": 2}\n \n if pid is None:\n pid = {\n \"1\": {\"Kp\": 1.0, \"Ki\": 0.1, \"Kd\": 0}, # Left\n \"2\": {\"Kp\": 1.0, \"Ki\": 0.1, \"Kd\": 0}, # Right\n \"3\": {\"Kp\": 0.8, \"Ki\": 0.1, \"Kd\": 0}, # Straight\n \"4\": {\"Kp\": 0.8, \"Ki\": 0.1, \"Kd\": 0}, # Follow\n }\n \n self.turn_control = CustomController(pid)\n self.steer_points = steer_points\n\n self.gap = gap\n\n def run_step(self, observations, teaching=False):\n birdview = common.crop_birdview(observations['birdview'], dx=-10)\n speed = np.linalg.norm(observations['velocity'])\n command = self.one_hot[int(observations['command']) - 1]\n\n with torch.no_grad():\n _birdview = self.transform(birdview).to(self.device).unsqueeze(0)\n _speed = torch.FloatTensor([speed]).to(self.device)\n _command = command.to(self.device).unsqueeze(0)\n \n if self.model.all_branch:\n _locations, _ = self.model(_birdview, _speed, _command)\n else:\n _locations = self.model(_birdview, _speed, _command)\n _locations = _locations.squeeze().detach().cpu().numpy()\n \n _map_locations = _locations\n # Pixel coordinates.\n _locations = (_locations + 1) / 2 * CROP_SIZE\n\n targets = list()\n\n for i in range(STEPS):\n pixel_dx, pixel_dy = _locations[i]\n pixel_dx = pixel_dx - CROP_SIZE / 2\n pixel_dy = CROP_SIZE - pixel_dy\n\n angle = np.arctan2(pixel_dx, pixel_dy)\n dist = np.linalg.norm([pixel_dx, 
pixel_dy]) / PIXELS_PER_METER\n\n targets.append([dist * np.cos(angle), dist * np.sin(angle)])\n\n target_speed = 0.0\n\n for i in range(1, SPEED_STEPS):\n pixel_dx, pixel_dy = _locations[i]\n prev_dx, prev_dy = _locations[i-1]\n\n dx = pixel_dx - prev_dx\n dy = pixel_dy - prev_dy\n delta = np.linalg.norm([dx, dy])\n\n target_speed += delta / (PIXELS_PER_METER * self.gap * DT) / (SPEED_STEPS-1)\n\n _cmd = int(observations['command'])\n n = self.steer_points.get(str(_cmd), 1)\n targets = np.concatenate([[[0, 0]], targets], 0)\n c, r = ls_circle(targets)\n closest = common.project_point_to_circle(targets[n], c, r)\n\n v = [1.0, 0.0, 0.0]\n w = [closest[0], closest[1], 0.0]\n alpha = common.signed_angle(v, w)\n steer = self.turn_control.run_step(alpha, _cmd)\n throttle = self.speed_control.step(target_speed - speed)\n brake = 0.0\n\n if target_speed < 1.0:\n steer = 0.0\n throttle = 0.0\n brake = 1.0\n\n self.debug['locations_birdview'] = _locations[:,::-1].astype(int)\n self.debug['target'] = closest\n self.debug['target_speed'] = target_speed\n\n control = self.postprocess(steer, throttle, brake)\n if teaching:\n return control, _map_locations\n else:\n return control\n", "import copy\nimport math\nimport time\nfrom pathlib import Path\nimport random\n\nfrom kornia.filters import spatial_gradient\nfrom pytorch_msssim import ssim, SSIM\nimport torch\nimport torch.optim as optim\nfrom tensorboardX import SummaryWriter\nfrom torch.utils.data import DataLoader, random_split\nfrom tqdm import tqdm\n\nfrom perception.custom_datasets import DepthDataset\n\nfrom perception.training.models import createUNet, createUNetResNet\nfrom perception.utils.visualization import display_images_horizontally\n\n\ndef depth_loss_function(y_pred, y_true, theta=0.1, maxDepthVal=1):\n # from https://github.com/ialhashim/DenseDepth/blob/ed044069eb99fa06dd4af415d862b3b5cbfab283/loss.py\n # and their paper https://arxiv.org/pdf/1812.11941.pdf\n # with modifications for pytorch - using libraries pytorch_msssim and kornia\n\n # Point-wise depth\n l_depth = torch.mean(torch.abs(y_pred - y_true), dim=-1)\n\n # Edges\n d_true = spatial_gradient(y_true)\n dx_true = d_true[:, :, 0, :, :]\n dy_true = d_true[:, :, 1, :, :]\n\n d_pred = spatial_gradient(y_pred)\n dx_pred = d_pred[:, :, 0, :, :]\n dy_pred = d_pred[:, :, 1, :, :]\n\n l_edges = torch.mean(torch.abs(dy_pred - dy_true) + torch.abs(dx_pred - dx_true), dim=-1)\n\n # Structural similarity (SSIM) index\n l_ssim = torch.clip((1 - ssim(y_true, y_pred, maxDepthVal, nonnegative_ssim=True)) * 0.5, 0, 1)\n\n # Weights\n w1 = 1.0\n w2 = 1.0\n w3 = theta\n\n return (w1 * l_ssim) + (w2 * torch.mean(l_edges)) + (w3 * torch.mean(l_depth))\n\n\ndef create_dataloaders(path, validation_set_size, batch_size=32, max_n_instances=None, augment_strategy=None,\n num_workers=0, use_transform=None):\n\n dataset = DepthDataset(root_folder=path, max_n_instances=max_n_instances,\n augment_strategy=augment_strategy, use_transform=use_transform)\n train_size = int((1 - validation_set_size) * len(dataset))\n validation_size = len(dataset) - train_size\n train_dataset, validation_dataset = random_split(dataset, [train_size, validation_size])\n\n dataloaders = {\"train\": DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers),\n \"val\": DataLoader(validation_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)}\n return dataloaders\n\n\ndef train_model(model, dataloaders, criterion, optimizer, n_epochs, model_save_path, scheduler=None,\n 
save_model_weights=True, display_img_after_epoch=0):\n    # determine the computational device\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n    # device = torch.device(\"cpu\")\n    model.to(device)\n\n    best_val_loss = math.inf\n    best_model_weights = copy.deepcopy(model.state_dict())\n\n    start = time.time()\n    last_time = start\n\n    # Tensorboard logging\n    train_log_path = model_save_path / \"logs/train\"\n    val_log_path = model_save_path / \"logs/val\"\n    train_log_path.mkdir(parents=True)\n    val_log_path.mkdir(parents=True)\n    writer_train = SummaryWriter(model_save_path / \"logs/train\")\n    writer_val = SummaryWriter(model_save_path / \"logs/val\")\n\n    for epoch in range(n_epochs):\n        print('-' * 10)\n        print('Epoch {}/{}'.format(epoch + 1, n_epochs))\n\n        for phase in [\"train\", \"val\"]:\n            if phase == \"train\":\n                model.train()\n            else:\n                model.eval()\n\n            running_loss = 0.0\n            display_images = None\n\n            for i, data in tqdm(enumerate(dataloaders[phase])):\n                # Get the inputs; data is a list of (RGB, semantic segmentation, depth maps).\n                rgb_input = data[0].to(device, dtype=torch.float32) # TODO is such a large datatype necessary?\n                #rgb_target = data[1].to(device, dtype=torch.float32)\n                depth_image = data[2].to(device, dtype=torch.float32)\n                rgb_raw = data[3]\n\n                # Find the size of rgb_image\n                input_size = rgb_input.size(0)\n\n                # zero the parameter gradients\n                optimizer.zero_grad()\n\n                # forward + backward + optimize\n                with torch.set_grad_enabled(phase == \"train\"):\n                    outputs = model(rgb_input)\n                    #depth_image = torch.flatten(depth_image, start_dim=1)\n                    # TODO loss\n                    loss = criterion(outputs, depth_image)\n\n                    # backward + optimize only if in training phase\n                    if phase == \"train\":\n                        loss.backward()\n                        optimizer.step()\n\n                if display_images is None:\n                    # Save image as a numpy array. 
Used later for displaying predictions.\n idx = random.randint(0, input_size-1)\n display_images = [rgb_raw.cpu().numpy()[idx].transpose(1, 2, 0),\n depth_image.cpu().numpy()[idx].reshape(160, 384),\n outputs.detach().cpu().numpy()[idx].reshape(160, 384)]\n\n # statistics\n running_loss += loss.item() * input_size\n\n if phase == 'train' and scheduler is not None:\n scheduler.step()\n\n dataset_size = len(dataloaders[\"train\"].dataset) if phase == \"train\" else len(dataloaders[\"val\"].dataset)\n epoch_loss = running_loss / dataset_size\n\n print('{} Loss: {:.6f}'.format(phase, epoch_loss))\n\n # deep copy the model\n if phase == \"val\" and epoch_loss < best_val_loss:\n print(\"val loss record low, saving these weights...\")\n best_val_loss = epoch_loss\n best_model_weights = copy.deepcopy(model.state_dict())\n\n writer = writer_train if phase == \"train\" else writer_val\n writer.add_scalar(\"epoch_loss\", epoch_loss, epoch)\n\n display = True if phase == \"val\" and display_img_after_epoch else False\n\n figtitle = \"{} visualization after epoch {}\".format(phase, epoch)\n subtitles = [\"augmented input\", \"ground truth\", \"prediction\"]\n\n img = display_images_horizontally(display_images, fig_width=10, fig_height=2, display=display,\n title=figtitle, subplot_titles=subtitles)\n\n writer.add_image(\"{} comparison\".format(phase), img.transpose(2, 0, 1), epoch)\n\n now = time.time()\n time_elapsed = now - last_time\n print(\"Epoch completed in {:.0f}m {:.0f}s\".format(\n time_elapsed // 60, time_elapsed % 60))\n last_time = now\n\n # Save the model\n if save_model_weights:\n path = model_save_path / \"epoch-{}.pt\".format(epoch+1)\n print(\"Saving weights to:\", path)\n torch.save(model.state_dict(), path)\n\n time_elapsed = time.time() - start\n print(\"Training complete in {:.0f}m {:.0f}s\".format(\n time_elapsed // 60, time_elapsed % 60))\n\n # load the best weights\n model.load_state_dict(best_model_weights)\n\n # Save the model\n if save_model_weights:\n path = model_save_path / \"best_weights.pt\"\n print(\"Saving best weights to:\", path)\n torch.save(model.state_dict(), path)\n return model\n\n\ndef main():\n\n model_name = \"depth_unet_resnet_ssim6\"\n model_save_path = Path(\"training_logs/perception\") / model_name\n\n validation_set_size = 0.2\n max_n_instances = None\n batch_size = 42\n augment_strategy = \"medium\"\n path = \"data/perception/test1\"\n\n model_save_path.mkdir(parents=True)\n dataloaders = create_dataloaders(path=path, validation_set_size=validation_set_size,\n batch_size=batch_size, max_n_instances=max_n_instances,\n augment_strategy=augment_strategy, num_workers=0,\n use_transform=None) # \"midas_large\"\n\n save_model_weights = True\n display_img_after_epoch = True\n n_epochs = 20\n\n #model = createDeepLabv3(outputchannels=len(DEFAULT_CLASSES) + 1, backbone=backbone, pretrained=True)\n #model = createUNet()\n model = createUNetResNet()\n #criterion = torch.nn.MSELoss()\n criterion = depth_loss_function\n optimizer = optim.Adam(model.parameters(), lr=0.0001)\n\n train_model(model=model, dataloaders=dataloaders, criterion=criterion, optimizer=optimizer,\n n_epochs=n_epochs, model_save_path=model_save_path, save_model_weights=save_model_weights,\n display_img_after_epoch=display_img_after_epoch)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.nn.ConvTranspose2d", "torch.cat", "torch.nn.Conv2d", "numpy.linalg.norm", "numpy.cos", "numpy.sin", "numpy.concatenate", "numpy.arctan2", "torch.no_grad", "torch.FloatTensor", "torch.nn.BatchNorm2d", "torch.stack", "torch.nn.ReLU" ], [ "torch.abs", "torch.mean", "torch.utils.data.DataLoader", "torch.utils.data.random_split", "torch.set_grad_enabled", "torch.cuda.is_available" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
abramhindle/mne-python
[ "989390a484cba219aae74c778b71568586f9edb2", "989390a484cba219aae74c778b71568586f9edb2", "989390a484cba219aae74c778b71568586f9edb2" ]
[ "tutorials/stats-sensor-space/plot_stats_cluster_1samp_test_time_frequency.py", "mne/parallel.py", "tutorials/source-modeling/plot_forward.py" ]
[ "\"\"\"\n===============================================================\nNon-parametric 1 sample cluster statistic on single trial power\n===============================================================\n\nThis script shows how to estimate significant clusters\nin time-frequency power estimates. It uses a non-parametric\nstatistical procedure based on permutations and cluster\nlevel statistics.\n\nThe procedure consists of:\n\n - extracting epochs\n - compute single trial power estimates\n - baseline line correct the power estimates (power ratios)\n - compute stats to see if ratio deviates from 1.\n\n\"\"\"\n# Authors: Alexandre Gramfort <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne.time_frequency import tfr_morlet\nfrom mne.stats import permutation_cluster_1samp_test\nfrom mne.datasets import sample\n\nprint(__doc__)\n\n###############################################################################\n# Set parameters\n# --------------\ndata_path = sample.data_path()\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\ntmin, tmax, event_id = -0.3, 0.6, 1\n\n# Setup for reading the raw data\nraw = mne.io.read_raw_fif(raw_fname)\nevents = mne.find_events(raw, stim_channel='STI 014')\n\ninclude = []\nraw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more\n\n# picks MEG gradiometers\npicks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,\n stim=False, include=include, exclude='bads')\n\n# Load condition 1\nevent_id = 1\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0), preload=True,\n reject=dict(grad=4000e-13, eog=150e-6))\n\n# Take only one channel\nch_name = 'MEG 1332'\nepochs.pick_channels([ch_name])\n\nevoked = epochs.average()\n\n# Factor to down-sample the temporal dimension of the TFR computed by\n# tfr_morlet. 
Decimation occurs after frequency decomposition and can\n# be used to reduce memory usage (and possibly computational time of downstream\n# operations such as nonparametric statistics) if you don't need high\n# spectrotemporal resolution.\ndecim = 5\nfreqs = np.arange(8, 40, 2) # define frequencies of interest\nsfreq = raw.info['sfreq'] # sampling in Hz\ntfr_epochs = tfr_morlet(epochs, freqs, n_cycles=4., decim=decim,\n average=False, return_itc=False, n_jobs=1)\n\n# Baseline power\ntfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))\n\n# Crop in time to keep only what is between 0 and 400 ms\nevoked.crop(0., 0.4)\ntfr_epochs.crop(0., 0.4)\n\nepochs_power = tfr_epochs.data[:, 0, :, :] # take the 1 channel\n\n###############################################################################\n# Compute statistic\n# -----------------\nthreshold = 2.5\nn_permutations = 100 # Warning: 100 is too small for real-world analysis.\nT_obs, clusters, cluster_p_values, H0 = \\\n permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,\n threshold=threshold, tail=0)\n\n###############################################################################\n# View time-frequency plots\n# -------------------------\n\nevoked_data = evoked.data\ntimes = 1e3 * evoked.times\n\nplt.figure()\nplt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)\n\n# Create new stats image with only significant clusters\nT_obs_plot = np.nan * np.ones_like(T_obs)\nfor c, p_val in zip(clusters, cluster_p_values):\n if p_val <= 0.05:\n T_obs_plot[c] = T_obs[c]\n\nvmax = np.max(np.abs(T_obs))\nvmin = -vmax\nplt.subplot(2, 1, 1)\nplt.imshow(T_obs, cmap=plt.cm.gray,\n extent=[times[0], times[-1], freqs[0], freqs[-1]],\n aspect='auto', origin='lower', vmin=vmin, vmax=vmax)\nplt.imshow(T_obs_plot, cmap=plt.cm.RdBu_r,\n extent=[times[0], times[-1], freqs[0], freqs[-1]],\n aspect='auto', origin='lower', vmin=vmin, vmax=vmax)\nplt.colorbar()\nplt.xlabel('Time (ms)')\nplt.ylabel('Frequency (Hz)')\nplt.title('Induced power (%s)' % ch_name)\n\nax2 = plt.subplot(2, 1, 2)\nevoked.plot(axes=[ax2], time_unit='s')\nplt.show()\n", "\"\"\"Parallel util function.\"\"\"\n\n# Author: Alexandre Gramfort <[email protected]>\n#\n# License: Simplified BSD\n\nimport logging\nimport os\n\nfrom . import get_config\nfrom .utils import logger, verbose, warn, ProgressBar\nfrom .fixes import _get_args\n\nif 'MNE_FORCE_SERIAL' in os.environ:\n _force_serial = True\nelse:\n _force_serial = None\n\n\n@verbose\ndef parallel_func(func, n_jobs, max_nbytes='auto', pre_dispatch='n_jobs',\n total=None, prefer=None, verbose=None):\n \"\"\"Return parallel instance with delayed function.\n\n Util function to use joblib only if available\n\n Parameters\n ----------\n func: callable\n A function\n n_jobs: int\n Number of jobs to run in parallel\n max_nbytes : int, str, or None\n Threshold on the minimum size of arrays passed to the workers that\n triggers automated memory mapping. Can be an int in Bytes,\n or a human-readable string, e.g., '1M' for 1 megabyte.\n Use None to disable memmaping of large arrays. Use 'auto' to\n use the value set using mne.set_memmap_min_size.\n pre_dispatch : int, or string, optional\n See :class:`joblib.Parallel`.\n total : int | None\n If int, use a progress bar to display the progress of dispatched\n jobs. 
This should only be used when directly iterating, not when\n using ``split_list`` or :func:`np.array_split`.\n If None (default), do not add a progress bar.\n prefer : str | None\n If str, can be \"processes\" or \"threads\". See :class:`joblib.Parallel`.\n Ignored if the joblib version is too old to support this.\n\n .. versionadded:: 0.18\n %(verbose)s INFO or DEBUG\n will print parallel status, others will not.\n\n Returns\n -------\n parallel: instance of joblib.Parallel or list\n The parallel object\n my_func: callable\n func if not parallel or delayed(func)\n n_jobs: int\n Number of jobs >= 0\n \"\"\"\n should_print = (logger.level <= logging.INFO)\n # for a single job, we don't need joblib\n if n_jobs != 1:\n try:\n from joblib import Parallel, delayed\n except ImportError:\n try:\n from sklearn.externals.joblib import Parallel, delayed\n except ImportError:\n warn('joblib not installed. Cannot run in parallel.')\n n_jobs = 1\n if n_jobs == 1:\n n_jobs = 1\n my_func = func\n parallel = list\n else:\n # check if joblib is recent enough to support memmaping\n p_args = _get_args(Parallel.__init__)\n joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)\n\n cache_dir = get_config('MNE_CACHE_DIR', None)\n if isinstance(max_nbytes, str) and max_nbytes == 'auto':\n max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)\n\n if max_nbytes is not None:\n if not joblib_mmap and cache_dir is not None:\n warn('\"MNE_CACHE_DIR\" is set but a newer version of joblib is '\n 'needed to use the memmapping pool.')\n if joblib_mmap and cache_dir is None:\n logger.info(\n 'joblib supports memapping pool but \"MNE_CACHE_DIR\" '\n 'is not set in MNE-Python config. To enable it, use, '\n 'e.g., mne.set_cache_dir(\\'/tmp/shm\\'). This will '\n 'store temporary files under /dev/shm and can result '\n 'in large memory savings.')\n\n # create keyword arguments for Parallel\n kwargs = {'verbose': 5 if should_print and total is None else 0}\n kwargs['pre_dispatch'] = pre_dispatch\n if 'prefer' in p_args:\n kwargs['prefer'] = prefer\n\n if joblib_mmap:\n if cache_dir is None:\n max_nbytes = None # disable memmaping\n kwargs['temp_folder'] = cache_dir\n kwargs['max_nbytes'] = max_nbytes\n\n n_jobs = check_n_jobs(n_jobs)\n parallel = _check_wrapper(Parallel(n_jobs, **kwargs))\n my_func = delayed(func)\n\n if total is not None:\n def parallel_progress(op_iter):\n pb = ProgressBar(total, verbose_bool=should_print)\n return parallel(pb(op_iter))\n parallel_out = parallel_progress\n else:\n parallel_out = parallel\n return parallel_out, my_func, n_jobs\n\n\ndef _check_wrapper(fun):\n def run(*args, **kwargs):\n try:\n return fun(*args, **kwargs)\n except RuntimeError as err:\n msg = str(err.args[0]) if err.args else ''\n if msg.startswith('The task could not be sent to the workers'):\n raise RuntimeError(\n msg + ' Consider using joblib memmap caching to get '\n 'around this problem. See mne.set_mmap_min_size, '\n 'mne.set_cache_dir, and buffer_size parallel function '\n 'arguments (if applicable).')\n raise\n return run\n\n\ndef check_n_jobs(n_jobs, allow_cuda=False):\n \"\"\"Check n_jobs in particular for negative values.\n\n Parameters\n ----------\n n_jobs : int\n The number of jobs.\n allow_cuda : bool\n Allow n_jobs to be 'cuda'. Default: False.\n\n Returns\n -------\n n_jobs : int\n The checked number of jobs. 
Always positive (or 'cuda' if\n applicable.)\n \"\"\"\n if not isinstance(n_jobs, int):\n if not allow_cuda:\n raise ValueError('n_jobs must be an integer')\n elif not isinstance(n_jobs, str) or n_jobs != 'cuda':\n raise ValueError('n_jobs must be an integer, or \"cuda\"')\n # else, we have n_jobs='cuda' and this is okay, so do nothing\n elif _force_serial:\n n_jobs = 1\n logger.info('... MNE_FORCE_SERIAL set. Processing in forced '\n 'serial mode.')\n elif n_jobs <= 0:\n try:\n import multiprocessing\n n_cores = multiprocessing.cpu_count()\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n if n_jobs <= 0:\n raise ValueError('If n_jobs has a negative value it must not '\n 'be less than the number of CPUs present. '\n 'You\\'ve got %s CPUs' % n_cores)\n except ImportError:\n # only warn if they tried to use something other than 1 job\n if n_jobs != 1:\n warn('multiprocessing not installed. Cannot run in parallel.')\n n_jobs = 1\n\n return n_jobs\n", "\"\"\"\n.. _tut-forward:\n\nHead model and forward computation\n==================================\n\nThe aim of this tutorial is to be a getting started for forward\ncomputation.\n\nFor more extensive details and presentation of the general\nconcepts for forward modeling. See :ref:`c_legacy_ch_forward`.\n\n\"\"\"\n\nimport os.path as op\nimport mne\nfrom mne.datasets import sample\ndata_path = sample.data_path()\n\n# the raw file containing the channel location + types\nraw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'\n# The paths to Freesurfer reconstructions\nsubjects_dir = data_path + '/subjects'\nsubject = 'sample'\n\n###############################################################################\n# Computing the forward operator\n# ------------------------------\n#\n# To compute a forward operator we need:\n#\n# - a ``-trans.fif`` file that contains the coregistration info.\n# - a source space\n# - the :term:`BEM` surfaces\n\n###############################################################################\n# Compute and visualize BEM surfaces\n# ----------------------------------\n#\n# The :term:`BEM` surfaces are the triangulations of the interfaces between\n# different tissues needed for forward computation. These surfaces are for\n# example the inner skull surface, the outer skull surface and the outer skin\n# surface, a.k.a. scalp surface.\n#\n# Computing the BEM surfaces requires FreeSurfer and makes use of either of\n# the two following command line tools:\n#\n# - :ref:`gen_mne_watershed_bem`\n# - :ref:`gen_mne_flash_bem`\n#\n# Or by calling in a Python script one of the functions\n# :func:`mne.bem.make_watershed_bem` or :func:`mne.bem.make_flash_bem`.\n#\n# Here we'll assume it's already computed. It takes a few minutes per subject.\n#\n# For EEG we use 3 layers (inner skull, outer skull, and skin) while for\n# MEG 1 layer (inner skull) is enough.\n#\n# Let's look at these surfaces. The function :func:`mne.viz.plot_bem`\n# assumes that you have the the *bem* folder of your subject FreeSurfer\n# reconstruction the necessary files.\n\nmne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n brain_surfaces='white', orientation='coronal')\n\n###############################################################################\n# Visualization the coregistration\n# --------------------------------\n#\n# The coregistration is operation that allows to position the head and the\n# sensors in a common coordinate system. 
In the MNE software the transformation\n# to align the head and the sensors in stored in a so-called **trans file**.\n# It is a FIF file that ends with ``-trans.fif``. It can be obtained with\n# :func:`mne.gui.coregistration` (or its convenient command line\n# equivalent :ref:`gen_mne_coreg`), or mrilab if you're using a Neuromag\n# system.\n#\n# For the Python version see :func:`mne.gui.coregistration`\n#\n# Here we assume the coregistration is done, so we just visually check the\n# alignment with the following code.\n\n# The transformation file obtained by coregistration\ntrans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'\n\ninfo = mne.io.read_info(raw_fname)\n# Here we look at the dense head, which isn't used for BEM computations but\n# is useful for coregistration.\nmne.viz.plot_alignment(info, trans, subject=subject, dig=True,\n meg=['helmet', 'sensors'], subjects_dir=subjects_dir,\n surfaces='head-dense')\n\n###############################################################################\n# .. _plot_forward_source_space:\n#\n# Compute Source Space\n# --------------------\n#\n# The source space defines the position and orientation of the candidate source\n# locations. There are two types of source spaces:\n#\n# - **source-based** source space when the candidates are confined to a\n# surface.\n#\n# - **volumetric or discrete** source space when the candidates are discrete,\n# arbitrarily located source points bounded by the surface.\n#\n# **Source-based** source space is computed using\n# :func:`mne.setup_source_space`, while **volumetric** source space is computed\n# using :func:`mne.setup_volume_source_space`.\n#\n# We will now compute a source-based source space with an OCT-6 resolution.\n# See :ref:`setting_up_source_space` for details on source space definition\n# and spacing parameter.\n\nsrc = mne.setup_source_space(subject, spacing='oct6',\n subjects_dir=subjects_dir, add_dist=False)\nprint(src)\n\n###############################################################################\n# The surface based source space ``src`` contains two parts, one for the left\n# hemisphere (4098 locations) and one for the right hemisphere\n# (4098 locations). Sources can be visualized on top of the BEM surfaces\n# in purple.\n\nmne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n brain_surfaces='white', src=src, orientation='coronal')\n\n###############################################################################\n# To compute a volume based source space defined with a grid of candidate\n# dipoles inside a sphere of radius 90mm centered at (0.0, 0.0, 40.0)\n# you can use the following code.\n# Obviously here, the sphere is not perfect. 
It is not restricted to the\n# brain and it can miss some parts of the cortex.\n\nsphere = (0.0, 0.0, 40.0, 90.0)\nvol_src = mne.setup_volume_source_space(subject, subjects_dir=subjects_dir,\n sphere=sphere)\nprint(vol_src)\n\nmne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n brain_surfaces='white', src=vol_src, orientation='coronal')\n\n###############################################################################\n# To compute a volume based source space defined with a grid of candidate\n# dipoles inside the brain (requires the :term:`BEM` surfaces) you can use the\n# following.\n\nsurface = op.join(subjects_dir, subject, 'bem', 'inner_skull.surf')\nvol_src = mne.setup_volume_source_space(subject, subjects_dir=subjects_dir,\n surface=surface)\nprint(vol_src)\n\nmne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,\n brain_surfaces='white', src=vol_src, orientation='coronal')\n\n###############################################################################\n# With the surface-based source space only sources that lie in the plotted MRI\n# slices are shown. Let's see how to view all sources in 3D.\n\nfig = mne.viz.plot_alignment(subject=subject, subjects_dir=subjects_dir,\n surfaces='white', coord_frame='head',\n src=src)\nmne.viz.set_3d_view(fig, azimuth=173.78, elevation=101.75,\n distance=0.30, focalpoint=(-0.03, -0.01, 0.03))\n\n###############################################################################\n# .. _plot_forward_compute_forward_solution:\n#\n# Compute forward solution\n# ------------------------\n#\n# We can now compute the forward solution.\n# To reduce computation we'll just compute a single layer BEM (just inner\n# skull) that can then be used for MEG (not EEG).\n#\n# We specify if we want a one-layer or a three-layer BEM using the\n# conductivity parameter.\n#\n# The BEM solution requires a BEM model which describes the geometry\n# of the head the conductivities of the different tissues.\n\nconductivity = (0.3,) # for single layer\n# conductivity = (0.3, 0.006, 0.3) # for three layers\nmodel = mne.make_bem_model(subject='sample', ico=4,\n conductivity=conductivity,\n subjects_dir=subjects_dir)\nbem = mne.make_bem_solution(model)\n\n###############################################################################\n# Note that the :term:`BEM` does not involve any use of the trans file. 
The BEM\n# only depends on the head geometry and conductivities.\n# It is therefore independent from the MEG data and the head position.\n#\n# Let's now compute the forward operator, commonly referred to as the\n# gain or leadfield matrix.\n#\n# See :func:`mne.make_forward_solution` for details on parameters meaning.\n\nfwd = mne.make_forward_solution(raw_fname, trans=trans, src=src, bem=bem,\n meg=True, eeg=False, mindist=5.0, n_jobs=2)\nprint(fwd)\n\n###############################################################################\n# We can explore the content of fwd to access the numpy array that contains\n# the gain matrix.\n\nleadfield = fwd['sol']['data']\nprint(\"Leadfield size : %d sensors x %d dipoles\" % leadfield.shape)\n\n###############################################################################\n# To extract the numpy array containing the forward operator corresponding to\n# the source space `fwd['src']` with cortical orientation constraint\n# we can use the following:\n\nfwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True,\n use_cps=True)\nleadfield = fwd_fixed['sol']['data']\nprint(\"Leadfield size : %d sensors x %d dipoles\" % leadfield.shape)\n\n###############################################################################\n# This is equivalent to the following code that explicitly applies the\n# forward operator to a source estimate composed of the identity operator:\n\nimport numpy as np # noqa\n\nn_dipoles = leadfield.shape[1]\nvertices = [src_hemi['vertno'] for src_hemi in fwd_fixed['src']]\nstc = mne.SourceEstimate(1e-9 * np.eye(n_dipoles), vertices, tmin=0., tstep=1)\nleadfield = mne.apply_forward(fwd_fixed, stc, info).data / 1e-9\n\n###############################################################################\n# To save to disk a forward solution you can use\n# :func:`mne.write_forward_solution` and to read it back from disk\n# :func:`mne.read_forward_solution`. Don't forget that FIF files containing\n# forward solution should end with *-fwd.fif*.\n#\n# To get a fixed-orientation forward solution, use\n# :func:`mne.convert_forward_solution` to convert the free-orientation\n# solution to (surface-oriented) fixed orientation.\n\n###############################################################################\n# Exercise\n# --------\n#\n# By looking at\n# :ref:`sphx_glr_auto_examples_forward_plot_forward_sensitivity_maps.py`\n# plot the sensitivity maps for EEG and compare it with the MEG, can you\n# justify the claims that:\n#\n# - MEG is not sensitive to radial sources\n# - EEG is more sensitive to deep sources\n#\n# How will the MEG sensitivity maps and histograms change if you use a free\n# instead if a fixed/surface oriented orientation?\n#\n# Try this changing the mode parameter in :func:`mne.sensitivity_map`\n# accordingly. Why don't we see any dipoles on the gyri?\n" ]
[ [ "matplotlib.pyplot.imshow", "numpy.ones_like", "numpy.abs", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "sklearn.externals.joblib.delayed", "sklearn.externals.joblib.Parallel" ], [ "numpy.eye" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LinghengMeng/openai_baselines_extension
[ "65ec57a71be77b6cfd92defd070d76ae225a92e7" ]
[ "baselines/deepq_n_step/build_graph.py" ]
[ "\"\"\"Deep Q learning graph\n\nThe functions in this file can are used to create the following functions:\n\n======= act ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon a new value, if negative not update happens\n (default: no update)\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= act (in case of parameter noise) ========\n\n Function to chose an action given an observation\n\n Parameters\n ----------\n observation: object\n Observation that can be feed into the output of make_obs_ph\n stochastic: bool\n if set to False all the actions are always deterministic (default False)\n update_eps_ph: float\n update epsilon to a new value, if negative no update happens\n (default: no update)\n reset_ph: bool\n reset the perturbed policy by sampling a new perturbation\n update_param_noise_threshold_ph: float\n the desired threshold for the difference between non-perturbed and perturbed policy\n update_param_noise_scale_ph: bool\n whether or not to update the scale of the noise for the next time it is re-perturbed\n\n Returns\n -------\n Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for\n every element of the batch.\n\n\n======= train =======\n\n Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:\n\n td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))\n loss = huber_loss[td_error]\n\n Parameters\n ----------\n obs_t: object\n a batch of observations\n action: np.array\n actions that were selected upon seeing obs_t.\n dtype must be int32 and shape must be (batch_size,)\n reward: np.array\n immediate reward attained after executing those actions\n dtype must be float32 and shape must be (batch_size,)\n obs_tp1: object\n observations that followed obs_t\n done: np.array\n 1 if obs_t was the last observation in the episode and 0 otherwise\n obs_tp1 gets ignored, but must be of the valid shape.\n dtype must be float32 and shape must be (batch_size,)\n weight: np.array\n imporance weights for every element of the batch (gradient is multiplied\n by the importance weight) dtype must be float32 and shape must be (batch_size,)\n\n Returns\n -------\n td_error: np.array\n a list of differences between Q(s,a) and the target in Bellman's equation.\n dtype is float32 and shape is (batch_size,)\n\n======= update_target ========\n\n copy the parameters from optimized Q function to the target Q function.\n In Q learning we actually optimize the following error:\n\n Q(s,a) - (r + gamma * max_a' Q'(s', a'))\n\n Where Q' is lagging behind Q to stablize the learning. 
For example for Atari\n\n Q' is set to Q once every 10000 updates training steps.\n\n\"\"\"\nimport tensorflow as tf\nimport baselines.common.tf_util as U\n\n\ndef scope_vars(scope, trainable_only=False):\n \"\"\"\n Get variables inside a scope\n The scope can be specified as a string\n Parameters\n ----------\n scope: str or VariableScope\n scope in which the variables reside.\n trainable_only: bool\n whether or not to return only the variables that were marked as trainable.\n Returns\n -------\n vars: [tf.Variable]\n list of variables in `scope`.\n \"\"\"\n return tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,\n scope=scope if isinstance(scope, str) else scope.name\n )\n\n\ndef scope_name():\n \"\"\"Returns the name of current scope as a string, e.g. deepq/q_func\"\"\"\n return tf.get_variable_scope().name\n\n\ndef absolute_scope_name(relative_scope_name):\n \"\"\"Appends parent scope name to `relative_scope_name`\"\"\"\n return scope_name() + \"/\" + relative_scope_name\n\n\ndef default_param_noise_filter(var):\n if var not in tf.trainable_variables():\n # We never perturb non-trainable vars.\n return False\n if \"fully_connected\" in var.name:\n # We perturb fully-connected layers.\n return True\n\n # The remaining layers are likely conv or layer norm layers, which we do not wish to\n # perturb (in the former case because they only extract features, in the latter case because\n # we use them for normalization purposes). If you change your network, you will likely want\n # to re-consider which layers to perturb and which to keep untouched.\n return False\n\n\ndef build_act(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None):\n \"\"\"Creates the act function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that take a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions.\n scope: str or VariableScope\n optional scope for variable_scope.\n reuse: bool or None\n whether or not the variables should be reused. 
To be able to reuse them, the scope must be given.\n\n    Returns\n    -------\n    act: (tf.Variable, bool, float) -> tf.Variable\n        function to select an action given an observation.\n        See the top of the file for details.\n    \"\"\"\n    with tf.variable_scope(scope, reuse=reuse):\n        observations_ph = make_obs_ph(\"observation\")\n        stochastic_ph = tf.placeholder(tf.bool, (), name=\"stochastic\")\n        update_eps_ph = tf.placeholder(tf.float32, (), name=\"update_eps\")\n\n        eps = tf.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n\n        q_values = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n        deterministic_actions = tf.argmax(q_values, axis=1)\n\n        batch_size = tf.shape(observations_ph.get())[0]\n        random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n        chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n        stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n        output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n        update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n        _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph],\n                          outputs=output_actions,\n                          givens={update_eps_ph: -1.0, stochastic_ph: True},\n                          updates=[update_eps_expr])\n        def act(ob, stochastic=True, update_eps=-1):\n            return _act(ob, stochastic, update_eps)\n        return act\n\n\ndef build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=\"deepq\", reuse=None, param_noise_filter_func=None):\n    \"\"\"Creates the act function with support for parameter space noise exploration (https://arxiv.org/abs/1706.01905):\n\n    Parameters\n    ----------\n    make_obs_ph: str -> tf.placeholder or TfInput\n        a function that takes a name and creates a placeholder of input with that name\n    q_func: (tf.Variable, int, str, bool) -> tf.Variable\n        the model that takes the following inputs:\n            observation_in: object\n                the output of observation placeholder\n            num_actions: int\n                number of actions\n            scope: str\n            reuse: bool\n                should be passed to outer variable scope\n        and returns a tensor of shape (batch_size, num_actions) with values of every action.\n    num_actions: int\n        number of actions.\n    scope: str or VariableScope\n        optional scope for variable_scope.\n    reuse: bool or None\n        whether or not the variables should be reused. To be able to reuse them, the scope must be given.\n    param_noise_filter_func: tf.Variable -> bool\n        function that decides whether or not a variable should be perturbed. Only applicable\n        if param_noise is True. 
If set to None, default_param_noise_filter is used.\n\n    Returns\n    -------\n    act: (tf.Variable, bool, float, bool, float, bool) -> tf.Variable\n        function to select an action given an observation.\n        See the top of the file for details.\n    \"\"\"\n    if param_noise_filter_func is None:\n        param_noise_filter_func = default_param_noise_filter\n\n    with tf.variable_scope(scope, reuse=reuse):\n        observations_ph = make_obs_ph(\"observation\")\n        stochastic_ph = tf.placeholder(tf.bool, (), name=\"stochastic\")\n        update_eps_ph = tf.placeholder(tf.float32, (), name=\"update_eps\")\n        update_param_noise_threshold_ph = tf.placeholder(tf.float32, (), name=\"update_param_noise_threshold\")\n        update_param_noise_scale_ph = tf.placeholder(tf.bool, (), name=\"update_param_noise_scale\")\n        reset_ph = tf.placeholder(tf.bool, (), name=\"reset\")\n\n        eps = tf.get_variable(\"eps\", (), initializer=tf.constant_initializer(0))\n        param_noise_scale = tf.get_variable(\"param_noise_scale\", (), initializer=tf.constant_initializer(0.01), trainable=False)\n        param_noise_threshold = tf.get_variable(\"param_noise_threshold\", (), initializer=tf.constant_initializer(0.05), trainable=False)\n\n        # Unmodified Q.\n        q_values = q_func(observations_ph.get(), num_actions, scope=\"q_func\")\n\n        # Perturbable Q used for the actual rollout.\n        q_values_perturbed = q_func(observations_ph.get(), num_actions, scope=\"perturbed_q_func\")\n        # We have to wrap this code into a function due to the way tf.cond() works. See\n        # https://stackoverflow.com/questions/37063952/confused-by-the-behavior-of-tf-cond for\n        # a more detailed discussion.\n        def perturb_vars(original_scope, perturbed_scope):\n            all_vars = scope_vars(absolute_scope_name(original_scope))\n            all_perturbed_vars = scope_vars(absolute_scope_name(perturbed_scope))\n            assert len(all_vars) == len(all_perturbed_vars)\n            perturb_ops = []\n            for var, perturbed_var in zip(all_vars, all_perturbed_vars):\n                if param_noise_filter_func(perturbed_var):\n                    # Perturb this variable.\n                    op = tf.assign(perturbed_var, var + tf.random_normal(shape=tf.shape(var), mean=0., stddev=param_noise_scale))\n                else:\n                    # Do not perturb, just assign.\n                    op = tf.assign(perturbed_var, var)\n                perturb_ops.append(op)\n            assert len(perturb_ops) == len(all_vars)\n            return tf.group(*perturb_ops)\n\n        # Set up functionality to re-compute `param_noise_scale`. This perturbs yet another copy\n        # of the network and measures the effect of that perturbation in action space. 
If the perturbation\n # is too big, reduce scale of perturbation, otherwise increase.\n q_values_adaptive = q_func(observations_ph.get(), num_actions, scope=\"adaptive_q_func\")\n perturb_for_adaption = perturb_vars(original_scope=\"q_func\", perturbed_scope=\"adaptive_q_func\")\n kl = tf.reduce_sum(tf.nn.softmax(q_values) * (tf.log(tf.nn.softmax(q_values)) - tf.log(tf.nn.softmax(q_values_adaptive))), axis=-1)\n mean_kl = tf.reduce_mean(kl)\n def update_scale():\n with tf.control_dependencies([perturb_for_adaption]):\n update_scale_expr = tf.cond(mean_kl < param_noise_threshold,\n lambda: param_noise_scale.assign(param_noise_scale * 1.01),\n lambda: param_noise_scale.assign(param_noise_scale / 1.01),\n )\n return update_scale_expr\n\n # Functionality to update the threshold for parameter space noise.\n update_param_noise_threshold_expr = param_noise_threshold.assign(tf.cond(update_param_noise_threshold_ph >= 0,\n lambda: update_param_noise_threshold_ph, lambda: param_noise_threshold))\n\n # Put everything together.\n deterministic_actions = tf.argmax(q_values_perturbed, axis=1)\n batch_size = tf.shape(observations_ph.get())[0]\n random_actions = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=num_actions, dtype=tf.int64)\n chose_random = tf.random_uniform(tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps\n stochastic_actions = tf.where(chose_random, random_actions, deterministic_actions)\n\n output_actions = tf.cond(stochastic_ph, lambda: stochastic_actions, lambda: deterministic_actions)\n update_eps_expr = eps.assign(tf.cond(update_eps_ph >= 0, lambda: update_eps_ph, lambda: eps))\n updates = [\n update_eps_expr,\n tf.cond(reset_ph, lambda: perturb_vars(original_scope=\"q_func\", perturbed_scope=\"perturbed_q_func\"), lambda: tf.group(*[])),\n tf.cond(update_param_noise_scale_ph, lambda: update_scale(), lambda: tf.Variable(0., trainable=False)),\n update_param_noise_threshold_expr,\n ]\n _act = U.function(inputs=[observations_ph, stochastic_ph, update_eps_ph, reset_ph, update_param_noise_threshold_ph, update_param_noise_scale_ph],\n outputs=output_actions,\n givens={update_eps_ph: -1.0, stochastic_ph: True, reset_ph: False, update_param_noise_threshold_ph: False, update_param_noise_scale_ph: False},\n updates=updates)\n def act(ob, reset=False, update_param_noise_threshold=False, update_param_noise_scale=False, stochastic=True, update_eps=-1):\n return _act(ob, stochastic, update_eps, reset, update_param_noise_threshold, update_param_noise_scale)\n return act\n\n\ndef build_train(n_step, make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0,\n double_q=True, scope=\"deepq\", reuse=None, param_noise=False, param_noise_filter_func=None):\n \"\"\"Creates the train function:\n\n Parameters\n ----------\n make_obs_ph: str -> tf.placeholder or TfInput\n a function that takes a name and creates a placeholder of input with that name\n q_func: (tf.Variable, int, str, bool) -> tf.Variable\n the model that takes the following inputs:\n observation_in: object\n the output of observation placeholder\n num_actions: int\n number of actions\n scope: str\n reuse: bool\n should be passed to outer variable scope\n and returns a tensor of shape (batch_size, num_actions) with values of every action.\n num_actions: int\n number of actions\n reuse: bool\n whether or not to reuse the graph variables\n optimizer: tf.train.Optimizer\n optimizer to use for the Q-learning objective.\n grad_norm_clipping: float or None\n clip gradient norms to this value. 
If None, no clipping is performed.\n    gamma: float\n        discount rate.\n    double_q: bool\n        if True will use Double Q Learning (https://arxiv.org/abs/1509.06461).\n        In general it is a good idea to keep it enabled.\n    scope: str or VariableScope\n        optional scope for variable_scope.\n    reuse: bool or None\n        whether or not the variables should be reused. To be able to reuse them, the scope must be given.\n    param_noise: bool\n        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)\n    param_noise_filter_func: tf.Variable -> bool\n        function that decides whether or not a variable should be perturbed. Only applicable\n        if param_noise is True. If set to None, default_param_noise_filter is used.\n\n    Returns\n    -------\n    act: (tf.Variable, bool, float) -> tf.Variable\n        function to select an action given an observation.\n        See the top of the file for details.\n    train: (object, np.array, np.array, object, np.array, np.array) -> np.array\n        optimize the error in Bellman's equation.\n        See the top of the file for details.\n    update_target: () -> ()\n        copy the parameters from the optimized Q function to the target Q function.\n        See the top of the file for details.\n    debug: {str: function}\n        a bunch of functions to print debug data like q_values.\n    \"\"\"\n    if param_noise:\n        act_f = build_act_with_param_noise(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse,\n            param_noise_filter_func=param_noise_filter_func)\n    else:\n        act_f = build_act(make_obs_ph, q_func, num_actions, scope=scope, reuse=reuse)\n\n    with tf.variable_scope(scope, reuse=reuse):\n        # set up placeholders\n        obs_t_input = make_obs_ph(\"obs_t\")\n        act_t_ph = tf.placeholder(tf.int32, [None], name=\"action\")\n        rew_t_ph = tf.placeholder(tf.float32, shape=(None, n_step), name=\"reward\")\n        obs_tp1_input = make_obs_ph(\"obs_tp1\")\n        done_mask_ph = tf.placeholder(tf.float32, shape=(None, n_step), name=\"done\")\n        importance_weights_ph = tf.placeholder(tf.float32, [None], name=\"weight\")\n\n        # q network evaluation\n        q_t = q_func(obs_t_input.get(), num_actions, scope=\"q_func\", reuse=True)  # reuse parameters from act\n        q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + \"/q_func\")\n\n        # target q network evaluation\n        q_tp1 = q_func(obs_tp1_input.get(), num_actions, scope=\"target_q_func\")\n        target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=tf.get_variable_scope().name + \"/target_q_func\")\n\n        # q scores for actions which we know were selected in the given state.\n        q_t_selected = tf.reduce_sum(q_t * tf.one_hot(act_t_ph, num_actions), 1)\n\n        # compute estimate of best possible value starting from state at t + 1\n        if double_q:\n            q_tp1_using_online_net = q_func(obs_tp1_input.get(), num_actions, scope=\"q_func\", reuse=True)\n            q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)\n            q_tp1_best = tf.reduce_sum(q_tp1 * tf.one_hot(q_tp1_best_using_online_net, num_actions), 1)\n        else:\n            q_tp1_best = tf.reduce_max(q_tp1, 1)\n\n        # compute RHS of the Bellman equation (discounted n-step return plus bootstrapped target value)\n        # 1-step version: q_t_selected_target = rew_t_ph + gamma * (1.0 - done_mask_ph) * q_tp1_best\n        q_t_selected_target = tf.reduce_sum(tf.multiply([gamma ** (i) for i in range(n_step)] * (1 - done_mask_ph), rew_t_ph), axis=1)\\\n            + gamma ** n_step * (1 - done_mask_ph[:, -1]) * q_tp1_best\n\n        # compute the error (potentially clipped)\n        td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)\n        errors = U.huber_loss(td_error)\n        weighted_error = tf.reduce_mean(importance_weights_ph * errors)\n\n        # compute 
optimization op (potentially with gradient clipping)\n if grad_norm_clipping is not None:\n gradients = optimizer.compute_gradients(weighted_error, var_list=q_func_vars)\n for i, (grad, var) in enumerate(gradients):\n if grad is not None:\n gradients[i] = (tf.clip_by_norm(grad, grad_norm_clipping), var)\n optimize_expr = optimizer.apply_gradients(gradients)\n else:\n optimize_expr = optimizer.minimize(weighted_error, var_list=q_func_vars)\n\n # update_target_fn will be called periodically to copy Q network to target Q network\n update_target_expr = []\n for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),\n sorted(target_q_func_vars, key=lambda v: v.name)):\n update_target_expr.append(var_target.assign(var))\n update_target_expr = tf.group(*update_target_expr)\n\n # Create callable functions\n train = U.function(\n inputs=[\n obs_t_input,\n act_t_ph,\n rew_t_ph,\n obs_tp1_input,\n done_mask_ph,\n importance_weights_ph\n ],\n outputs=td_error,\n updates=[optimize_expr]\n )\n update_target = U.function([], [], updates=[update_target_expr])\n\n q_values = U.function([obs_t_input], q_t)\n\n return act_f, train, update_target, {'q_values': q_values}\n" ]
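For orientation, here is a minimal sketch (not part of the file above or of the repository) of how the callables returned by build_train are typically wired into a DQN-style training loop. The names env, make_obs_ph, q_func and replay_buffer are assumptions standing in for an environment, an observation-placeholder helper, a Q-network builder with the signature documented above, and an n-step replay buffer whose sample() returns reward and done arrays of shape (batch_size, n_step).

import numpy as np
import tensorflow as tf
import baselines.common.tf_util as U

n_step = 3
act, train, update_target, debug = build_train(
    n_step=n_step,
    make_obs_ph=make_obs_ph,              # assumed helper returning a TfInput for observations
    q_func=q_func,                        # assumed Q-network builder
    num_actions=env.action_space.n,
    optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
    grad_norm_clipping=10,
    gamma=0.99,
    double_q=True,
)

with U.make_session(num_cpu=4):
    U.initialize()
    update_target()                       # start with the target network equal to the online network
    obs = env.reset()
    for t in range(100000):
        # act expects a batch of observations and returns a batch of actions
        action = act(np.array(obs)[None], update_eps=0.1)[0]
        new_obs, rew, done, _ = env.step(action)
        replay_buffer.add(obs, action, rew, new_obs, float(done))
        obs = env.reset() if done else new_obs

        if t > 1000:
            # for the n-step variant, rewards and dones have shape (batch_size, n_step)
            obses_t, actions, rewards, obses_tpn, dones = replay_buffer.sample(32)
            weights = np.ones_like(actions, dtype=np.float32)   # uniform importance weights
            train(obses_t, actions, rewards, obses_tpn, dones, weights)
        if t % 1000 == 0:
            update_target()               # periodically copy online weights to the target network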
[ [ "tensorflow.cond", "tensorflow.control_dependencies", "tensorflow.stack", "tensorflow.where", "tensorflow.group", "tensorflow.Variable", "tensorflow.stop_gradient", "tensorflow.clip_by_norm", "tensorflow.trainable_variables", "tensorflow.argmax", "tensorflow.shape", "tensorflow.placeholder", "tensorflow.one_hot", "tensorflow.reduce_max", "tensorflow.nn.softmax", "tensorflow.reduce_mean", "tensorflow.assign", "tensorflow.constant_initializer", "tensorflow.variable_scope", "tensorflow.get_variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]