repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list)
---|---|---|---|---|
btlorch/adversarial-robustness-toolbox |
[
"3789bd9fa7b6ea6f62e0ffe61df362dfa9f6df5e",
"3789bd9fa7b6ea6f62e0ffe61df362dfa9f6df5e"
] |
[
"art/preprocessing/standardisation_mean_std/standardisation_mean_std_pytorch.py",
"art/attacks/inference/membership_inference/black_box.py"
] |
[
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the standardisation with mean and standard deviation.\n\"\"\"\nimport logging\nfrom typing import Optional, Tuple, TYPE_CHECKING\n\nfrom art.preprocessing.preprocessing import PreprocessorPyTorch\n\nif TYPE_CHECKING:\n import torch\n\nlogger = logging.getLogger(__name__)\n\n\nclass StandardisationMeanStdPyTorch(PreprocessorPyTorch):\n \"\"\"\n Implement the standardisation with mean and standard deviation.\n \"\"\"\n\n params = [\"mean\", \"std\", \"apply_fit\", \"apply_predict\"]\n\n def __init__(\n self,\n mean: float = 0.0,\n std: float = 1.0,\n apply_fit: bool = True,\n apply_predict: bool = True,\n device_type: str = \"gpu\",\n ):\n \"\"\"\n Create an instance of StandardisationMeanStdPyTorch.\n\n :param mean: Mean.\n :param std: Standard Deviation.\n \"\"\"\n import torch # lgtm [py/repeated-import]\n\n super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)\n self.mean = mean\n self.std = std\n self._check_params()\n\n # Set device\n if device_type == \"cpu\" or not torch.cuda.is_available():\n self._device = torch.device(\"cpu\")\n else:\n cuda_idx = torch.cuda.current_device()\n self._device = torch.device(\"cuda:{}\".format(cuda_idx))\n\n def forward(\n self, x: \"torch.Tensor\", y: Optional[\"torch.Tensor\"] = None\n ) -> Tuple[\"torch.Tensor\", Optional[\"torch.Tensor\"]]:\n \"\"\"\n Apply standardisation with mean and standard deviation to input `x`.\n \"\"\"\n import torch # lgtm [py/repeated-import]\n\n mean = torch.tensor(self.mean, device=self._device, dtype=torch.float32)\n std = torch.tensor(self.std, device=self._device, dtype=torch.float32)\n\n x_norm = x - mean\n x_norm = x_norm / std\n\n return x_norm, y\n\n def _check_params(self) -> None:\n pass\n\n def __repr__(self):\n return \"StandardisationMeanStdPyTorch(mean={}, std={}, apply_fit={}, apply_predict={}, device={})\".format(\n self.mean, self.std, self.apply_fit, self.apply_predict, self._device\n )\n",
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\nThis module implements membership inference attacks.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom typing import Any, Optional, Union, TYPE_CHECKING\n\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n\nfrom art.attacks.attack import InferenceAttack\nfrom art.estimators.estimator import BaseEstimator, NeuralNetworkMixin\nfrom art.estimators.classification.classifier import ClassifierMixin\nfrom art.utils import check_and_transform_label_format\n\nif TYPE_CHECKING:\n from art.utils import CLASSIFIER_TYPE\n\nlogger = logging.getLogger(__name__)\n\n\nclass MembershipInferenceBlackBox(InferenceAttack):\n \"\"\"\n Implementation of a learned black-box membership inference attack.\n\n This implementation can use as input to the learning process probabilities/logits or losses,\n depending on the type of model and provided configuration.\n \"\"\"\n\n attack_params = InferenceAttack.attack_params + [\n \"input_type\",\n \"attack_model_type\",\n \"attack_model\",\n ]\n _estimator_requirements = (BaseEstimator, ClassifierMixin)\n\n def __init__(\n self,\n classifier: Union[\"CLASSIFIER_TYPE\"],\n input_type: str = \"prediction\",\n attack_model_type: str = \"nn\",\n attack_model: Optional[Any] = None,\n ):\n \"\"\"\n Create a MembershipInferenceBlackBox attack instance.\n\n :param classifier: Target classifier.\n :param attack_model_type: the type of default attack model to train, optional. Should be one of `nn` (for neural\n network, default), `rf` (for random forest) or `gb` (gradient boosting). If\n `attack_model` is supplied, this option will be ignored.\n :param input_type: the type of input to train the attack on. Can be one of: 'prediction' or 'loss'. Default is\n `prediction`. Predictions can be either probabilities or logits, depending on the return type\n of the model.\n :param attack_model: The attack model to train, optional. 
If none is provided, a default model will be created.\n \"\"\"\n\n super().__init__(estimator=classifier)\n self.input_type = input_type\n self.attack_model_type = attack_model_type\n self.attack_model = attack_model\n\n self._check_params()\n\n if self.attack_model:\n self.default_model = False\n self.attack_model_type = \"None\"\n else:\n self.default_model = True\n if self.attack_model_type == \"nn\":\n import torch # lgtm [py/repeated-import]\n import torch.nn as nn # lgtm [py/repeated-import]\n\n class MembershipInferenceAttackModel(nn.Module):\n \"\"\"\n Implementation of a pytorch model for learning a membership inference attack.\n\n The features used are probabilities/logits or losses for the attack training data along with\n its true labels.\n \"\"\"\n\n def __init__(self, num_classes, num_features=None):\n\n self.num_classes = num_classes\n if num_features:\n self.num_features = num_features\n else:\n self.num_features = num_classes\n\n super().__init__()\n\n self.features = nn.Sequential(\n nn.Linear(self.num_features, 512),\n nn.ReLU(),\n nn.Linear(512, 100),\n nn.ReLU(),\n nn.Linear(100, 64),\n nn.ReLU(),\n )\n\n self.labels = nn.Sequential(\n nn.Linear(self.num_classes, 256), nn.ReLU(), nn.Linear(256, 64), nn.ReLU(),\n )\n\n self.combine = nn.Sequential(nn.Linear(64 * 2, 1),)\n\n self.output = nn.Sigmoid()\n\n def forward(self, x_1, label):\n out_x1 = self.features(x_1)\n out_l = self.labels(label)\n is_member = self.combine(torch.cat((out_x1, out_l), 1))\n return self.output(is_member)\n\n if self.input_type == \"prediction\":\n self.attack_model = MembershipInferenceAttackModel(classifier.nb_classes)\n else:\n self.attack_model = MembershipInferenceAttackModel(classifier.nb_classes, num_features=1)\n self.epochs = 100\n self.batch_size = 100\n self.learning_rate = 0.0001\n elif self.attack_model_type == \"rf\":\n self.attack_model = RandomForestClassifier()\n elif self.attack_model_type == \"gb\":\n self.attack_model = GradientBoostingClassifier()\n\n def fit(self, x: np.ndarray, y: np.ndarray, test_x: np.ndarray, test_y: np.ndarray, **kwargs):\n \"\"\"\n Infer membership in the training set of the target estimator.\n\n :param x: Records that were used in training the target model.\n :param y: True labels for `x`.\n :param test_x: Records that were not used in training the target model.\n :param test_y: True labels for `test_x`.\n :return: An array holding the inferred membership status, 1 indicates a member and 0 indicates non-member.\n \"\"\"\n if self.estimator.input_shape[0] != x.shape[1]:\n raise ValueError(\"Shape of x does not match input_shape of classifier\")\n if self.estimator.input_shape[0] != test_x.shape[1]:\n raise ValueError(\"Shape of test_x does not match input_shape of classifier\")\n\n y = check_and_transform_label_format(y, len(np.unique(y)), return_one_hot=True)\n test_y = check_and_transform_label_format(test_y, len(np.unique(test_y)), return_one_hot=True)\n\n if y.shape[0] != x.shape[0]:\n raise ValueError(\"Number of rows in x and y do not match\")\n if test_y.shape[0] != test_x.shape[0]:\n raise ValueError(\"Number of rows in test_x and test_y do not match\")\n\n # Create attack dataset\n # uses final probabilities/logits\n if self.input_type == \"prediction\":\n # members\n features = self.estimator.predict(x).astype(np.float32)\n # non-members\n test_features = self.estimator.predict(test_x).astype(np.float32)\n # only for models with loss\n elif self.input_type == \"loss\":\n if NeuralNetworkMixin not in type(self.estimator).__mro__:\n raise 
TypeError(\"loss input_type can only be used with neural networks\")\n # members\n features = self.estimator.loss(x, y).astype(np.float32).reshape(-1, 1)\n # non-members\n test_features = self.estimator.loss(test_x, test_y).astype(np.float32).reshape(-1, 1)\n else:\n raise ValueError(\"Illegal value for parameter `input_type`.\")\n\n # members\n labels = np.ones(x.shape[0])\n # non-members\n test_labels = np.zeros(test_x.shape[0])\n\n x_1 = np.concatenate((features, test_features))\n x_2 = np.concatenate((y, test_y))\n y_new = np.concatenate((labels, test_labels))\n\n if self.default_model and self.attack_model_type == \"nn\":\n import torch # lgtm [py/repeated-import]\n import torch.nn as nn # lgtm [py/repeated-import]\n import torch.optim as optim # lgtm [py/repeated-import]\n from torch.utils.data import DataLoader # lgtm [py/repeated-import]\n from art.utils import to_cuda\n\n loss_fn = nn.BCELoss()\n optimizer = optim.Adam(self.attack_model.parameters(), lr=self.learning_rate)\n\n attack_train_set = self._get_attack_dataset(f_1=x_1, f_2=x_2, label=y_new)\n train_loader = DataLoader(attack_train_set, batch_size=self.batch_size, shuffle=True, num_workers=0)\n\n self.attack_model = to_cuda(self.attack_model)\n self.attack_model.train()\n\n for _ in range(self.epochs):\n for (input1, input2, targets) in train_loader:\n input1, input2, targets = to_cuda(input1), to_cuda(input2), to_cuda(targets)\n _, input2 = torch.autograd.Variable(input1), torch.autograd.Variable(input2)\n targets = torch.autograd.Variable(targets)\n\n optimizer.zero_grad()\n outputs = self.attack_model(input1, input2)\n loss = loss_fn(outputs, targets.unsqueeze(1)) # lgtm [py/call-to-non-callable]\n\n loss.backward()\n optimizer.step()\n else:\n if self.attack_model_type == \"gb\":\n y_ready = check_and_transform_label_format(y_new, len(np.unique(y_new)), return_one_hot=False)\n else:\n y_ready = check_and_transform_label_format(y_new, len(np.unique(y_new)), return_one_hot=True)\n self.attack_model.fit(np.c_[x_1, x_2], y_ready)\n\n def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:\n \"\"\"\n Infer membership in the training set of the target estimator.\n\n :param x: Input records to attack.\n :param y: True labels for `x`.\n :return: An array holding the inferred membership status, 1 indicates a member and 0 indicates non-member.\n \"\"\"\n if y is None:\n raise ValueError(\"MembershipInferenceBlackBox requires true labels `y`.\")\n\n if self.estimator.input_shape[0] != x.shape[1]:\n raise ValueError(\"Shape of x does not match input_shape of classifier\")\n\n y = check_and_transform_label_format(y, len(np.unique(y)), return_one_hot=True)\n\n if y.shape[0] != x.shape[0]:\n raise ValueError(\"Number of rows in x and y do not match\")\n\n if self.input_type == \"prediction\":\n features = self.estimator.predict(x).astype(np.float32)\n elif self.input_type == \"loss\":\n features = self.estimator.loss(x, y).astype(np.float32).reshape(-1, 1)\n\n if self.default_model and self.attack_model_type == \"nn\":\n import torch # lgtm [py/repeated-import]\n from torch.utils.data import DataLoader # lgtm [py/repeated-import]\n from art.utils import to_cuda, from_cuda\n\n self.attack_model.eval()\n inferred = None\n test_set = self._get_attack_dataset(f_1=features, f_2=y)\n test_loader = DataLoader(test_set, batch_size=self.batch_size, shuffle=True, num_workers=0)\n for input1, input2, _ in test_loader:\n input1, input2 = to_cuda(input1), to_cuda(input2)\n outputs = self.attack_model(input1, 
input2)\n predicted = torch.round(outputs)\n predicted = from_cuda(predicted)\n\n if inferred is None:\n inferred = predicted.detach().numpy()\n else:\n inferred = np.vstack((inferred, predicted.detach().numpy()))\n inferred = inferred.reshape(-1).astype(np.int)\n else:\n inferred = np.array([np.argmax(arr) for arr in self.attack_model.predict(np.c_[features, y])])\n return inferred\n\n def _get_attack_dataset(self, f_1, f_2, label=None):\n from torch.utils.data.dataset import Dataset\n\n class AttackDataset(Dataset):\n \"\"\"\n Implementation of a pytorch dataset for membership inference attack.\n\n The features are probabilities/logits or losses for the attack training data (`x_1`) along with\n its true labels (`x_2`). The labels (`y`) are a boolean representing whether this is a member.\n \"\"\"\n\n def __init__(self, x_1, x_2, y=None):\n import torch # lgtm [py/repeated-import]\n\n self.x_1 = torch.from_numpy(x_1.astype(np.float64)).type(torch.FloatTensor)\n self.x_2 = torch.from_numpy(x_2.astype(np.int32)).type(torch.FloatTensor)\n\n if y is not None:\n self.y = torch.from_numpy(y.astype(np.int8)).type(torch.FloatTensor)\n else:\n self.y = torch.zeros(x_1.shape[0])\n\n def __len__(self):\n return len(self.x_1)\n\n def __getitem__(self, idx):\n if idx >= len(self.x_1):\n raise IndexError(\"Invalid Index\")\n\n return self.x_1[idx], self.x_2[idx], self.y[idx]\n\n return AttackDataset(x_1=f_1, x_2=f_2, y=label)\n\n def _check_params(self) -> None:\n if self.input_type not in [\"prediction\", \"loss\"]:\n raise ValueError(\"Illegal value for parameter `input_type`.\")\n\n if self.attack_model_type not in [\"nn\", \"rf\", \"gb\"]:\n raise ValueError(\"Illegal value for parameter `attack_model_type`.\")\n\n if self.attack_model:\n if ClassifierMixin not in type(self.attack_model).__mro__:\n raise TypeError(\"Attack model must be of type Classifier.\")\n"
] |
[
[
"torch.device",
"torch.cuda.is_available",
"torch.cuda.current_device",
"torch.tensor"
],
[
"sklearn.ensemble.RandomForestClassifier",
"numpy.unique",
"torch.zeros",
"torch.cat",
"torch.round",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"numpy.concatenate",
"numpy.ones",
"torch.nn.Sigmoid",
"numpy.argmax",
"torch.nn.Linear",
"sklearn.ensemble.GradientBoostingClassifier",
"torch.nn.ReLU",
"numpy.zeros",
"torch.autograd.Variable"
]
] |
knicos/voltu |
[
"70b39da7069f8ffd7e33aeb5bdacc84fe4a78f01"
] |
[
"SDK/CPP/tests/test_intrinsics.py"
] |
[
"import unittest\nimport voltu\nimport numpy as np\n\nclass Intrinsics(unittest.TestCase):\n\n def test_default_ctor(self):\n intr = voltu.Intrinsics()\n self.assertIsNotNone(intr)\n\n def test_create(self):\n fx = 2.0\n fy = 3.0\n cx = 4.0\n cy = 5.0\n w = 6\n h = 7\n\n intr = voltu.Intrinsics(\n width = w,\n height = h,\n focal_x = fx,\n focal_y = fy,\n principle_x = -cx,\n principle_y = -cy\n )\n\n K = np.array([\n [fx , 0.0, cx],\n [0.0, fy, cy],\n [0.0, 0.0, 1.0],\n ])\n\n self.assertTrue(np.array_equal(intr.matrix(), K))\n\n w_, h_ = intr.size()\n self.assertEqual(w_, w)\n self.assertEqual(h_, h)\n self.assertTrue(np.array_equal((w, h), intr.size()))\n"
] |
[
[
"numpy.array"
]
] |
pth051001/Gender-Recognition-by-Voice-with-different-ML-classification-methods |
[
"5158f951d8df52eeff229c80555194d50aeca41e"
] |
[
"NaiveBayesFromScratch.py"
] |
[
"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import multivariate_normal\r\n\r\nclass NaiveBayesFromScratch():\r\n def __init__(self, X, y):\r\n self.num_examples, self.num_features = X.shape\r\n self.num_classes = len(np.unique(y))\r\n\r\n def fit(self, X, y):\r\n self.classes_mean = {}\r\n self.classes_variance = {}\r\n self.classes_prior = {}\r\n\r\n for c in range(self.num_classes):\r\n X_c = X[y == c]\r\n\r\n self.classes_mean[str(c)] = np.mean(X_c, axis=0)\r\n self.classes_variance[str(c)] = np.var(X_c, axis=0)\r\n self.classes_prior[str(c)] = X_c.shape[0] / X.shape[0]\r\n\r\n def predict(self, X):\r\n probs = np.zeros((X.shape[0], self.num_classes))\r\n for c in range(self.num_classes):\r\n prior = self.classes_prior[str(c)]\r\n probs_c = multivariate_normal.pdf(X, mean=self.classes_mean[str(c)], cov=self.classes_variance[str(c)])\r\n probs[:,c] = probs_c*prior\r\n return np.argmax(probs, 1)"
] |
[
[
"numpy.unique",
"numpy.argmax",
"numpy.mean",
"numpy.var",
"numpy.zeros"
]
] |
alexamici/xpop |
[
"940f935dfd125d5d51ab7b71a281196c55b29da4"
] |
[
"xpop/data/italy.py"
] |
[
"import numpy as np\nimport pandas as pd\nimport xarray as xr\n\n\ndef istat_deaths_to_pandas(path):\n istat = pd.read_csv(path, encoding=\"8859\", na_values=\"n.d.\", dtype={\"GE\": str})\n\n # make a date index from GE\n def ge2month_day(x):\n return f\"{x[:2]}-{x[2:]}\"\n\n month_day = istat[\"GE\"].map(ge2month_day).values\n istat[\"month_day\"] = month_day\n\n def cl_eta2age(x):\n if x <= 1:\n return x\n elif x <= 21:\n age = x - 1\n return age * 5\n else:\n raise ValueError(f\"unknown age class {x}\")\n\n istat[\"age\"] = istat[\"CL_ETA\"].apply(cl_eta2age)\n\n return istat\n\n\ndef read_istat_deaths(path):\n istat = istat_deaths_to_pandas(path).rename(columns={\"NOME_COMUNE\": \"location\"})\n\n data = None\n\n for yy in range(11, 21):\n tmp = istat.groupby([\"month_day\", \"age\", \"location\"]).agg(\n **{\n \"f\": (f\"F_{yy}\", sum),\n \"m\": (f\"M_{yy}\", sum),\n }\n )\n if yy % 4 != 0:\n tmp = tmp.drop(index=\"02-29\")\n tmp = tmp.reset_index()\n tmp[\"time\"] = tmp[\"month_day\"].map(lambda x: np.datetime64(f\"20{yy}-{x}\"))\n tmp = tmp.set_index([\"time\", \"age\", \"location\"]).drop(columns=\"month_day\")\n xtmp = tmp.to_xarray().to_array(\"sex\").fillna(0)\n if data is None:\n data = xtmp\n else:\n data = xr.concat([data, xtmp], dim=\"time\", fill_value=0)\n\n coords = {\n \"region\": (\n \"location\",\n \"Italy / \" + istat.groupby([\"location\"])[\"NOME_REGIONE\"].first(),\n ),\n \"province\": (\n \"location\",\n istat.groupby([\"location\"])[\"NOME_PROVINCIA\"].first(),\n ),\n }\n data = data.assign_coords(coords)\n return istat, data\n\n\ndef istat_deaths_to_italy_year(istat):\n deaths_italy = istat.sum(\"location\")\n deaths_italy = deaths_italy.resample(time=\"Y\", label=\"left\", loffset=\"1D\").sum()\n deaths_italy = deaths_italy.assign_coords(year=deaths_italy.time.dt.year)\n return deaths_italy.swap_dims(time=\"year\").drop_vars(\"time\")\n"
] |
[
[
"pandas.read_csv",
"numpy.datetime64"
]
] |
wangnmiaon5/stock_investiment |
[
"468850106db71add64be1c81afad8209578a68ab"
] |
[
"good_morning/good_morning.py"
] |
[
"# Copyright (c) 2015 Peter Cerno\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"Module for downloading financial data from financials.morningstar.com.\n\"\"\"\n\nimport csv\nimport json\nimport numpy as np\nimport pandas as pd\nimport re\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom datetime import date\n\nclass KeyRatiosDownloader(object):\n u\"\"\"Downloads key ratios from http://financials.morningstar.com/\n \"\"\"\n\n def __init__(self, table_prefix = u'morningstar_'):\n u\"\"\"Constructs the KeyRatiosDownloader instance.\n\n :param table_prefix: Prefix of the MySQL tables.\n \"\"\"\n self._table_prefix = table_prefix\n\n def download(self, ticker, conn = None, region = 'GBR', culture = 'en_US', currency = 'USD'):\n u\"\"\"Downloads and returns key ratios for the given Morningstar ticker.\n\n Downloads and returns an array of pandas.DataFrames containing the key\n ratios for the given Morningstar ticker. If the MySQL connection is\n specified then the downloaded key ratios are uploaded to the MySQL\n database.\n\n :param ticker: Morningstar ticker.\n :param conn: MySQL connection.\n :param region: Sets the region.\n :param culture: Sets culture.\n :param currency: Sets currency.\n :return: List of pandas.DataFrames containing the key ratios.\n \"\"\"\n url = (r'http://financials.morningstar.com/ajax/exportKR2CSV.html?' 
+\n r'&callback=?&t={t}®ion={reg}&culture={cult}&cur={cur}'.format(\n t=ticker, reg=region, cult=culture, cur=currency))\n with urllib.request.urlopen(url) as response:\n tables = self._parse_tables(response)\n response_structure = [\n # Original Name, New pandas.DataFrame Name\n (u'Financials', u'Key Financials'),\n (u'Key Ratios -> Profitability', u'Key Margins % of Sales'),\n (u'Key Ratios -> Profitability', u'Key Profitability'),\n (u'Key Ratios -> Growth', None),\n (u'Revenue %', u'Key Revenue %'),\n (u'Operating Income %', u'Key Operating Income %'),\n (u'Net Income %', u'Key Net Income %'),\n (u'EPS %', u'Key EPS %'),\n (u'Key Ratios -> Cash Flow', u'Key Cash Flow Ratios'),\n (u'Key Ratios -> Financial Health',\n u'Key Balance Sheet Items (in %)'),\n (u'Key Ratios -> Financial Health',\n u'Key Liquidity/Financial Health'),\n (u'Key Ratios -> Efficiency Ratios', u'Key Efficiency Ratios')]\n frames = self._parse_frames(tables, response_structure)\n\n ############################\n # Error Handling for Ratios\n ############################\n\n # Empty String\n if len(ticker) == 0:\n raise ValueError(\"You did not enter a ticker symbol. Please\"\n \" try again.\")\n # Wrong ticker symbol\n elif frames == \"MorningStar could not find the ticker\":\n raise ValueError(\"MorningStar cannot find the ticker symbol \"\n \"you entered or it is INVALID. Please try \"\n \"again.\")\n\n currency = re.match(u'^.* ([A-Z]+) Mil$',\n frames[0].index[0]).group(1)\n frames[0].index.name += u' ' + currency\n if conn:\n self._upload_frames_to_db(ticker, frames, conn)\n return frames\n\n @staticmethod\n def _parse_tables(response):\n u\"\"\"Parses the given csv response from financials.morningstar.com.\n\n :param response: Response from financials.morningstar.com.\n :return: List of pairs, where the first item is the name of the table\n (extracted from the response) and the second item is the corresponding\n pandas.DataFrame table containing the data.\n \"\"\"\n # Regex pattern used to recognize csv lines containing financial data.\n num_commas = 5\n pat_commas = r'(.*,){%d,}' % num_commas\n # Resulting array of pairs (table_name, table_frame).\n tables = []\n table_name = None\n table_rows = None\n for line in response:\n line = line.decode(u'utf-8').strip()\n match = re.match(pat_commas, line)\n if match:\n for row in csv.reader([line]):\n table_rows.append(row)\n else:\n if table_name and table_rows:\n tables.append([table_name, pd.DataFrame(table_rows)])\n if line != u'':\n table_name = line\n table_rows = []\n if table_name and table_rows:\n tables.append([table_name, pd.DataFrame(table_rows)])\n return tables\n\n @staticmethod\n def _parse_frames(tables, response_structure):\n u\"\"\"Returns an array of processed pandas.DataFrames based on the\n original list of tables and the special response_structure list.\n\n :param tables: Original list of tables (obtained from _parse_tables).\n :param response_structure: List of pairs (expected table name, new name\n assigned to the corresponding (processed) pandas.DataFrame).\n \"\"\"\n\n #############################\n # Error Handling\n #############################\n\n # Fail Early on Empty String\n if len(tables) == 0:\n return (\"MorningStar could not find the ticker\")\n\n period_start = tables[0][1].ix[0][1]\n period_month = pd.datetime.strptime(period_start, u'%Y-%m').month\n #period_freq = pd.datetools.YearEnd(month=period_month)\n period_freq = pd.tseries.offsets.YearEnd(month=period_month)\n frames = []\n for index, (check_name, frame_name) in 
enumerate(response_structure):\n if frame_name and tables[index][0] == check_name:\n frame = KeyRatiosDownloader._process_frame(\n tables[index][1], frame_name, period_start, period_freq)\n if frame is not None and frame.index.size > 0:\n frames.append(frame)\n return frames\n\n @staticmethod\n def _process_frame(frame, frame_name, period_start,\n period_freq):\n u\"\"\"Returns a processed pandas.DataFrame based on the original frame.\n\n :param frame: Original pandas.DataFrame to be processed.\n :param frame_name: New name assigned to the processed pandas.DataFrame.\n :param period_start: Start of the period.\n :param period_freq: Frequency of the period.\n :return Processed pandas.DataFrame based on the original frame.\n \"\"\"\n output_frame = frame.set_index(frame[0])\n del output_frame[0]\n output_frame.index.name = frame_name\n output_frame.columns = pd.period_range(period_start,\n periods=len(output_frame.ix[0]),\n freq=period_freq)\n output_frame.columns.name = u'Period'\n if re.match(r'^\\d{4}-\\d{2}$', output_frame.ix[0][0]):\n output_frame.drop(output_frame.index[0], inplace=True)\n output_frame.replace(u',', u'', regex=True, inplace=True)\n output_frame.replace(u'^\\s*$', u'NaN', regex=True, inplace=True)\n return output_frame.astype(float)\n\n def _upload_frames_to_db(self, ticker, frames,\n conn):\n u\"\"\"Uploads the given array of pandas.DataFrames to the MySQL database.\n\n :param ticker: Morningstar ticker.\n :param frames: Array of pandas.DataFrames to be uploaded.\n :param conn: MySQL connection.\n \"\"\"\n for frame in frames:\n table_name = self._get_db_table_name(frame)\n if not _db_table_exists(table_name, conn):\n _db_execute(self._get_db_create_table(frame), conn)\n _db_execute(self._get_db_replace_values(ticker, frame), conn)\n\n @staticmethod\n def _get_db_name(name):\n u\"\"\"Returns a new (cleaned) name that can be used in a MySQL database.\n\n :param name: Original name.\n :return Name that can be used in a MySQL database.\n \"\"\"\n name = (name.lower()\n .replace(u'/', u' per ')\n .replace(u'&', u' and ')\n .replace(u'%', u' percent '))\n name = re.sub(r'[^a-z0-9]', u' ', name)\n name = re.sub(r'\\s+', u' ', name).strip()\n return name.replace(u' ', u'_')\n\n def _get_db_table_name(self, frame):\n u\"\"\"Returns the MySQL TABLE name for the given pandas.DataFrame.\n\n :param frame: pandas.DataFrame.\n :return MySQL TABLE name.\n \"\"\"\n return self._table_prefix + self._get_db_name(frame.index.name)\n\n def _get_db_create_table(self, frame):\n u\"\"\"Returns the MySQL CREATE TABLE statement for the given\n pandas.DataFrame.\n\n :param frame: pandas.DataFrame.\n :return MySQL CREATE TABLE statement.\n \"\"\"\n columns = (u',\\n'.\n join([u' `%s` DECIMAL(20,5) DEFAULT NULL COMMENT \"%s\"' %\n (self._get_db_name(name), name) for name in\n frame.index.values]))\n table_name = self._get_db_table_name(frame)\n return (\n u'CREATE TABLE `%s` (\\n' % table_name +\n u' `ticker` VARCHAR(50) NOT NULL COMMENT \"Exchange:Ticker\",\\n' +\n u' `period` DATE NOT NULL COMMENT \"Period\",\\n' +\n u'%s,\\n' % columns +\n u' PRIMARY KEY USING BTREE (`ticker`, `period`),\\n' +\n u' KEY `ix_ticker` USING BTREE (`ticker`))\\n' +\n u'ENGINE=MyISAM DEFAULT CHARSET=utf8\\n' +\n u'COMMENT = \"%s\"' % frame.index.name)\n\n def _get_db_replace_values(self, ticker, frame):\n u\"\"\"Returns the MySQL REPLACE INTO statement for the given\n Morningstar ticker and the corresponding pandas.DataFrame.\n\n :param ticker: Morningstar ticker.\n :param frame: pandas.DataFrame.\n :return MySQL 
REPLACE INTO statement.\n \"\"\"\n columns = ([u'`ticker`', u'`period`'] +\n [u'`%s`' % self._get_db_name(name) for name in\n frame.index.values])\n return (\n u'REPLACE INTO `%s`\\n' % self._get_db_table_name(frame) +\n u' (%s)\\nVALUES\\n' % u',\\n '.join(columns) +\n u',\\n'.join([u'(\"' + ticker + u'\", \"' + column.strftime(u'%Y-%m-%d') +\n u'\", ' +\n u', '.join([u'NULL' if np.isnan(x) else u'%.5f' % x\n for x in frame[column].values]) +\n u')' for column in frame.columns]))\n\n\nclass FinancialsDownloader(object):\n u\"\"\"Downloads financials from http://financials.morningstar.com/\n \"\"\"\n\n def __init__(self, table_prefix = u'morningstar_'):\n u\"\"\"Constructs the FinancialsDownloader instance.\n\n :param table_prefix: Prefix of the MySQL tables.\n \"\"\"\n self._table_prefix = table_prefix\n\n def download(self, ticker, conn = None):\n u\"\"\"Downloads and returns a dictionary containing pandas.DataFrames\n representing the financials (i.e. income statement, balance sheet,\n cash flow) for the given Morningstar ticker. If the MySQL connection\n is specified then the downloaded financials are uploaded to the MySQL\n database.\n\n :param ticker: Morningstar ticker.\n :param conn: MySQL connection.\n :return Dictionary containing pandas.DataFrames representing the\n financials for the given Morningstar ticker.\n \"\"\"\n result = {}\n\n ##########################\n # Error Handling\n ##########################\n\n # Empty String\n if len(ticker) == 0:\n raise ValueError(\"You did not enter a ticker symbol. Please\"\n \" try again.\")\n\n for report_type, table_name in [\n (u'is', u'income_statement'),\n (u'bs', u'balance_sheet'),\n (u'cf', u'cash_flow')]:\n frame = self._download(ticker, report_type)\n result[table_name] = frame\n if conn:\n self._upload_frame(\n frame, ticker, self._table_prefix + table_name, conn)\n if conn:\n self._upload_unit(ticker, self._table_prefix + u'unit', conn)\n result[u'period_range'] = self._period_range\n result[u'fiscal_year_end'] = self._fiscal_year_end\n result[u'currency'] = self._currency\n return result\n\n def _download(self, ticker, report_type):\n u\"\"\"Downloads and returns a pandas.DataFrame corresponding to the\n given Morningstar ticker and the given type of the report.\n\n :param ticker: Morningstar ticker.\n :param report_type: Type of the report ('is', 'bs', 'cf').\n :return pandas.DataFrame corresponding to the given Morningstar ticker\n and the given type of the report.\n \"\"\"\n url = (r'http://financials.morningstar.com/ajax/' +\n r'ReportProcess4HtmlAjax.html?&t=' + ticker +\n r'®ion=usa&culture=en-US&cur=USD' +\n r'&reportType=' + report_type + r'&period=12' +\n r'&dataType=A&order=asc&columnYear=5&rounding=3&view=raw')\n with urllib.request.urlopen(url) as response:\n json_text = response.read().decode(u'utf-8')\n\n ##############################\n # Error Handling\n ##############################\n\n # Wrong ticker\n if len(json_text)==0:\n raise ValueError(\"MorningStar cannot find the ticker symbol \"\n \"you entered or it is INVALID. 
Please try \"\n \"again.\")\n\n json_data = json.loads(json_text)\n result_soup = BeautifulSoup(json_data[u'result'],u'html.parser')\n return self._parse(result_soup)\n\n def _parse(self, soup):\n u\"\"\"Extracts and returns a pandas.DataFrame corresponding to the\n given parsed HTML response from financials.morningstar.com.\n\n :param soup: Parsed HTML response by BeautifulSoup.\n :return pandas.DataFrame corresponding to the given parsed HTML response\n from financials.morningstar.com.\n \"\"\"\n # Left node contains the labels.\n left = soup.find(u'div', u'left').div\n # Main node contains the (raw) data.\n main = soup.find(u'div', u'main').find(u'div', u'rf_table')\n year = main.find(u'div', {u'id': u'Year'})\n self._year_ids = [node.attrs[u'id'] for node in year]\n period_month = pd.datetime.strptime(year.div.text, u'%Y-%m').month\n self._period_range = pd.period_range(\n year.div.text, periods=len(self._year_ids),\n # freq=pd.datetools.YearEnd(month=period_month))\n freq = pd.tseries.offsets.YearEnd(month=period_month))\n unit = left.find(u'div', {u'id': u'unitsAndFiscalYear'})\n self._fiscal_year_end = int(unit.attrs[u'fyenumber'])\n self._currency = unit.attrs[u'currency']\n self._data = []\n self._label_index = 0\n self._read_labels(left)\n self._data_index = 0\n self._read_data(main)\n return pd.DataFrame(self._data,\n columns=[u'parent_index', u'title'] + list(\n self._period_range))\n\n def _read_labels(self, root_node, parent_label_index = None):\n u\"\"\"Recursively reads labels from the parsed HTML response.\n \"\"\"\n for node in root_node:\n if node.has_attr(u'class') and u'r_content' in node.attrs[u'class']:\n self._read_labels(node, self._label_index - 1)\n if (node.has_attr(u'id') and\n node.attrs[u'id'].startswith(u'label') and\n not node.attrs[u'id'].endswith(u'padding') and\n (not node.has_attr(u'style') or\n u'display:none' not in node.attrs[u'style'])):\n label_id = node.attrs[u'id'][6:]\n label_title = (node.div.attrs[u'title']\n if node.div.has_attr(u'title')\n else node.div.text)\n self._data.append({\n u'id': label_id,\n u'index': self._label_index,\n u'parent_index': (parent_label_index\n if parent_label_index is not None\n else self._label_index),\n u'title': label_title})\n self._label_index += 1\n\n def _read_data(self, root_node):\n u\"\"\"Recursively reads data from the parsed HTML response.\n \"\"\"\n for node in root_node:\n if node.has_attr(u'class') and u'r_content' in node.attrs[u'class']:\n self._read_data(node)\n if (node.has_attr(u'id') and\n node.attrs[u'id'].startswith(u'data') and\n not node.attrs[u'id'].endswith(u'padding') and\n (not node.has_attr(u'style') or\n u'display:none' not in node.attrs[u'style'])):\n data_id = node.attrs[u'id'][5:]\n while (self._data_index < len(self._data) and\n self._data[self._data_index][u'id'] != data_id):\n # In some cases we do not have data for all labels.\n self._data_index += 1\n assert(self._data_index < len(self._data) and\n self._data[self._data_index][u'id'] == data_id)\n for (i, child) in enumerate(node.children):\n try:\n value = float(child.attrs[u'rawvalue'])\n except ValueError:\n value = None\n self._data[self._data_index][\n self._period_range[i]] = value\n self._data_index += 1\n\n def _upload_frame(self, frame, ticker, table_name,\n conn):\n u\"\"\"Uploads the given pandas.DataFrame to the MySQL database.\n\n :param frame: pandas.DataFrames to be uploaded.\n :param ticker: Morningstar ticker.\n :param table_name: Name of the MySQL table.\n :param conn: MySQL connection.\n \"\"\"\n if not 
_db_table_exists(table_name, conn):\n _db_execute(self._get_db_create_table(table_name), conn)\n _db_execute(self._get_db_replace_values(\n ticker, frame, table_name), conn)\n\n def _upload_unit(self, ticker, table_name,\n conn):\n u\"\"\"Uploads the fiscal_year_end and the currency to the MySQL database.\n\n :param ticker: Morningstar ticker.\n :param table_name: Name of the MySQL table.\n :param conn: MySQL connection.\n \"\"\"\n if not _db_table_exists(table_name, conn):\n _db_execute(\n u'CREATE TABLE `%s` (\\n' % table_name +\n u' `ticker` varchar(50) NOT NULL\\n' +\n u' COMMENT \"Exchange:Ticker\",\\n' +\n u' `fiscal_year_end` int(10) unsigned NOT NULL\\n' +\n u' COMMENT \"Fiscal Year End Month\",\\n' +\n u' `currency` varchar(50) NOT NULL\\n' +\n u' COMMENT \"Currency\",\\n' +\n u' PRIMARY KEY USING BTREE (`ticker`))\\n' +\n u'ENGINE=MyISAM DEFAULT CHARSET=utf8', conn)\n _db_execute(\n u'REPLACE INTO `%s`\\n' % table_name +\n u' (`ticker`, `fiscal_year_end`, `currency`)\\nVALUES\\n' +\n u'(\"%s\", %d, \"%s\")' % (\n ticker, self._fiscal_year_end, self._currency), conn)\n\n @staticmethod\n def _get_db_create_table(table_name):\n u\"\"\"Returns the MySQL CREATE TABLE statement for the given table_name.\n\n :param table_name: Name of the MySQL table.\n :return MySQL CREATE TABLE statement.\n \"\"\"\n year = date.today().year\n year_range = xrange(year - 6, year + 2)\n columns = u',\\n'.join(\n [u' `year_%d` DECIMAL(20,5) DEFAULT NULL ' % year +\n u'COMMENT \"Year %d\"' % year\n for year in year_range])\n return (\n u'CREATE TABLE `%s` (\\n' % table_name +\n u' `ticker` VARCHAR(50) NOT NULL COMMENT \"Exchange:Ticker\",\\n' +\n u' `id` int(10) unsigned NOT NULL COMMENT \"Id\",\\n' +\n u' `parent_id` int(10) unsigned NOT NULL COMMENT \"Parent Id\",\\n' +\n u' `item` varchar(500) NOT NULL COMMENT \"Item\",\\n' +\n u'%s,\\n' % columns +\n u' PRIMARY KEY USING BTREE (`ticker`, `id`),\\n' +\n u' KEY `ix_ticker` USING BTREE (`ticker`))\\n' +\n u'ENGINE=MyISAM DEFAULT CHARSET=utf8')\n\n @staticmethod\n def _get_db_replace_values(ticker, frame,\n table_name):\n u\"\"\"Returns the MySQL REPLACE INTO statement for the given\n Morningstar ticker and the corresponding pandas.DataFrame.\n\n :param ticker: Morningstar ticker.\n :param frame: pandas.DataFrame.\n :param table_name: Name of the MySQL table.\n :return MySQL REPLACE INTO statement.\n \"\"\"\n columns = [u'`ticker`', u'`id`, `parent_id`, `item`'] + \\\n [u'`year_%d`' % period.year for period in\n frame.columns[2:]]\n return (\n u'REPLACE INTO `%s`\\n' % table_name +\n u' (%s)\\nVALUES\\n' % u', '.join(columns) +\n u',\\n'.join([u'(\"' + ticker + u'\", %d, %d, \"%s\", ' %\n (index, frame.ix[index, u'parent_index'],\n frame.ix[index, u'title']) +\n u', '.join(\n [u'NULL' if np.isnan(frame.ix[index, period])\n else u'%.5f' % frame.ix[index, period]\n for period in frame.columns[2:]]) + u')'\n for index in frame.index]))\n\n\ndef _db_table_exists(table_name, conn):\n u\"\"\"Helper method for checking whether the given MySQL table exists.\n\n :param table_name: Name of the MySQL table to be checked.\n :param conn: MySQL connection.\n :return True iff the given MySQL table exists.\n \"\"\"\n cursor = conn.cursor()\n cursor.execute(u\"\"\"\n SELECT COUNT(*)\n FROM information_schema.tables\n WHERE table_name = '{0}'\"\"\".format(table_name))\n table_exists = cursor.fetchone()[0] == 1\n cursor.close()\n return table_exists\n\n\ndef _db_execute(query, conn):\n u\"\"\"Helper method for executing the given MySQL non-query.\n\n :param query: MySQL 
query to be executed.\n :param conn: MySQL connection.\n \"\"\"\n cursor = conn.cursor()\n cursor.execute(query)\n cursor.close()\n"
] |
[
[
"numpy.isnan",
"pandas.tseries.offsets.YearEnd",
"pandas.datetime.strptime",
"pandas.DataFrame"
]
] |
kundajelab/fastISM |
[
"1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b"
] |
[
"test/test_simple_nested_architectures.py"
] |
[
"import tensorflow as tf\nimport unittest\n\nfrom context import fastISM\n\ndef conv_block(input_shape=(108,4)):\n inp = tf.keras.Input(shape=input_shape)\n x = tf.keras.layers.Conv1D(20, 3, padding='same')(inp)\n x = tf.keras.layers.MaxPooling1D(3)(x)\n x = tf.keras.layers.Conv1D(20, 5, padding='same')(x)\n x = tf.keras.layers.MaxPooling1D(3)(x)\n x = tf.keras.layers.Conv1D(20, 9, padding='same')(x)\n x = tf.keras.layers.MaxPooling1D(3)(x)\n model = tf.keras.Model(inputs=inp, outputs=x)\n return model\n\ndef res_block(input_shape=(108,20)):\n inp = tf.keras.Input(shape=input_shape)\n x = tf.keras.layers.Conv1D(20, 3, padding='same')(inp) \n x = tf.keras.layers.Add()([inp, x])\n model = tf.keras.Model(inputs=inp, outputs=x)\n return model\n\ndef doub_res_block(input_shape=(108,20)):\n inp = tf.keras.Input(shape=input_shape)\n x = res_block()(inp)\n x = res_block()(x) \n model = tf.keras.Model(inputs=inp, outputs=x)\n return model \n\ndef fc_block(input_shape=(80,)):\n inp = tf.keras.Input(shape=input_shape)\n x = tf.keras.layers.Dense(10)(inp)\n x = tf.keras.layers.Dense(1)(x)\n \n model = tf.keras.Model(inputs=inp, outputs=x)\n return model\n\ndef my_add_block(input_shape=(108,20)):\n x1 = tf.keras.Input(shape=input_shape)\n x2 = tf.keras.Input(shape=input_shape)\n y = tf.keras.layers.Add()([x1,x2])\n \n model = tf.keras.Model(inputs=[x1,x2], outputs=y)\n return model\n\ndef my_add_max_block(input_shape=(108,20)):\n x1 = tf.keras.Input(shape=input_shape)\n x2 = tf.keras.Input(shape=input_shape)\n y1 = tf.keras.layers.Add()([x1,x2])\n y2 = tf.keras.layers.Maximum()([x1,x2])\n \n model = tf.keras.Model(inputs=[x1,x2], outputs=[y1, y2])\n return model\n\n\ndef my_sub_block(input_shape=(108,20)):\n x1 = tf.keras.Input(shape=input_shape)\n x2 = tf.keras.Input(shape=input_shape)\n y = tf.keras.layers.Subtract()([x2,x1])\n \n model = tf.keras.Model(inputs=[x1,x2], outputs=y)\n return model\n\n\nclass TestSimpleSingleNestedArchitectures(unittest.TestCase):\n def test_three_conv_two_fc(self):\n # inp -> [ C -> M -> C -> M -> C -> M ] -> [ D -> D -> y ]\n convs = conv_block()\n fcs = fc_block()\n\n inp = tf.keras.Input((108, 4))\n x = convs(inp)\n x = tf.keras.layers.Flatten()(x)\n x = fcs(x)\n \n model = tf.keras.Model(inputs=inp, outputs=x)\n\n fast_ism_model = fastISM.FastISM(\n model, test_correctness=False)\n\n self.assertTrue(fast_ism_model.test_correctness())\n \n def test_conv_res_mxp_two_fc(self):\n # _________\n # ^ |\n # inp -> C [ -> C -> Add ] -> M -> [ D -> D -> y ]\n res = res_block()\n fcs = fc_block(input_shape=(36*20,))\n\n inp = tf.keras.Input((108, 4))\n x = tf.keras.layers.Conv1D(20, 3, padding='same')(inp)\n x = res(x)\n x = tf.keras.layers.MaxPooling1D(3)(x)\n x = tf.keras.layers.Flatten()(x)\n x = fcs(x)\n \n model = tf.keras.Model(inputs=inp, outputs=x)\n\n fast_ism_model = fastISM.FastISM(\n model, test_correctness=False)\n\n self.assertTrue(fast_ism_model.test_correctness())\n \n def test_conv_my_add_mxp_two_fc(self):\n # _________\n # ^ |\n # inp -> C -> C ->[ Add ] -> M -> [ D -> D -> y ]\n # testing a nested block that takes in multiple inputs\n my_add = my_add_block()\n fcs = fc_block(input_shape=(36*20,))\n\n inp = tf.keras.Input((108, 4))\n x1 = tf.keras.layers.Conv1D(20, 3, padding='same')(inp)\n x2 = tf.keras.layers.Conv1D(20, 3, padding='same')(x1)\n y = my_add([x1,x2])\n y = tf.keras.layers.MaxPooling1D(3)(y)\n y = tf.keras.layers.Flatten()(y)\n y = fcs(y)\n \n model = tf.keras.Model(inputs=inp, outputs=y)\n\n fast_ism_model = fastISM.FastISM(\n model, 
test_correctness=False)\n\n self.assertTrue(fast_ism_model.test_correctness())\n \n def test_conv_my_add_max_mxp_two_fc(self):\n # _________ __________\n # ^ | ^ |\n # inp -> C -> C ->[ Add/Max ] -> Add -> M -> [ D -> D -> y ]\n # testing a nested block that takes in multiple inputs\n # and returns multiple outputs\n my_add_max = my_add_max_block()\n fcs = fc_block(input_shape=(36*20,))\n\n inp = tf.keras.Input((108, 4))\n x1 = tf.keras.layers.Conv1D(20, 3, padding='same')(inp)\n x2 = tf.keras.layers.Conv1D(20, 3, padding='same')(x1)\n y1, y2 = my_add_max([x1,x2])\n y = tf.keras.layers.Add()([y1,y2])\n y = tf.keras.layers.MaxPooling1D(3)(y)\n y = tf.keras.layers.Flatten()(y)\n y = fcs(y)\n \n model = tf.keras.Model(inputs=inp, outputs=y)\n\n fast_ism_model = fastISM.FastISM(\n model, test_correctness=False)\n\n self.assertTrue(fast_ism_model.test_correctness())\n \n def test_conv_my_sub_mxp_two_fc(self):\n # TODO: fails as of now since inbound_edges does not contain\n # the correct node order\n # _________\n # ^ |\n # inp -> C -> C ->[ Sub ] -> M -> [ D -> D -> y ]\n # testing a nested block that takes in multiple inputs\n my_sub = my_sub_block()\n fcs = fc_block(input_shape=(36*20,))\n\n inp = tf.keras.Input((108, 4))\n x1 = tf.keras.layers.Conv1D(20, 3, padding='same')(inp)\n x2 = tf.keras.layers.Conv1D(20, 3, padding='same')(x1)\n y = my_sub([x1,x2])\n y = tf.keras.layers.MaxPooling1D(3)(y)\n y = tf.keras.layers.Flatten()(y)\n y = fcs(y)\n \n model = tf.keras.Model(inputs=inp, outputs=y)\n\n fast_ism_model = fastISM.FastISM(\n model, test_correctness=False)\n\n self.assertTrue(fast_ism_model.test_correctness())\n \n def test_conv_doub_res_mxp_two_fc(self):\n # _________ _________\n # ^ | ^ |\n # inp -> C [ [ -> C -> Add ] -> [ -> C -> Add ] ] -> M -> [ D -> D -> y ]\n # doub_res_block contains 2 res_blocks within it -> double nesting\n doub_res = doub_res_block()\n fcs = fc_block(input_shape=(36*20,))\n\n inp = tf.keras.Input((108, 4))\n x = tf.keras.layers.Conv1D(20, 3, padding='same')(inp)\n x = doub_res(x)\n x = tf.keras.layers.MaxPooling1D(3)(x)\n x = tf.keras.layers.Flatten()(x)\n x = fcs(x)\n \n model = tf.keras.Model(inputs=inp, outputs=x)\n\n fast_ism_model = fastISM.FastISM(\n model, test_correctness=False)\n\n self.assertTrue(fast_ism_model.test_correctness())\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"tensorflow.keras.layers.Maximum",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Subtract",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.MaxPooling1D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Flatten"
]
] |
Sloug/h2o-3 |
[
"74f3eeee85aea3513adef1a7b519865d314a068a"
] |
[
"h2o-py/tests/pyunit_utils/utilsPY.py"
] |
[
"from __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import range\nfrom past.builtins import basestring\nfrom scipy.sparse import csr_matrix\nimport sys, os\nimport pandas as pd\nfrom six import string_types\n\ntry: # works with python 2.7 not 3\n from StringIO import StringIO\nexcept: # works with python 3\n from io import StringIO\n\nsys.path.insert(1, \"../../\")\nimport h2o\nimport imp\nimport random\nimport re\nimport subprocess\nfrom subprocess import STDOUT,PIPE\nfrom h2o.utils.shared_utils import temp_ctr\nfrom h2o.model.binomial import H2OBinomialModel\nfrom h2o.model.clustering import H2OClusteringModel\nfrom h2o.model.multinomial import H2OMultinomialModel\nfrom h2o.model.ordinal import H2OOrdinalModel\nfrom h2o.model.regression import H2ORegressionModel\nfrom h2o.estimators.gbm import H2OGradientBoostingEstimator\nfrom h2o.estimators.deeplearning import H2ODeepLearningEstimator\nfrom h2o.estimators.random_forest import H2ORandomForestEstimator\nfrom h2o.estimators.glm import H2OGeneralizedLinearEstimator\nfrom h2o.estimators.kmeans import H2OKMeansEstimator\nfrom h2o.estimators.naive_bayes import H2ONaiveBayesEstimator\nfrom h2o.transforms.decomposition import H2OPCA\nfrom h2o.estimators.random_forest import H2ORandomForestEstimator\nfrom decimal import *\nimport urllib.request, urllib.error, urllib.parse\nimport numpy as np\nimport shutil\nimport string\nimport copy\nimport json\nimport math\nfrom random import shuffle\nimport scipy.special\nfrom h2o.utils.typechecks import assert_is_type\n\n\ndef check_models(model1, model2, use_cross_validation=False, op='e'):\n \"\"\"\n Check that the given models are equivalent.\n\n :param model1:\n :param model2:\n :param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use\n training metrics.\n :param op: comparison operator to use. 'e':==, 'g':>, 'ge':>=\n :return: None. Throw meaningful error messages if the check fails\n \"\"\"\n # 1. Check model types\n model1_type = model1.__class__.__name__\n model2_type = model1.__class__.__name__\n assert model1_type is model2_type, \"The model types differ. The first model is of type {0} and the second \" \\\n \"models is of type {1}.\".format(model1_type, model2_type)\n\n # 2. Check model metrics\n if isinstance(model1,H2OBinomialModel): # 2a. Binomial\n # F1\n f1_1 = model1.F1(xval=use_cross_validation)\n f1_2 = model2.F1(xval=use_cross_validation)\n if op == 'e': assert f1_1[0][1] == f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. Expected the first to be == to the second.\".format(f1_1[0][1], f1_2[0][1])\n elif op == 'g': assert f1_1[0][1] > f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. Expected the first to be > than the second.\".format(f1_1[0][1], f1_2[0][1])\n elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], \"The first model has an F1 of {0} and the second model has an F1 of \" \\\n \"{1}. Expected the first to be >= than the second.\".format(f1_1[0][1], f1_2[0][1])\n elif isinstance(model1,H2ORegressionModel): # 2b. Regression\n # MSE\n mse1 = model1.mse(xval=use_cross_validation)\n mse2 = model2.mse(xval=use_cross_validation)\n if op == 'e': assert mse1 == mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. 
Expected the first to be == to the second.\".format(mse1, mse2)\n elif op == 'g': assert mse1 > mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. Expected the first to be > than the second.\".format(mse1, mse2)\n elif op == 'ge': assert mse1 >= mse2, \"The first model has an MSE of {0} and the second model has an MSE of \" \\\n \"{1}. Expected the first to be >= than the second.\".format(mse1, mse2)\n elif isinstance(model1,H2OMultinomialModel) or isinstance(model1,H2OOrdinalModel): # 2c. Multinomial\n # hit-ratio\n pass\n elif isinstance(model1,H2OClusteringModel): # 2d. Clustering\n # totss\n totss1 = model1.totss(xval=use_cross_validation)\n totss2 = model2.totss(xval=use_cross_validation)\n if op == 'e': assert totss1 == totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. Expected the first to be == to the second.\".format(totss1,\n totss2)\n elif op == 'g': assert totss1 > totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. Expected the first to be > than the second.\".format(totss1,\n totss2)\n elif op == 'ge': assert totss1 >= totss2, \"The first model has an TOTSS of {0} and the second model has an \" \\\n \"TOTSS of {1}. Expected the first to be >= than the second.\" \\\n \"\".format(totss1, totss2)\n\ndef check_dims_values(python_obj, h2o_frame, rows, cols, dim_only=False):\n \"\"\"\n Check that the dimensions and values of the python object and H2OFrame are equivalent. Assumes that the python\n object conforms to the rules specified in the h2o frame documentation.\n\n :param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, ,or pandas.DataFrame\n :param h2o_frame: an H2OFrame\n :param rows: number of rows\n :param cols: number of columns\n :param dim_only: check the dimensions only\n :return: None\n \"\"\"\n h2o_rows, h2o_cols = h2o_frame.dim\n assert h2o_rows == rows and h2o_cols == cols, \"failed dim check! 
h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}\" \\\n \"\".format(h2o_rows, rows, h2o_cols, cols)\n if not dim_only:\n if isinstance(python_obj, (list, tuple)):\n for c in range(cols):\n for r in range(rows):\n pval = python_obj[r]\n if isinstance(pval, (list, tuple)): pval = pval[c]\n hval = h2o_frame[r, c]\n assert pval == hval or abs(pval - hval) < 1e-10, \\\n \"expected H2OFrame to have the same values as the python object for row {0} \" \\\n \"and column {1}, but h2o got {2} and python got {3}.\".format(r, c, hval, pval)\n elif isinstance(python_obj, dict):\n for r in range(rows):\n for k in list(python_obj.keys()):\n pval = python_obj[k][r] if hasattr(python_obj[k],'__iter__') else python_obj[k]\n hval = h2o_frame[r,k]\n assert pval == hval, \"expected H2OFrame to have the same values as the python object for row {0} \" \\\n \"and column {1}, but h2o got {2} and python got {3}.\".format(r, k, hval, pval)\n\ndef np_comparison_check(h2o_data, np_data, num_elements):\n \"\"\"\n Check values achieved by h2o against values achieved by numpy\n\n :param h2o_data: an H2OFrame or H2OVec\n :param np_data: a numpy array\n :param num_elements: number of elements to compare\n :return: None\n \"\"\"\n # Check for numpy\n try:\n imp.find_module('numpy')\n except ImportError:\n assert False, \"failed comparison check because unable to import numpy\"\n\n import numpy as np\n rows, cols = h2o_data.dim\n for i in range(num_elements):\n r = random.randint(0,rows-1)\n c = random.randint(0,cols-1)\n h2o_val = h2o_data[r,c]\n np_val = np_data[r,c] if len(np_data.shape) > 1 else np_data[r]\n if isinstance(np_val, np.bool_): np_val = bool(np_val) # numpy haz special bool type :(\n assert np.absolute(h2o_val - np_val) < 1e-5, \\\n \"failed comparison check! h2o computed {0} and numpy computed {1}\".format(h2o_val, np_val)\n\n # perform h2o predict and mojo predict. Frames containing h2o prediction is returned and mojo predict are\n# returned.\n\ndef mojo_predict(model, tmpdir, mojoname, glrmReconstruct=False, get_leaf_node_assignment=False):\n \"\"\"\n perform h2o predict and mojo predict. 
Frames containing h2o prediction is returned and mojo predict are returned.\n It is assumed that the input data set is saved as in.csv in tmpdir directory.\n\n :param model: h2o model where you want to use to perform prediction\n :param tmpdir: directory where your mojo zip files are stired\n :param mojoname: name of your mojo zip file.\n :param glrmReconstruct: True to return reconstructed dataset, else return the x factor.\n :return: the h2o prediction frame and the mojo prediction frame\n \"\"\"\n newTest = h2o.import_file(os.path.join(tmpdir, 'in.csv'), header=1) # Make sure h2o and mojo use same in.csv\n predict_h2o = model.predict(newTest)\n\n # load mojo and have it do predict\n outFileName = os.path.join(tmpdir, 'out_mojo.csv')\n mojoZip = os.path.join(tmpdir, mojoname) + \".zip\"\n genJarDir = str.split(str(tmpdir),'/')\n genJarDir = '/'.join(genJarDir[0:genJarDir.index('h2o-py')]) # locate directory of genmodel.jar\n\n java_cmd = [\"java\", \"-ea\", \"-cp\", os.path.join(genJarDir, \"h2o-assemblies/genmodel/build/libs/genmodel.jar\"),\n \"-Xmx12g\", \"-XX:MaxPermSize=2g\", \"-XX:ReservedCodeCacheSize=256m\", \"hex.genmodel.tools.PredictCsv\",\n \"--input\", os.path.join(tmpdir, 'in.csv'), \"--output\",\n outFileName, \"--mojo\", mojoZip, \"--decimal\"]\n if get_leaf_node_assignment:\n java_cmd.append(\"--leafNodeAssignment\")\n predict_h2o = model.predict_leaf_node_assignment(newTest)\n\n if glrmReconstruct: # used for GLRM to grab the x coefficients (factors) instead of the predicted values\n java_cmd.append(\"--glrmReconstruct\")\n\n p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)\n o, e = p.communicate()\n pred_mojo = h2o.import_file(os.path.join(tmpdir, 'out_mojo.csv'), header=1) # load mojo prediction into a frame and compare\n if glrmReconstruct or ('glrm' not in model.algo):\n return predict_h2o, pred_mojo\n else:\n return newTest.frame_id, pred_mojo\n\n# perform pojo predict. 
Frame containing pojo predict is returned.\ndef pojo_predict(model, tmpdir, pojoname):\n h2o.download_pojo(model, path=tmpdir)\n h2o_genmodel_jar = os.path.join(tmpdir, \"h2o-genmodel.jar\")\n java_file = os.path.join(tmpdir, pojoname + \".java\")\n\n in_csv = (os.path.join(tmpdir, 'in.csv')) # import the test dataset\n print(\"Compiling Java Pojo\")\n javac_cmd = [\"javac\", \"-cp\", h2o_genmodel_jar, \"-J-Xmx12g\", java_file]\n subprocess.check_call(javac_cmd)\n\n out_pojo_csv = os.path.join(tmpdir, \"out_pojo.csv\")\n cp_sep = \";\" if sys.platform == \"win32\" else \":\"\n java_cmd = [\"java\", \"-ea\", \"-cp\", h2o_genmodel_jar + cp_sep + tmpdir, \"-Xmx12g\",\n \"-XX:ReservedCodeCacheSize=256m\", \"hex.genmodel.tools.PredictCsv\",\n \"--pojo\", pojoname, \"--input\", in_csv, \"--output\", out_pojo_csv, \"--decimal\"]\n\n p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)\n o, e = p.communicate()\n print(\"Java output: {0}\".format(o))\n assert os.path.exists(out_pojo_csv), \"Expected file {0} to exist, but it does not.\".format(out_pojo_csv)\n predict_pojo = h2o.import_file(out_pojo_csv, header=1)\n return predict_pojo\n\ndef javapredict(algo, equality, train, test, x, y, compile_only=False, separator=\",\", setInvNumNA=False,**kwargs):\n print(\"Creating model in H2O\")\n if algo == \"gbm\": model = H2OGradientBoostingEstimator(**kwargs)\n elif algo == \"random_forest\": model = H2ORandomForestEstimator(**kwargs)\n elif algo == \"deeplearning\": model = H2ODeepLearningEstimator(**kwargs)\n elif algo == \"glm\": model = H2OGeneralizedLinearEstimator(**kwargs)\n elif algo == \"naive_bayes\": model = H2ONaiveBayesEstimator(**kwargs)\n elif algo == \"kmeans\": model = H2OKMeansEstimator(**kwargs)\n elif algo == \"pca\": model = H2OPCA(**kwargs)\n else: raise ValueError\n if algo == \"kmeans\" or algo == \"pca\": model.train(x=x, training_frame=train)\n else: model.train(x=x, y=y, training_frame=train)\n print(model)\n\n # HACK: munge model._id so that it conforms to Java class name. 
For example, change K-means to K_means.\n # TODO: clients should extract Java class name from header.\n regex = re.compile(\"[+\\\\-* !@#$%^&()={}\\\\[\\\\]|;:'\\\"<>,.?/]\")\n pojoname = regex.sub(\"_\", model._id)\n\n print(\"Downloading Java prediction model code from H2O\")\n tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"..\", \"results\", pojoname))\n os.makedirs(tmpdir)\n h2o.download_pojo(model, path=tmpdir)\n h2o_genmodel_jar = os.path.join(tmpdir, \"h2o-genmodel.jar\")\n assert os.path.exists(h2o_genmodel_jar), \"Expected file {0} to exist, but it does not.\".format(h2o_genmodel_jar)\n print(\"h2o-genmodel.jar saved in {0}\".format(h2o_genmodel_jar))\n java_file = os.path.join(tmpdir, pojoname + \".java\")\n assert os.path.exists(java_file), \"Expected file {0} to exist, but it does not.\".format(java_file)\n print(\"java code saved in {0}\".format(java_file))\n\n print(\"Compiling Java Pojo\")\n javac_cmd = [\"javac\", \"-cp\", h2o_genmodel_jar, \"-J-Xmx12g\", \"-J-XX:MaxPermSize=256m\", java_file]\n subprocess.check_call(javac_cmd)\n\n if not compile_only:\n print(\"Predicting in H2O\")\n predictions = model.predict(test)\n predictions.summary()\n predictions.head()\n out_h2o_csv = os.path.join(tmpdir, \"out_h2o.csv\")\n h2o.download_csv(predictions, out_h2o_csv)\n assert os.path.exists(out_h2o_csv), \"Expected file {0} to exist, but it does not.\".format(out_h2o_csv)\n print(\"H2O Predictions saved in {0}\".format(out_h2o_csv))\n\n print(\"Setting up for Java POJO\")\n in_csv = os.path.join(tmpdir, \"in.csv\")\n h2o.download_csv(test[x], in_csv)\n\n # hack: the PredictCsv driver can't handle quoted strings, so remove them\n f = open(in_csv, \"r+\")\n csv = f.read()\n csv = re.sub('\\\"', \"\", csv)\n csv = re.sub(\",\", separator, csv) # replace with arbitrary separator for input dataset\n f.seek(0)\n f.write(csv)\n f.truncate()\n f.close()\n assert os.path.exists(in_csv), \"Expected file {0} to exist, but it does not.\".format(in_csv)\n print(\"Input CSV to PredictCsv saved in {0}\".format(in_csv))\n\n print(\"Running PredictCsv Java Program\")\n out_pojo_csv = os.path.join(tmpdir, \"out_pojo.csv\")\n cp_sep = \";\" if sys.platform == \"win32\" else \":\"\n java_cmd = [\"java\", \"-ea\", \"-cp\", h2o_genmodel_jar + cp_sep + tmpdir, \"-Xmx12g\", \"-XX:MaxPermSize=2g\",\n \"-XX:ReservedCodeCacheSize=256m\", \"hex.genmodel.tools.PredictCsv\",\n \"--pojo\", pojoname, \"--input\", in_csv, \"--output\", out_pojo_csv, \"--separator\", separator]\n if setInvNumNA:\n java_cmd.append(\"--setConvertInvalidNum\")\n p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)\n o, e = p.communicate()\n print(\"Java output: {0}\".format(o))\n assert os.path.exists(out_pojo_csv), \"Expected file {0} to exist, but it does not.\".format(out_pojo_csv)\n predictions2 = h2o.upload_file(path=out_pojo_csv)\n print(\"Pojo predictions saved in {0}\".format(out_pojo_csv))\n\n print(\"Comparing predictions between H2O and Java POJO\")\n # Dimensions\n hr, hc = predictions.dim\n pr, pc = predictions2.dim\n assert hr == pr, \"Expected the same number of rows, but got {0} and {1}\".format(hr, pr)\n assert hc == pc, \"Expected the same number of cols, but got {0} and {1}\".format(hc, pc)\n\n # Value\n for r in range(hr):\n hp = predictions[r, 0]\n if equality == \"numeric\":\n pp = float.fromhex(predictions2[r, 0])\n assert abs(hp - pp) < 1e-4, \\\n \"Expected predictions to be the same (within 1e-4) for row %d, but got %r and %r\" % (r, hp, pp)\n elif equality == 
\"class\":\n pp = predictions2[r, 0]\n assert hp == pp, \"Expected predictions to be the same for row %d, but got %r and %r\" % (r, hp, pp)\n else:\n raise ValueError\n\ndef javamunge(assembly, pojoname, test, compile_only=False):\n \"\"\"\n Here's how to use:\n assembly is an already fit H2OAssembly;\n The test set should be used to compare the output here and the output of the POJO.\n \"\"\"\n print(\"Downloading munging POJO code from H2O\")\n tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"..\", \"results\", pojoname))\n os.makedirs(tmpdir)\n assembly.to_pojo(pojoname, path=tmpdir, get_jar=True)\n h2o_genmodel_jar = os.path.join(tmpdir, \"h2o-genmodel.jar\")\n assert os.path.exists(h2o_genmodel_jar), \"Expected file {0} to exist, but it does not.\".format(h2o_genmodel_jar)\n print(\"h2o-genmodel.jar saved in {0}\".format(h2o_genmodel_jar))\n java_file = os.path.join(tmpdir, pojoname + \".java\")\n assert os.path.exists(java_file), \"Expected file {0} to exist, but it does not.\".format(java_file)\n print(\"java code saved in {0}\".format(java_file))\n\n print(\"Compiling Java Pojo\")\n javac_cmd = [\"javac\", \"-cp\", h2o_genmodel_jar, \"-J-Xmx12g\", \"-J-XX:MaxPermSize=256m\", java_file]\n subprocess.check_call(javac_cmd)\n\n if not compile_only:\n\n print(\"Setting up for Java POJO\")\n in_csv = os.path.join(tmpdir, \"in.csv\")\n h2o.download_csv(test, in_csv)\n assert os.path.exists(in_csv), \"Expected file {0} to exist, but it does not.\".format(in_csv)\n print(\"Input CSV to mungedCSV saved in {0}\".format(in_csv))\n\n print(\"Predicting in H2O\")\n munged = assembly.fit(test)\n munged.head()\n out_h2o_csv = os.path.join(tmpdir, \"out_h2o.csv\")\n h2o.download_csv(munged, out_h2o_csv)\n assert os.path.exists(out_h2o_csv), \"Expected file {0} to exist, but it does not.\".format(out_h2o_csv)\n print(\"Munged frame saved in {0}\".format(out_h2o_csv))\n\n print(\"Running PredictCsv Java Program\")\n out_pojo_csv = os.path.join(tmpdir, \"out_pojo.csv\")\n cp_sep = \";\" if sys.platform == \"win32\" else \":\"\n java_cmd = [\"java\", \"-ea\", \"-cp\", h2o_genmodel_jar + cp_sep + tmpdir, \"-Xmx12g\", \"-XX:MaxPermSize=2g\",\n \"-XX:ReservedCodeCacheSize=256m\", \"hex.genmodel.tools.MungeCsv\", \"--header\", \"--munger\", pojoname,\n \"--input\", in_csv, \"--output\", out_pojo_csv]\n print(\"JAVA COMMAND: \" + \" \".join(java_cmd))\n p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT)\n o, e = p.communicate()\n print(\"Java output: {0}\".format(o))\n assert os.path.exists(out_pojo_csv), \"Expected file {0} to exist, but it does not.\".format(out_pojo_csv)\n munged2 = h2o.upload_file(path=out_pojo_csv, col_types=test.types)\n print(\"Pojo predictions saved in {0}\".format(out_pojo_csv))\n\n print(\"Comparing predictions between H2O and Java POJO\")\n # Dimensions\n hr, hc = munged.dim\n pr, pc = munged2.dim\n assert hr == pr, \"Expected the same number of rows, but got {0} and {1}\".format(hr, pr)\n assert hc == pc, \"Expected the same number of cols, but got {0} and {1}\".format(hc, pc)\n\n # Value\n import math\n import numbers\n munged.show()\n munged2.show()\n for r in range(hr):\n for c in range(hc):\n hp = munged[r,c]\n pp = munged2[r,c]\n if isinstance(hp, numbers.Number):\n assert isinstance(pp, numbers.Number)\n assert (math.fabs(hp-pp) < 1e-8) or (math.isnan(hp) and math.isnan(pp)), \"Expected munged rows to be the same for row {0}, but got {1}, and {2}\".format(r, hp, pp)\n else:\n assert hp==pp, \"Expected munged rows to be the same 
for row {0}, but got {1}, and {2}\".format(r, hp, pp)\n\ndef locate(path):\n \"\"\"\n Search for a relative path and turn it into an absolute path.\n This is handy when hunting for data files to be passed into h2o and used by import file.\n Note: This function is for unit testing purposes only.\n\n Parameters\n ----------\n path : str\n Path to search for\n\n :return: Absolute path if it is found. None otherwise.\n \"\"\"\n if (test_is_on_hadoop()):\n # Jenkins jobs create symbolic links to smalldata and bigdata on the machine that starts the test. However,\n # in an h2o multinode hadoop cluster scenario, the clustered machines don't know about the symbolic link.\n # Consequently, `locate` needs to return the actual path to the data on the clustered machines. ALL jenkins\n # machines store smalldata and bigdata in /home/0xdiag/. If ON.HADOOP is set by the run.py, the path arg MUST\n # be an immediate subdirectory of /home/0xdiag/. Moreover, the only guaranteed subdirectories of /home/0xdiag/\n # are smalldata and bigdata.\n p = os.path.realpath(os.path.join(\"/home/0xdiag/\", path))\n if not os.path.exists(p): raise ValueError(\"File not found: \" + path)\n return p\n else:\n tmp_dir = os.path.realpath(os.getcwd())\n possible_result = os.path.join(tmp_dir, path)\n while (True):\n if (os.path.exists(possible_result)):\n return possible_result\n\n next_tmp_dir = os.path.dirname(tmp_dir)\n if (next_tmp_dir == tmp_dir):\n raise ValueError(\"File not found: \" + path)\n\n tmp_dir = next_tmp_dir\n possible_result = os.path.join(tmp_dir, path)\n\ndef hadoop_namenode_is_accessible():\n url = \"http://{0}:50070\".format(hadoop_namenode())\n try:\n urllib.urlopen(url)\n internal = True\n except:\n internal = False\n return internal\n\ndef test_is_on_hadoop():\n if hasattr(sys.modules[\"tests.pyunit_utils\"], '__on_hadoop__'):\n return sys.modules[\"tests.pyunit_utils\"].__on_hadoop__\n return False\n\ndef hadoop_namenode():\n if os.getenv(\"NAME_NODE\"):\n return os.getenv(\"NAME_NODE\").split(\".\")[0]\n elif hasattr(sys.modules[\"tests.pyunit_utils\"], '__hadoop_namenode__'):\n return sys.modules[\"tests.pyunit_utils\"].__hadoop_namenode__\n return None\n\ndef pyunit_exec(test_name):\n with open(test_name, \"r\") as t: pyunit = t.read()\n pyunit_c = compile(pyunit, os.path.abspath(test_name), 'exec')\n exec(pyunit_c, {})\n\ndef standalone_test(test):\n if not h2o.h2o.connection():\n h2o.init(strict_version_check=False)\n\n h2o.remove_all()\n\n h2o.log_and_echo(\"------------------------------------------------------------\")\n h2o.log_and_echo(\"\")\n h2o.log_and_echo(\"STARTING TEST\")\n h2o.log_and_echo(\"\")\n h2o.log_and_echo(\"------------------------------------------------------------\")\n test()\n\ndef make_random_grid_space(algo, ncols=None, nrows=None):\n \"\"\"\n Construct a dictionary of the form {gbm_parameter:list_of_values, ...}, which will eventually be passed to\n H2OGridSearch to build a grid object. 
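# --- Usage sketch (illustrative) ---------------------------------------------------------------
# standalone_test above is the entry point used when a pyunit test module is run directly rather
# than through the test runner. A typical calling pattern in an individual test file looks like
# the sketch below; the import path and the test function name are placeholders and depend on the
# test layout in use.
from tests import pyunit_utils  # placeholder import; adjust to the actual test package layout

def my_example_test():
    # a real test would import data with pyunit_utils.locate(...), train a model and assert on it
    assert True

if __name__ == "__main__":
    pyunit_utils.standalone_test(my_example_test)
else:
    my_example_test()
# -----------------------------------------------------------------------------------------------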
The gbm parameters, and their associated values, are randomly selected.\n :param algo: a string {\"gbm\", \"rf\", \"dl\", \"km\", \"glm\"} representing the algo dimension of the grid space\n :param ncols: Used for mtries selection or k (pca)\n :param nrows: Used for k (pca)\n :return: a dictionary of parameter_name:list_of_values\n \"\"\"\n grid_space = {}\n if algo in [\"gbm\", \"rf\"]:\n if random.randint(0,1): grid_space['ntrees'] = random.sample(list(range(1,6)),random.randint(2,3))\n if random.randint(0,1): grid_space['max_depth'] = random.sample(list(range(1,6)),random.randint(2,3))\n if random.randint(0,1): grid_space['min_rows'] = random.sample(list(range(1,11)),random.randint(2,3))\n if random.randint(0,1): grid_space['nbins'] = random.sample(list(range(2,21)),random.randint(2,3))\n if random.randint(0,1): grid_space['nbins_cats'] = random.sample(list(range(2,1025)),random.randint(2,3))\n\n if algo == \"gbm\":\n if random.randint(0,1): grid_space['learn_rate'] = [random.random() for _ in range(random.randint(2,3))]\n grid_space['distribution'] = random.sample(['bernoulli', 'multinomial', 'gaussian', 'poisson', 'tweedie', 'gamma'], 1)\n if algo == \"rf\":\n if random.randint(0,1): grid_space['mtries'] = random.sample(list(range(1,ncols+1)),random.randint(2,3))\n if random.randint(0,1): grid_space['sample_rate'] = [random.random() for r in range(random.randint(2,3))]\n elif algo == \"km\":\n grid_space['k'] = random.sample(list(range(1,10)),random.randint(2,3))\n if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))\n if random.randint(0,1): grid_space['standardize'] = [True, False]\n if random.randint(0,1): grid_space['seed'] = random.sample(list(range(1,1000)),random.randint(2,3))\n if random.randint(0,1): grid_space['init'] = random.sample(['Random','PlusPlus','Furthest'],random.randint(2,3))\n elif algo == \"glm\":\n if random.randint(0,1): grid_space['alpha'] = [random.random() for r in range(random.randint(2,3))]\n grid_space['family'] = random.sample(['binomial','gaussian','poisson','tweedie','gamma'], 1)\n if grid_space['family'] == \"tweedie\":\n if random.randint(0,1):\n grid_space['tweedie_variance_power'] = [round(random.random()+1,6) for r in range(random.randint(2,3))]\n grid_space['tweedie_link_power'] = 1 - grid_space['tweedie_variance_power']\n elif algo == \"dl\":\n if random.randint(0,1): grid_space['activation'] = \\\n random.sample([\"Rectifier\", \"Tanh\", \"TanhWithDropout\", \"RectifierWithDropout\", \"MaxoutWithDropout\"],\n random.randint(2,3))\n if random.randint(0,1): grid_space['l2'] = [0.001*random.random() for r in range(random.randint(2,3))]\n grid_space['distribution'] = random.sample(['bernoulli','multinomial','gaussian','poisson','tweedie','gamma'],1)\n return grid_space\n elif algo == \"naiveBayes\":\n grid_space['laplace'] = 0\n if random.randint(0,1): grid_space['laplace'] = [round(random.random() + r, 6) for r in random.sample(list(range(0,11)), random.randint(2,3))]\n if random.randint(0,1): grid_space['min_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]\n if random.randint(0,1): grid_space['eps_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))]\n elif algo == \"pca\":\n if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3))\n if random.randint(0,1): grid_space['transform'] = random.sample([\"NONE\",\"STANDARDIZE\",\"NORMALIZE\",\"DEMEAN\",\"DESCALE\"], random.randint(2,3))\n 
grid_space['k'] = random.sample(list(range(1,min(ncols,nrows))),random.randint(2,3))\n else:\n raise ValueError\n return grid_space\n\n# Validate given models' parameters against expected values\ndef expect_model_param(models, attribute_name, expected_values):\n print(\"param: {0}\".format(attribute_name))\n actual_values = list(set([m.params[attribute_name]['actual'] \\\n if type(m.params[attribute_name]['actual']) != list\n else m.params[attribute_name]['actual'][0] for m in models.models]))\n # possible for actual to be a list (GLM)\n if type(expected_values) != list:\n expected_values = [expected_values]\n # limit precision. Rounding happens in some models like RF\n actual_values = [x if isinstance(x,basestring) else round(float(x),5) for x in actual_values]\n expected_values = [x if isinstance(x,basestring) else round(float(x),5) for x in expected_values]\n print(\"actual values: {0}\".format(actual_values))\n print(\"expected values: {0}\".format(expected_values))\n actual_values_len = len(actual_values)\n expected_values_len = len(expected_values)\n assert actual_values_len == expected_values_len, \"Expected values len: {0}. Actual values len: \" \\\n \"{1}\".format(expected_values_len, actual_values_len)\n actual_values = sorted(actual_values)\n expected_values = sorted(expected_values)\n for i in range(len(actual_values)):\n if isinstance(actual_values[i], float):\n assert abs(actual_values[i]-expected_values[i]) < 1.1e-5, \"Too large of a difference betewen actual and \" \\\n \"expected value. Actual value: {}. Expected value: {}\"\\\n .format(actual_values[i], expected_values[i])\n else:\n assert actual_values[i] == expected_values[i], \"Expected: {}. Actual: {}\"\\\n .format(expected_values[i], actual_values[i])\n\n\ndef rest_ctr():\n return h2o.connection().requests_count\n\n\ndef write_syn_floating_point_dataset_glm(csv_training_data_filename, csv_validation_data_filename,\n csv_test_data_filename, csv_weight_name, row_count, col_count, data_type,\n max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,\n valid_row_count, test_row_count, class_number=2,\n class_method=('probability', 'probability', 'probability'),\n class_margin=[0.0, 0.0, 0.0]):\n \"\"\"\n Generate random data sets to test the GLM algo using the following steps:\n 1. randomly generate the intercept and weight vector;\n 2. generate a set of predictors X;\n 3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random\n Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X\n is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the\n relationship between the response Y (K possible classes) and predictor vector X is assumed to be\n Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e))\n\n :param csv_training_data_filename: string representing full path filename to store training data set. Set to\n null string if no training data set is to be generated.\n :param csv_validation_data_filename: string representing full path filename to store validation data set. Set to\n null string if no validation data set is to be generated.\n :param csv_test_data_filename: string representing full path filename to store test data set. 
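# --- Usage sketch (illustrative, not part of the original utilities) ---------------------------
# A grid space produced by make_random_grid_space is intended to be passed to H2OGridSearch as
# hyper_params, after which expect_model_param can check the trained models. The sketch assumes
# train is an existing H2OFrame and x/y are its predictor/response column selections.
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.gbm import H2OGradientBoostingEstimator

def run_random_gbm_grid(train, x, y):
    grid_space = make_random_grid_space(algo="gbm")
    grid = H2OGridSearch(H2OGradientBoostingEstimator, hyper_params=grid_space)
    grid.train(x=x, y=y, training_frame=train)
    if "ntrees" in grid_space:  # e.g. verify the models used one of the requested ntrees values
        expect_model_param(grid, "ntrees", grid_space["ntrees"])
    return grid
# -----------------------------------------------------------------------------------------------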
Set to null string if\n no test data set is to be generated.\n :param csv_weight_name: string representing full path filename to store intercept and weight used to generate\n all data sets.\n :param row_count: integer representing number of samples (predictor, response) in training data set\n :param col_count: integer representing the number of predictors in the data set\n :param data_type: integer representing the type of predictors or weights (1: integers, 2: real)\n :param max_p_value: integer representing maximum predictor values\n :param min_p_value: integer representing minimum predictor values\n :param max_w_value: integer representing maximum intercept/weight values\n :param min_w_value: integer representing minimum intercept/weight values\n :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response\n :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported\n by our GLM algo\n :param valid_row_count: integer representing number of samples (predictor, response) in validation data set\n :param test_row_count: integer representing number of samples (predictor, response) in test data set\n :param class_number: integer, optional, representing number of classes for binomial and multinomial\n :param class_method: string tuple, optional, describing how we derive the final response from the class\n probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively.\n If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set\n to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability\n exceeds the second highest class probability by the value set in margin. If the maximum class probability fails\n to be greater by the margin than the second highest class probability, the data sample is discarded.\n :param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to\n exceed the second highest class probability in order for us to keep the data sample for\n training/validation/test data set respectively. 
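# --- Worked sketch (illustrative only) of the response formulas described above ----------------
# Plain-numpy version of "y = w^T x + b + e" and of the binomial probability
# exp(w^T x + e) / (1 + exp(w^T x + e)); all names below are local to this example.
import numpy as np

n_rows, n_cols, noise_std = 6, 3, 0.1
w = np.random.uniform(-1.0, 1.0, (n_cols + 1, 1))           # w[0] plays the role of the intercept b
x = np.random.uniform(-1.0, 1.0, (n_rows, n_cols))
x1 = np.concatenate((np.ones((n_rows, 1)), x), axis=1)      # prepend a ones column for b

y_gaussian = x1.dot(w) + noise_std * np.random.standard_normal((n_rows, 1))

z = x1.dot(w) + noise_std * np.random.standard_normal((n_rows, 1))
p_class1 = np.exp(z) / (1.0 + np.exp(z))                    # Prob(Y = 1 | X)
y_binomial = (np.random.uniform(size=(n_rows, 1)) < p_class1).astype(int)   # 'probability' method
# -----------------------------------------------------------------------------------------------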
This field is only meaningful if class_method is set to\n 'threshold'\n\n :return: None\n \"\"\"\n # generate bias b and weight as a column vector\n weights = generate_weights_glm(csv_weight_name, col_count, data_type, min_w_value, max_w_value,\n family_type=family_type, class_number=class_number)\n\n # generate training data set\n if len(csv_training_data_filename) > 0:\n generate_training_set_glm(csv_training_data_filename, row_count, col_count, min_p_value, max_p_value, data_type,\n family_type, noise_std, weights,\n class_method=class_method[0], class_margin=class_margin[0], weightChange=True)\n\n # generate validation data set\n if len(csv_validation_data_filename) > 0:\n generate_training_set_glm(csv_validation_data_filename, valid_row_count, col_count, min_p_value, max_p_value,\n data_type, family_type, noise_std, weights,\n class_method=class_method[1], class_margin=class_margin[1])\n # generate test data set\n if len(csv_test_data_filename) > 0:\n generate_training_set_glm(csv_test_data_filename, test_row_count, col_count, min_p_value, max_p_value,\n data_type, family_type, noise_std, weights,\n class_method=class_method[2], class_margin=class_margin[2])\n\n\ndef write_syn_mixed_dataset_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot,\n csv_validation_data_filename, csv_validation_filename_true_one_hot,\n csv_test_data_filename, csv_test_filename_true_one_hot, csv_weight_filename, row_count,\n col_count, max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type,\n valid_row_count, test_row_count, enum_col, enum_level_vec, class_number=2,\n class_method=['probability', 'probability', 'probability'],\n class_margin=[0.0, 0.0, 0.0]):\n \"\"\"\n This function differs from write_syn_floating_point_dataset_glm in one small point. The predictors in this case\n contains categorical data as well as real data.\n\n Generate random data sets to test the GLM algo using the following steps:\n 1. randomly generate the intercept and weight vector;\n 2. generate a set of predictors X;\n 3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random\n Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X\n is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the\n relationship between the response Y (K possible classes) and predictor vector X is assumed to be\n Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e))\n\n\n :param csv_training_data_filename: string representing full path filename to store training data set. Set to null\n string if no training data set is to be generated.\n :param csv_training_data_filename_true_one_hot: string representing full path filename to store training data set\n with true one-hot encoding. Set to null string if no training data set is to be generated.\n :param csv_validation_data_filename: string representing full path filename to store validation data set. Set to\n null string if no validation data set is to be generated.\n :param csv_validation_filename_true_one_hot: string representing full path filename to store validation data set\n with true one-hot. Set to null string if no validation data set is to be generated.\n :param csv_test_data_filename: string representing full path filename to store test data set. 
Set to null\n string if no test data set is to be generated.\n :param csv_test_filename_true_one_hot: string representing full path filename to store test data set with true\n one-hot encoding. Set to null string if no test data set is to be generated.\n :param csv_weight_filename: string representing full path filename to store intercept and weight used to generate\n all data sets.\n :param row_count: integer representing number of samples (predictor, response) in training data set\n :param col_count: integer representing the number of predictors in the data set\n :param max_p_value: integer representing maximum predictor values\n :param min_p_value: integer representing minimum predictor values\n :param max_w_value: integer representing maximum intercept/weight values\n :param min_w_value: integer representing minimum intercept/weight values\n :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response\n :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported\n by our GLM algo\n :param valid_row_count: integer representing number of samples (predictor, response) in validation data set\n :param test_row_count: integer representing number of samples (predictor, response) in test data set\n :param enum_col: integer representing actual number of categorical columns in data set\n :param enum_level_vec: vector containing maximum integer value for each categorical column\n :param class_number: integer, optional, representing number classes for binomial and multinomial\n :param class_method: string tuple, optional, describing how we derive the final response from the class\n probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively.\n If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set\n to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability\n exceeds the second highest class probability by the value set in margin. If the maximum class probability fails\n to be greater by margin than the second highest class probability, the data sample is discarded.\n :param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to\n exceed the second highest class probability by in order for us to keep the data sample for\n training/validation/test data set respectively. This field is only meaningful if class_method is set to\n 'threshold'\n\n :return: None\n \"\"\"\n # add column count of encoded categorical predictors, if maximum value for enum is 3, it has 4 levels.\n # hence 4 bits are used to encode it with true one hot encoding. That is why we are adding 1 bit per\n # categorical columns added to our predictors\n new_col_count = col_count - enum_col + sum(enum_level_vec) + enum_level_vec.shape[0]\n\n # generate the weights to be applied to the training/validation/test data sets\n # this is for true one hot encoding. 
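# --- Worked example (illustrative) of the column-count arithmetic used above -------------------
# A categorical column whose maximum integer value is v has v+1 levels, so true one-hot encoding
# expands it into v+1 columns and the original categorical column is dropped. The numbers below
# are made up purely to illustrate the formula.
import numpy as np

col_count = 5                        # 2 categorical + 3 real predictors
enum_col = 2
enum_level_vec = np.array([3, 1])    # max values, i.e. 4 and 2 levels respectively

new_col_count = col_count - enum_col + sum(enum_level_vec) + enum_level_vec.shape[0]
assert new_col_count == 9            # 3 real columns + (3+1) + (1+1) one-hot columns
# -----------------------------------------------------------------------------------------------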
For reference+one hot encoding, will skip\n # few extra weights\n weights = generate_weights_glm(csv_weight_filename, new_col_count, 2, min_w_value, max_w_value,\n family_type=family_type, class_number=class_number)\n\n # generate training data set\n if len(csv_training_data_filename) > 0:\n generate_training_set_mixed_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot, row_count,\n col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col,\n enum_level_vec, class_number=class_number,\n class_method=class_method[0], class_margin=class_margin[0], weightChange=True)\n\n # generate validation data set\n if len(csv_validation_data_filename) > 0:\n generate_training_set_mixed_glm(csv_validation_data_filename, csv_validation_filename_true_one_hot,\n valid_row_count, col_count, min_p_value, max_p_value, family_type, noise_std,\n weights, enum_col, enum_level_vec, class_number=class_number,\n class_method=class_method[1], class_margin=class_margin[1])\n # generate test data set\n if len(csv_test_data_filename) > 0:\n generate_training_set_mixed_glm(csv_test_data_filename, csv_test_filename_true_one_hot, test_row_count,\n col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col,\n enum_level_vec, class_number=class_number,\n class_method=class_method[2], class_margin=class_margin[2])\n\n\ndef generate_weights_glm(csv_weight_filename, col_count, data_type, min_w_value, max_w_value, family_type='gaussian',\n class_number=2):\n \"\"\"\n Generate random intercept and weight vectors (integer or real) for GLM algo and save\n the values in a file specified by csv_weight_filename.\n\n :param csv_weight_filename: string representing full path filename to store intercept and weight used to generate\n all data set\n :param col_count: integer representing the number of predictors in the data set\n :param data_type: integer representing the type of predictors or weights (1: integers, 2: real)\n :param max_w_value: integer representing maximum intercept/weight values\n :param min_w_value: integer representing minimum intercept/weight values\n :param family_type: string ,optional, represents the various distribution families (gaussian, multinomial, binomial)\n supported by our GLM algo\n :param class_number: integer, optional, representing number classes for binomial and multinomial\n\n :return: column vector of size 1+colCount representing intercept and weight or matrix of size\n 1+colCount by class_number\n \"\"\"\n\n # first generate random intercept and weight\n if 'gaussian' in family_type.lower():\n if data_type == 1: # generate random integer intercept/weight\n weight = np.random.random_integers(min_w_value, max_w_value, [col_count+1, 1])\n elif data_type == 2: # generate real intercept/weights\n weight = np.random.uniform(min_w_value, max_w_value, [col_count+1, 1])\n else:\n assert False, \"dataType must be 1 or 2 for now.\"\n elif ('binomial' in family_type.lower()) or ('multinomial' in family_type.lower()\n or ('ordinal' in family_type.lower())):\n if 'binomial' in family_type.lower(): # for binomial, only need 1 set of weight\n class_number -= 1\n\n if class_number <= 0:\n assert False, \"class_number must be >= 2!\"\n\n if isinstance(col_count, np.ndarray):\n temp_col_count = col_count[0]\n else:\n temp_col_count = col_count\n\n if data_type == 1: # generate random integer intercept/weight\n weight = np.random.random_integers(min_w_value, max_w_value, [temp_col_count+1, class_number])\n elif data_type == 2: # generate real 
intercept/weights\n weight = np.random.uniform(min_w_value, max_w_value, [temp_col_count+1, class_number])\n else:\n assert False, \"dataType must be 1 or 2 for now.\"\n\n # special treatment for ordinal weights\n if 'ordinal' in family_type.lower():\n num_pred = len(weight)\n for index in range(class_number):\n weight[0,index] = 0\n for indP in range(1,num_pred):\n weight[indP,index] = weight[indP,0] # make sure betas for all classes are the same\n\n np.savetxt(csv_weight_filename, weight.transpose(), delimiter=\",\")\n return weight\n\n\ndef generate_training_set_glm(csv_filename, row_count, col_count, min_p_value, max_p_value, data_type, family_type,\n noise_std, weight, class_method='probability', class_margin=0.0, weightChange=False):\n \"\"\"\n Generate supervised data set given weights for the GLM algo. First randomly generate the predictors, then\n call function generate_response_glm to generate the corresponding response y using the formula: y = w^T x+b+e\n where T is transpose, e is a random Gaussian noise added. For the Binomial family, the relationship between\n the response Y and predictor vector X is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)).\n For the Multinomial family, the relationship between the response Y (K possible classes) and predictor vector\n X is assumed to be Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e)). The predictors and\n responses are saved in a file specified by csv_filename.\n\n :param csv_filename: string representing full path filename to store supervised data set\n :param row_count: integer representing the number of training samples in the data set\n :param col_count: integer representing the number of predictors in the data set\n :param max_p_value: integer representing maximum predictor values\n :param min_p_value: integer representing minimum predictor values\n :param data_type: integer representing the type of predictors or weights (1: integers, 2: real)\n :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported\n by our GLM algo\n :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response\n :param weight: vector representing w in our formula to generate the response.\n :param class_method: string tuple, optional, describing how we derive the final response from the class\n probabilities generated for binomial and multinomial family-type for training/validation/test data set respectively.\n If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set\n to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability\n exceeds the second highest class probability by the value set in the margin. If the maximum class probability fails\n to be greater by the margin than the second highest class probability, the data sample is discarded.\n :param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to\n exceed the second highest class probability in order for us to keep the data sample for\n training/validation/test data set respectively. 
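# --- Compatibility note (illustrative) ----------------------------------------------------------
# np.random.random_integers, used in several generators above, is deprecated in modern NumPy
# releases. The calls can be replaced by np.random.randint, whose upper bound is exclusive and
# therefore needs max + 1. A minimal sketch of the equivalence:
import numpy as np

def random_integers_compat(low, high, size):
    """Inclusive-range integer sampling, mirroring the deprecated np.random.random_integers."""
    return np.random.randint(low, high + 1, size)

# e.g. random_integers_compat(min_w_value, max_w_value, [col_count + 1, 1]) produces the same
# shape and value range as np.random.random_integers(min_w_value, max_w_value, [col_count + 1, 1])
# -------------------------------------------------------------------------------------------------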
This field is only meaningful if class_method is set to\n 'threshold'\n\n :return: None\n \"\"\"\n\n if data_type == 1: # generate random integers\n x_mat = np.random.random_integers(min_p_value, max_p_value, [row_count, col_count])\n elif data_type == 2: # generate random real numbers\n x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count])\n else:\n assert False, \"dataType must be 1 or 2 for now. \"\n\n # generate the response vector to the input predictors\n response_y = generate_response_glm(weight, x_mat, noise_std, family_type,\n class_method=class_method, class_margin=class_margin, weightChange=weightChange)\n\n # for family_type = 'multinomial' or 'binomial', response_y can be -ve to indicate bad sample data.\n # need to delete this data sample before proceeding\n # if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()) or ('ordinal' in family_type.lower()):\n # if 'threshold' in class_method.lower():\n # if np.any(response_y < 0): # remove negative entries out of data set\n # (x_mat, response_y) = remove_negative_response(x_mat, response_y)\n\n # write to file in csv format\n np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=\",\")\n\n\ndef generate_clusters(cluster_center_list, cluster_pt_number_list, cluster_radius_list):\n \"\"\"\n This function is used to generate clusters of points around cluster_centers listed in\n cluster_center_list. The radius of the cluster of points are specified by cluster_pt_number_list.\n The size of each cluster could be different and it is specified in cluster_radius_list.\n\n :param cluster_center_list: list of coordinates of cluster centers\n :param cluster_pt_number_list: number of points to generate for each cluster center\n :param cluster_radius_list: list of size of each cluster\n :return: list of sample points that belong to various clusters\n \"\"\"\n\n k = len(cluster_pt_number_list) # number of clusters to generate clusters for\n\n if (not(k == len(cluster_center_list))) or (not(k == len(cluster_radius_list))):\n assert False, \"Length of list cluster_center_list, cluster_pt_number_list, cluster_radius_list must be the same!\"\n\n training_sets = []\n for k_ind in range(k):\n new_cluster_data = generate_one_cluster(cluster_center_list[k_ind], cluster_pt_number_list[k_ind],\n cluster_radius_list[k_ind])\n if k_ind > 0:\n training_sets = np.concatenate((training_sets, new_cluster_data), axis=0)\n else:\n training_sets = new_cluster_data\n\n # want to shuffle the data samples so that the clusters are all mixed up\n map(np.random.shuffle, training_sets)\n\n return training_sets\n\n\ndef generate_one_cluster(cluster_center, cluster_number, cluster_size):\n \"\"\"\n This function will generate a full cluster wither cluster_number points centered on cluster_center\n with maximum radius cluster_size\n\n :param cluster_center: python list denoting coordinates of cluster center\n :param cluster_number: integer denoting number of points to generate for this cluster\n :param cluster_size: float denoting radius of cluster\n :return: np matrix denoting a cluster\n \"\"\"\n\n pt_dists = np.random.uniform(0, cluster_size, [cluster_number, 1])\n coord_pts = len(cluster_center) # dimension of each cluster point\n one_cluster_data = np.zeros((cluster_number, coord_pts), dtype=np.float)\n\n for p_ind in range(cluster_number):\n coord_indices = list(range(coord_pts))\n random.shuffle(coord_indices) # randomly determine which coordinate to generate\n left_radius = pt_dists[p_ind]\n\n 
for c_ind in range(coord_pts):\n coord_index = coord_indices[c_ind]\n one_cluster_data[p_ind, coord_index] = random.uniform(-1*left_radius+cluster_center[coord_index],\n left_radius+cluster_center[coord_index])\n left_radius = math.sqrt(pow(left_radius, 2)-pow((one_cluster_data[p_ind, coord_index]-\n cluster_center[coord_index]), 2))\n\n return one_cluster_data\n\n\ndef remove_negative_response(x_mat, response_y):\n \"\"\"\n Recall that when the user chooses to generate a data set for multinomial or binomial using the 'threshold' method,\n response y is set to the class with the maximum class probability if the maximum class probability\n exceeds the second highest class probability by the value set in margin. If the maximum class probability fails\n to be greater by margin than the second highest class probability, the data sample is discarded. However, when we\n generate the data set, we keep all samples. For data sample with maximum class probability that fails to be\n greater by margin than the second highest class probability, the response is set to be -1. This function will\n remove all data samples (predictors and responses) with response set to -1.\n\n :param x_mat: predictor matrix containing all predictor values\n :param response_y: response that can be negative if that data sample is to be removed\n\n :return: tuple containing x_mat, response_y with negative data samples removed.\n \"\"\"\n y_response_negative = np.where(response_y < 0) # matrix of True or False\n x_mat = np.delete(x_mat,y_response_negative[0].transpose(),axis=0) # remove predictor row with negative response\n\n # remove rows with negative response\n response_y = response_y[response_y >= 0]\n\n return x_mat,response_y.transpose()\n\n\ndef generate_training_set_mixed_glm(csv_filename, csv_filename_true_one_hot, row_count, col_count, min_p_value,\n max_p_value, family_type, noise_std, weight, enum_col, enum_level_vec,\n class_number=2, class_method='probability', class_margin=0.0, weightChange=False):\n \"\"\"\n Generate supervised data set given weights for the GLM algo with mixed categorical and real value\n predictors. First randomly generate the predictors, then call function generate_response_glm to generate the\n corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise\n added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to\n be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between\n the response Y (K possible classes) and predictor vector X is assumed to be\n Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e)) e is the random Gaussian noise added to the\n response. 
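# --- Compatibility note (illustrative) ----------------------------------------------------------
# generate_clusters above mixes its samples with `map(np.random.shuffle, training_sets)`. Under
# Python 3 map() is lazy, so those shuffle calls never run unless the map object is consumed. The
# stated intent is to mix up the cluster samples, which an eager row shuffle achieves:
import numpy as np

def shuffle_cluster_samples(training_sets):
    """Shuffle the assembled sample matrix along its first axis (the sample rows) in place."""
    np.random.shuffle(training_sets)
    return training_sets
# -------------------------------------------------------------------------------------------------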
The predictors and responses are saved in a file specified by csv_filename.\n\n :param csv_filename: string representing full path filename to store supervised data set\n :param csv_filename_true_one_hot: string representing full path filename to store data set with true one-hot\n encoding.\n :param row_count: integer representing the number of training samples in the data set\n :param col_count: integer representing the number of predictors in the data set\n :param max_p_value: integer representing maximum predictor values\n :param min_p_value: integer representing minimum predictor values\n :param family_type: string represents the various distribution families (gaussian, multinomial, binomial)\n supported by our GLM algo\n :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response\n :param weight: vector representing w in our formula to generate the response.\n :param enum_col: integer representing actual number of categorical columns in data set\n :param enum_level_vec: vector containing maximum integer value for each categorical column\n :param class_number: integer, optional, representing number classes for binomial and multinomial\n :param class_method: string, optional, describing how we derive the final response from the class probabilities\n generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly\n according to the class probabilities calculated. If set to 'threshold', response y is set to the class with\n the maximum class probability if the maximum class probability exceeds the second highest class probability by\n the value set in margin. If the maximum class probability fails to be greater by margin than the second highest\n class probability, the data sample is discarded.\n :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to\n exceed the second highest class probability by in order for us to keep the data set sample. 
This field is only\n meaningful if class_method is set to 'threshold'\n\n :return: None\n \"\"\"\n # generate the random training data sets\n enum_dataset = np.zeros((row_count, enum_col), dtype=np.int) # generate the categorical predictors\n\n # generate categorical data columns\n for indc in range(enum_col):\n enum_dataset[:, indc] = np.random.random_integers(0, enum_level_vec[indc], row_count)\n\n # generate real data columns\n x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count-enum_col])\n x_mat = np.concatenate((enum_dataset, x_mat), axis=1) # concatenate categorical and real predictor columns\n\n if len(csv_filename_true_one_hot) > 0:\n generate_and_save_mixed_glm(csv_filename_true_one_hot, x_mat, enum_level_vec, enum_col, True, weight, noise_std,\n family_type, class_method=class_method, class_margin=class_margin, weightChange=weightChange)\n\n if len(csv_filename) > 0:\n generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, False, weight, noise_std,\n family_type, class_method=class_method, class_margin=class_margin, weightChange=False)\n\n\ndef generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, true_one_hot, weight, noise_std,\n family_type, class_method='probability', class_margin=0.0, weightChange=False):\n \"\"\"\n Given the weights and input data matrix with mixed categorical and real value predictors, this function will\n generate a supervised data set and save the input data and response in a csv format file specified by\n csv_filename. It will first encode the enums without using one hot encoding with or without a reference\n level first before generating a response Y.\n\n :param csv_filename: string representing full path filename to store supervised data set with reference level\n plus true one-hot encoding.\n :param x_mat: predictor matrix with mixed columns (categorical/real values)\n :param enum_level_vec: vector containing maximum integer value for each categorical column\n :param enum_col: integer representing actual number of categorical columns in data set\n :param true_one_hot: bool indicating whether we are using true one hot encoding or reference level plus\n one hot encoding\n :param weight: vector representing w in our formula to generate the response\n :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response\n :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported\n by our GLM algo\n :param class_method: string, optional, describing how we derive the final response from the class probabilities\n generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly\n according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the\n maximum class probability if the maximum class probability exceeds the second highest class probability by the\n value set in the margin. If the maximum class probability fails to be greater by margin than the second highest\n class probability, the data sample is discarded.\n :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed\n the second highest class probability in order for us to keep the data sample. 
This field is only meaningful if\n class_method is set to 'threshold'\n\n :return: None\n \"\"\"\n # encode the enums\n x_mat_encoded = encode_enum_dataset(x_mat, enum_level_vec, enum_col, true_one_hot, False)\n\n # extract the correct weight dimension for the data set\n if not true_one_hot:\n (num_row, num_col) = x_mat_encoded.shape\n weight = weight[0:num_col+1] # +1 to take care of the intercept term\n\n # generate the corresponding response vector given the weight and encoded input predictors\n response_y = generate_response_glm(weight, x_mat_encoded, noise_std, family_type,\n class_method=class_method, class_margin=class_margin, weightChange=weightChange)\n\n # for familyType = 'multinomial' or 'binomial', response_y can be -ve to indicate bad sample data.\n # need to delete this before proceeding\n if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):\n if 'threshold' in class_method.lower():\n (x_mat,response_y) = remove_negative_response(x_mat, response_y)\n\n # write generated data set to file in csv format\n np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=\",\")\n\n\ndef encode_enum_dataset(dataset, enum_level_vec, enum_col, true_one_hot, include_nans):\n \"\"\"\n Given 2-d numpy array of predictors with categorical and real columns, this function will\n encode the enum columns with 1-hot encoding or with reference plus one hot encoding\n\n :param dataset: 2-d numpy array of predictors with both categorical and real columns\n :param enum_level_vec: vector containing maximum level for each categorical column\n :param enum_col: number of categorical columns in the data set\n :param true_one_hot: bool indicating if we are using true one hot encoding or with one reference level + one hot\n encoding\n :param include_nans: bool indicating if we have nans in categorical columns\n\n :return: data set with categorical columns encoded with 1-hot encoding or 1-hot encoding plus reference\n \"\"\"\n (num_row, num_col) = dataset.shape\n\n # split the data set into categorical and real parts\n enum_arrays = dataset[:, 0:enum_col]\n new_enum_arrays = []\n\n # perform the encoding for each element of categorical part\n for indc in range(enum_col):\n enum_col_num = enum_level_vec[indc]+1\n if not true_one_hot:\n enum_col_num -= 1\n\n if include_nans and np.any(enum_arrays[:, indc]):\n enum_col_num += 1\n\n new_temp_enum = np.zeros((num_row, enum_col_num[0]))\n one_hot_matrix = one_hot_encoding(enum_col_num)\n last_col_index = enum_col_num-1\n\n # encode each enum using 1-hot encoding or plus reference value\n for indr in range(num_row):\n enum_val = enum_arrays[indr, indc]\n if true_one_hot: # not using true one hot\n new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 0, last_col_index)\n else:\n if enum_val:\n new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 1, last_col_index)\n\n if indc == 0:\n new_enum_arrays = new_temp_enum\n else:\n new_enum_arrays = np.concatenate((new_enum_arrays, new_temp_enum), axis=1)\n\n return np.concatenate((new_enum_arrays, dataset[:, enum_col:num_col]), axis=1)\n\n\ndef replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index):\n \"\"\"\n Generate encoded bits for a categorical data value using one hot encoding.\n\n :param one_hot_matrix: matrix representing the encoding of categorical data value to 1-hot encoding\n :param enum_val: categorical data value, could be np.nan\n :param add_value: set to 1 if a reference value is needed 
in addition to 1-hot encoding\n :param last_col_index: index into encoding for np.nan if exists\n\n :return: vector representing the encoded values for a enum value\n \"\"\"\n if np.isnan(enum_val): # if data value is np.nan\n return one_hot_matrix[last_col_index]\n else:\n return one_hot_matrix[int(enum_val-add_value)]\n\n\ndef one_hot_encoding(enum_level):\n \"\"\"\n Generate the one_hot_encoding matrix given the number of enum_level.\n\n :param enum_level: generate the actual one-hot encoding matrix\n\n :return: numpy array for the enum_level specified. Note, enum_level <= 6\n \"\"\"\n\n if enum_level >= 2:\n base_array = np.array([[0, 1], [1, 0]]) # for 2 enum levels\n\n for enum_index in range(3, enum_level+1): # loop to build encoding for enum levels > 2\n (num_row, num_col) = base_array.shape\n col_zeros = np.asmatrix(np.zeros(num_row)).transpose() # column of zero matrix\n base_array = np.concatenate((col_zeros, base_array), axis=1) # add column of zero\n row_zeros = np.asmatrix(np.zeros(num_row+1)) # add row of zeros\n row_zeros[0, 0] = 1 # set first element to 1\n base_array = np.concatenate((base_array, row_zeros), axis=0)\n\n\n return base_array\n else:\n assert False, \"enum_level must be >= 2.\"\n\n\ndef generate_response_glm(weight, x_mat, noise_std, family_type, class_method='probability',\n class_margin=0.0, weightChange=False, even_distribution=True):\n \"\"\"\n Generate response vector given weight matrix, predictors matrix for the GLM algo.\n\n :param weight: vector representing w in our formula to generate the response\n :param x_mat: random numpy matrix (2-D ndarray) containing the predictors\n :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response\n :param family_type: string represents the various distribution families (Gaussian, multinomial, binomial)\n supported by our GLM algo\n :param class_method: string, optional, describing how we derive the final response from the class probabilities\n generated for binomial and multinomial familyType. If set to 'probability', response y is generated randomly\n according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the\n maximum class probability if the maximum class probability exceeds the second highest class probability by the\n value set in the margin. If the maximum class probability fails to be greater by margin than the second highest\n class probability, the data sample is discarded.\n :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed\n the second highest class probability in order for us to keep the data set sample. 
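# --- Worked example (illustrative) --------------------------------------------------------------
# For enum_level = 3 the recursive construction in one_hot_encoding above produces a flipped
# identity matrix, i.e. level k is encoded by a 1 in column (enum_level - 1 - k):
#     [[0, 0, 1],
#      [0, 1, 0],
#      [1, 0, 0]]
# The same values (up to matrix vs. ndarray type) can be produced directly, which is a convenient
# sanity check of the construction:
import numpy as np

def one_hot_matrix_direct(enum_level):
    """Flipped identity matrix with one row per enum level."""
    return np.fliplr(np.eye(enum_level))

assert np.array_equal(one_hot_matrix_direct(3), np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]))
# -------------------------------------------------------------------------------------------------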
This field is only meaningful if\n class_method is set to 'threshold'\n\n :return: vector representing the response\n \"\"\"\n (num_row, num_col) = x_mat.shape\n\n temp_ones_col = np.asmatrix(np.ones(num_row)).transpose()\n x_mat = np.concatenate((temp_ones_col, x_mat), axis=1)\n response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])\n\n if 'ordinal' in family_type.lower():\n (num_sample, num_class) = response_y.shape\n lastClass = num_class - 1\n if weightChange:\n tresp = []\n # generate the new y threshold\n for indP in range(num_sample):\n tresp.append(-response_y[indP,0])\n tresp.sort()\n num_per_class = int(len(tresp)/num_class)\n\n if (even_distribution):\n for indC in range(lastClass):\n weight[0,indC] = tresp[(indC+1)*num_per_class]\n\n else: # do not generate evenly distributed class, generate randomly distributed classes\n splitInd = []\n lowV = 0.1\n highV = 1\n v1 = 0\n acc = 0\n for indC in range(lastClass):\n tempf = random.uniform(lowV, highV)\n splitInd.append(v1+int(tempf*num_per_class))\n v1 = splitInd[indC] # from last class\n acc += 1-tempf\n highV = 1+acc\n\n for indC in range(lastClass): # put in threshold\n weight[0,indC] = tresp[splitInd[indC]]\n\n response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1])\n\n discrete_y = np.zeros((num_sample, 1), dtype=np.int)\n for indR in range(num_sample):\n discrete_y[indR, 0] = lastClass\n for indC in range(lastClass):\n if (response_y[indR, indC] >= 0):\n discrete_y[indR, 0] = indC\n break\n return discrete_y\n\n # added more to form Multinomial response\n if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):\n temp_mat = np.exp(response_y) # matrix of n by K where K = 1 for binomials\n if 'binomial' in family_type.lower():\n ntemp_mat = temp_mat + 1\n btemp_mat = temp_mat / ntemp_mat\n temp_mat = np.concatenate((1-btemp_mat, btemp_mat), axis=1) # inflate temp_mat to 2 classes\n\n response_y = derive_discrete_response(temp_mat, class_method, class_margin, family_type)\n\n return response_y\n\n\ndef derive_discrete_response(prob_mat, class_method, class_margin, family_type='binomial'):\n \"\"\"\n This function is written to generate the final class response given the probabilities (Prob(y=k)). There are\n two methods that we use and is specified by the class_method. If class_method is set to 'probability',\n response y is generated randomly according to the class probabilities calculated. If set to 'threshold',\n response y is set to the class with the maximum class probability if the maximum class probability exceeds the\n second highest class probability by the value set in margin. 
If the maximum class probability fails to be\n greater by margin than the second highest class probability, the data sample will be discarded later by\n marking the final response as -1.\n\n :param prob_mat: probability matrix specifying the probability that y=k where k is a class\n :param class_method: string set to 'probability' or 'threshold'\n :param class_margin: if class_method='threshold', class_margin is the margin used to determine if a response is to\n be kept or discarded.\n\n :return: response vector representing class of y or -1 if an data sample is to be discarded.\n \"\"\"\n\n (num_sample, num_class) = prob_mat.shape\n discrete_y = np.argmax(prob_mat, axis=1)\n\n return discrete_y\n\n\ndef normalize_matrix(mat):\n \"\"\"\n This function will normalize a matrix across each row such that the row sum is 1.\n\n :param mat: matrix containing prob(y=k)\n\n :return: normalized matrix containing prob(y=k)\n \"\"\"\n (n, K) = mat.shape\n kronmat = np.ones((1, K), dtype=float)\n row_sum = np.sum(mat, axis=1)\n\n row_sum_mat = np.kron(row_sum, kronmat)\n return mat/row_sum_mat\n\n\ndef move_files(dir_path, old_name, new_file, action='move'):\n \"\"\"\n Simple function to move or copy a data set (old_name) to a special directory (dir_path)\n with new name (new_file) so that we will be able to re-run the tests if we\n have found something wrong with the algorithm under test with the data set.\n This is done to avoid losing the data set.\n\n :param dir_path: string representing full directory path where a file is to be moved to\n :param old_name: string representing file (filename with full directory path) to be moved to new directory.\n :param new_file: string representing the file name of the moved in the new directory\n :param action: string, optional, represent the action 'move' or 'copy' file\n\n :return: None\n \"\"\"\n new_name = os.path.join(dir_path, new_file) # generate new filename including directory path\n\n if os.path.isfile(old_name): # only move/copy file old_name if it actually exists\n if 'move' in action:\n motion = 'mv '\n elif 'copy' in action:\n motion = 'cp '\n else:\n assert False, \"Illegal action setting. It can only be 'move' or 'copy'!\"\n\n cmd = motion+old_name+' '+new_name # generate cmd line string to move the file\n\n subprocess.call(cmd, shell=True)\n\n\ndef remove_files(filename):\n \"\"\"\n Simple function to remove data set saved in filename if the dynamic test is completed with no\n error. Some data sets we use can be rather big. This is performed to save space.\n\n :param filename: string representing the file to be removed. Full path is included.\n\n :return: None\n \"\"\"\n cmd = 'rm ' + filename\n subprocess.call(cmd, shell=True)\n\n\ndef random_col_duplication(num_cols, duplication_threshold, max_number, to_scale, max_scale_factor):\n \"\"\"\n This function will randomly determine for each column if it should be duplicated.\n If it is to be duplicated, how many times, the duplication should be. In addition, a\n scaling factor will be randomly applied to each duplicated column if enabled.\n\n :param num_cols: integer representing number of predictors used\n :param duplication_threshold: threshold to determine if a column is to be duplicated. 
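# --- Illustrative sketch (not the original implementation) --------------------------------------
# The docstrings of generate_response_glm and derive_discrete_response above describe two ways of
# turning class probabilities into a response: random sampling ('probability') and a margin-based
# pick that marks discarded rows with -1 ('threshold'). The body of derive_discrete_response shown
# here simply takes the per-row argmax; the sketch below shows what the described behaviour could
# look like, for reference.
import numpy as np

def discrete_response_sketch(prob_mat, class_method="probability", class_margin=0.0):
    """Per-row class selection following the behaviour described in the docstrings above."""
    prob_mat = np.asarray(prob_mat, dtype=float)
    num_sample, num_class = prob_mat.shape
    out = np.zeros((num_sample, 1), dtype=int)
    for r in range(num_sample):
        p = prob_mat[r] / prob_mat[r].sum()                 # normalise the row to probabilities
        if class_method == "probability":
            out[r, 0] = np.random.choice(num_class, p=p)
        else:                                               # 'threshold'
            order = np.argsort(p)
            top, second = order[-1], order[-2]
            out[r, 0] = top if (p[top] - p[second]) >= class_margin else -1
    return out
# -------------------------------------------------------------------------------------------------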
Set\n this number to be low if you want to encourage column duplication and vice versa\n :param max_number: maximum number of times a column is to be duplicated\n :param to_scale: bool indicating if a duplicated column is to be scaled\n :param max_scale_factor: real representing maximum scale value for repeated columns\n\n :return: a tuple containing two vectors: col_return, col_scale_return.\n col_return: vector indicating the column indices of the original data matrix that will be included\n in the new data matrix with duplicated columns\n col_scale_return: vector indicating for each new column in the new data matrix with duplicated columns,\n what scale should be applied to that column.\n \"\"\"\n\n col_indices = list(range(num_cols)) # contains column indices of predictors in original data set\n col_scales = [1]*num_cols # scaling factor for original data set, all ones.\n\n for ind in range(num_cols): # determine for each column if to duplicate it\n temp = random.uniform(0, 1) # generate random number from 0 to 1\n if temp > duplication_threshold: # duplicate column if random number generated exceeds duplication_threshold\n rep_num = random.randint(1, max_number) # randomly determine how many times to repeat a column\n\n more_col_indices = [ind]*rep_num\n col_indices.extend(more_col_indices)\n temp_scale = []\n\n for ind in range(rep_num):\n if to_scale: # for each duplicated column, determine a scaling factor to multiply the column with\n temp_scale.append(random.uniform(0, max_scale_factor))\n else:\n temp_scale.append(1)\n\n col_scales.extend(temp_scale)\n\n # randomly shuffle the predictor column orders and the corresponding scaling factors\n new_col_indices = list(range(len(col_indices)))\n random.shuffle(new_col_indices)\n col_return = [col_indices[i] for i in new_col_indices]\n col_scale_return = [col_scales[i] for i in new_col_indices]\n\n return col_return, col_scale_return\n\n\ndef duplicate_scale_cols(col_indices, col_scale, old_filename, new_filename):\n \"\"\"\n This function actually performs the column duplication with scaling giving the column\n indices and scaling factors for each column. It will first load the original data set\n from old_filename. 
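# --- Usage sketch (illustrative; file names are hypothetical) ------------------------------------
# random_col_duplication and duplicate_scale_cols are used together: first decide which columns to
# repeat and with what scale, then write the widened data set to disk.
def make_duplicated_dataset(old_csv, new_csv, num_cols):
    col_idx, col_scale = random_col_duplication(num_cols,
                                                duplication_threshold=0.5,
                                                max_number=3,
                                                to_scale=True,
                                                max_scale_factor=2.0)
    duplicate_scale_cols(col_idx, col_scale, old_csv, new_csv)
    return col_idx, col_scale

# e.g. make_duplicated_dataset("in.csv", "in_duplicated.csv", num_cols=10)
# -------------------------------------------------------------------------------------------------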
After performing column duplication and scaling, the new data set\n will be written to file with new_filename.\n\n :param col_indices: vector indicating the column indices of the original data matrix that will be included\n in the new data matrix with duplicated columns\n :param col_scale: vector indicating for each new column in the new data matrix with duplicated columns,\n what scale should be applied to that column\n :param old_filename: string representing full directory path and filename where data set is stored\n :param new_filename: string representing full directory path and filename where new data set is to be stored\n\n :return: None\n \"\"\"\n # pd_frame = pd.read_csv(old_filename, header=None) # read in original data set\n #\n # pd_frame_new = pd.DataFrame() # new empty data frame\n #\n # for ind in range(len(col_indices)): # for each column\n # tempc = pd_frame.ix[:, col_indices[ind]]*col_scale[ind] # extract a column from old data frame and scale it\n # pd_frame_new = pd.concat([pd_frame_new, tempc], axis=1) # add it to the new data frame\n\n np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))\n (num_row, num_col) = np_frame.shape\n np_frame_new = np.asmatrix(np.zeros((num_row, len(col_indices)), dtype=np.float))\n\n for ind in range(len(col_indices)):\n np_frame_new[:, ind] = np_frame[:, col_indices[ind]]*col_scale[ind]\n\n # done changing the data frame. Save it in a new file\n np.savetxt(new_filename, np_frame_new, delimiter=\",\")\n\n\ndef insert_nan_in_data(old_filename, new_filename, missing_fraction):\n \"\"\"\n Give the filename of a data set stored in old_filename, this function will randomly determine\n for each predictor to replace its value with nan or not with probability missing_frac. The\n new data set will be stored in filename new_filename.\n\n :param old_filename: string representing full directory path and filename where data set is stored\n :param new_filename: string representing full directory path and filename where new data set with missing\n values is to be stored\n :param missing_fraction: real value representing the probability of replacing a predictor with nan.\n\n\n :return: None\n \"\"\"\n # pd_frame = pd.read_csv(old_filename, header=None) # read in a dataset\n np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None))\n (row_count, col_count) = np_frame.shape\n random_matrix = np.random.uniform(0, 1, [row_count, col_count-1])\n\n for indr in range(row_count): # for each predictor value, determine if to replace value with nan\n for indc in range(col_count-1):\n if random_matrix[indr, indc] < missing_fraction:\n np_frame[indr, indc] = np.nan\n\n # save new data set with missing values to new file\n np.savetxt(new_filename, np_frame, delimiter=\",\")\n # pd_frame.to_csv(new_filename, sep=',', header=False, index=False, na_rep='nan')\n\n\ndef print_message_values(start_string, nump_array):\n \"\"\"\n This function prints the value of a nump_array with a string message in front of it.\n\n :param start_string: string representing message to be printed\n :param nump_array: array storing something\n\n :return: None\n \"\"\"\n print(start_string)\n print(nump_array)\n\n\ndef show_test_results(test_name, curr_test_val, new_test_val):\n \"\"\"\n This function prints the test execution results which can be passed or failed. 
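    For illustration only (made-up counts):

        failures_before = 3
        failures_after = 4    # pretend one more failure was recorded while running this test
        failed = show_test_results("glm_gaussian_test", failures_before, failures_after)
        # prints the failure message and returns 1; it returns 0 when the two counts are equal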
A message will be printed on
    screen to warn the user of the test result.

    :param test_name: string representing the test name
    :param curr_test_val: integer representing the number of tests failed so far before the test specified in
     test_name is executed
    :param new_test_val: integer representing the number of tests failed after the test specified in test_name is
     executed

    :return: integer: 0 if the test passed and 1 if the test failed.
    """
    failed_string = "Ooops, " + test_name + " failed. I am sorry..."
    pass_string = "Yeah, " + test_name + " passed!"

    if curr_test_val < new_test_val:   # this test has failed
        print(failed_string)
        return 1
    else:
        print(pass_string)
        return 0


def assert_H2OTwoDimTable_equal_upto(table1, table2, col_header_list, tolerance=1e-6):
    '''
    This method will compare two H2OTwoDimTables that are almost of the same size. table1 can be shorter
    than table2. However, for whatever part of table2 table1 has, they must be the same.

    :param table1:
    :param table2:
    :param col_header_list:
    :param tolerance:
    :return:
    '''
    size1 = len(table1.cell_values)

    for cname in col_header_list:
        colindex = table1.col_header.index(cname)

        for cellind in range(size1):
            val1 = table1.cell_values[cellind][colindex]
            val2 = table2.cell_values[cellind][colindex]

            if isinstance(val1, float) and isinstance(val2, float):
                assert abs(val1-val2) < tolerance, \
                    "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                    "{3}".format(val1, val2, cname, tolerance)
            else:
                assert val1 == val2, "table 1 value {0} and table 2 value {1} in {2} differ more than tolerance of " \
                                     "{3}".format(val1, val2, cname, tolerance)
    print("******* Congrats! Test passed. ")


def extract_col_value_H2OTwoDimTable(table, col_name):
    '''
    This function, given the column name, will extract a list containing the values of that column from the
    H2OTwoDimTable.

    :param table:
    :param col_name:
    :return:
    '''

    tableList = []
    col_header = table.col_header
    colIndex = col_header.index(col_name)
    for ind in range(len(table.cell_values)):
        temp = table.cell_values[ind]
        tableList.append(temp[colIndex])

    return tableList


def assert_H2OTwoDimTable_equal(table1, table2, col_header_list, tolerance=1e-6, check_sign=False, check_all=True,
                                num_per_dim=10):
    """
    This method compares two H2OTwoDimTables and verifies that their difference is less than the value set in
    tolerance. It is probably overkill, since I have assumed that the order of col_header_list may not be in the
    same order as the values in the table.cell_values[ind][0]. In addition, I do not assume an order for the names
    in the table.cell_values[ind][0] either, for there is no reason for an order to exist.

    To limit the test run time, we can test a randomly sampled subset of points instead of all points.

    :param table1: H2OTwoDimTable to be compared
    :param table2: the other H2OTwoDimTable to be compared
    :param col_header_list: list of strings denoting names that we want the comparison to be performed on
    :param tolerance: default to 1e-6
    :param check_sign: bool, determine if the sign of values is important or not. For eigenvectors, it is not.
    :param check_all: bool, determine if we need to compare every single element
    :param num_per_dim: integer, number of elements to sample per dimension. We have 3 here.
    :return: None if comparison succeeds and raise an error if comparison failed for whatever reason
    """
    num_comparison = len(set(col_header_list))
    size1 = len(table1.cell_values)
    size2 = len(table2.cell_values)
    worst_error = 0

    assert size1 == size2, "The two H2OTwoDimTables are of different size!"
    assert num_comparison <= size1, "H2OTwoDimTable does not have all the attributes specified in col_header_list."
    flip_sign_vec = generate_sign_vec(table1, table2) if check_sign else [1]*len(table1.cell_values[0])  # correct for sign change for eigenvector comparisons
    randRange1 = generate_for_indices(len(table1.cell_values), check_all, num_per_dim, 0)
    randRange2 = generate_for_indices(len(table2.cell_values), check_all, num_per_dim, 0)

    for ind in range(num_comparison):
        col_name = col_header_list[ind]
        next_name = False

        for name_ind1 in randRange1:
            if col_name != str(table1.cell_values[name_ind1][0]):
                continue

            for name_ind2 in randRange2:
                if not(col_name == str(table2.cell_values[name_ind2][0])):
                    continue

                # now we have the col header names, do the actual comparison
                if str(table1.cell_values[name_ind1][0]) == str(table2.cell_values[name_ind2][0]):
                    randRange3 = generate_for_indices(min(len(table2.cell_values[name_ind2]), len(table1.cell_values[name_ind1])), check_all, num_per_dim, 1)
                    for indC in randRange3:
                        val1 = table1.cell_values[name_ind1][indC]
                        val2 = table2.cell_values[name_ind2][indC]*flip_sign_vec[indC]

                        if isinstance(val1, float) and isinstance(val2, float):
                            compare_val_ratio = abs(val1-val2)/max(1, abs(val1), abs(val2))
                            if compare_val_ratio > tolerance:
                                print("Table entry difference is {0} at dimension {1} and eigenvector number "
                                      "{2}".format(compare_val_ratio, name_ind1, indC))
                                print("The first vector is {0} and the second vector is {1}".format(table1.cell_values[name_ind1], table2.cell_values[name_ind2]))
                                assert False, "Table entries are not equal within tolerance."

                            worst_error = max(worst_error, compare_val_ratio)
                        else:
                            assert False, "Tables contain non-numerical values. Comparison is for numericals only!"
                    next_name = True
                    break
                else:
                    assert False, "Unknown metric names found in col_header_list."
            if next_name:   # ready to go to the next name in col_header_list
                break
    print("******* Congrats! Test passed. 
Maximum difference of your comparison is {0}\".format(worst_error))\n\ndef generate_for_indices(list_size, check_all, num_per_dim, start_val):\n if check_all:\n return list(range(start_val, list_size))\n else:\n randomList = list(range(start_val, list_size))\n shuffle(randomList)\n return randomList[0:min(list_size, num_per_dim)]\n\ndef generate_sign_vec(table1, table2):\n sign_vec = [1]*len(table1.cell_values[0])\n for indC in range(1, len(table2.cell_values[0])): # may need to look at other elements since some may be zero\n for indR in range(0, len(table2.cell_values)):\n if (abs(table1.cell_values[indR][indC]) > 0) and (abs(table2.cell_values[indR][indC]) > 0):\n sign_vec[indC] = int(np.sign(table1.cell_values[indR][indC]) * np.sign(table2.cell_values[indR][indC]))\n # if (np.sign(table1.cell_values[indR][indC])!=np.sign(table2.cell_values[indR][indC])):\n # sign_vec[indC] = -1\n # else:\n # sign_vec[indC] = 1\n break # found what we need. Goto next column\n\n return sign_vec\n\ndef equal_two_arrays(array1, array2, eps, tolerance, throwError=True):\n \"\"\"\n This function will compare the values of two python tuples. First, if the values are below\n eps which denotes the significance level that we care, no comparison is performed. Next,\n False is returned if the different between any elements of the two array exceeds some tolerance.\n\n :param array1: numpy array containing some values of interest\n :param array2: numpy array containing some values of interest that we would like to compare it with array1\n :param eps: significance level that we care about in order to perform the comparison\n :param tolerance: threshold for which we allow the two array elements to be different by\n\n :return: True if elements in array1 and array2 are close and False otherwise\n \"\"\"\n\n size1 = len(array1)\n if size1 == len(array2): # arrays must be the same size\n # compare two arrays\n for ind in range(size1):\n if not ((array1[ind] < eps) and (array2[ind] < eps)):\n # values to be compared are not too small, perform comparison\n\n # look at differences between elements of array1 and array2\n compare_val_h2o_Py = abs(array1[ind] - array2[ind])\n\n if compare_val_h2o_Py > tolerance: # difference is too high, return false\n if throwError:\n assert False, \"Array 1 value {0} and array 2 value {1} do not agree.\".format(array1[ind], array2[ind])\n else:\n return False\n\n return True # return True, elements of two arrays are close enough\n else:\n if throwError:\n assert False, \"The two arrays are of different size!\"\n else:\n return False\n\ndef equal_2D_tables(table1, table2, tolerance=1e-6):\n \"\"\"\n This function will compare the values of two python tuples. First, if the values are below\n eps which denotes the significance level that we care, no comparison is performed. 
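    A minimal sketch with made-up values:

        t1 = [[0.1, 0.2], [0.3, 0.4]]
        t2 = [[0.1, 0.2], [0.3, 0.4000001]]
        assert equal_2D_tables(t1, t2, tolerance=1e-3)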
Next,\n False is returned if the different between any elements of the two array exceeds some tolerance.\n\n :param array1: numpy array containing some values of interest\n :param array2: numpy array containing some values of interest that we would like to compare it with array1\n :param eps: significance level that we care about in order to perform the comparison\n :param tolerance: threshold for which we allow the two array elements to be different by\n\n :return: True if elements in array1 and array2 are close and False otherwise\n \"\"\"\n\n size1 = len(table1)\n if size1 == len(table2): # arrays must be the same size\n # compare two arrays\n for ind in range(size1):\n if len(table1[ind]) == len(table2[ind]):\n for ind2 in range(len(table1[ind])):\n if type(table1[ind][ind2]) == float:\n if abs(table1[ind][ind2]-table2[ind][ind2]) > tolerance:\n return False\n else:\n assert False, \"The two arrays are of different size!\"\n return True\n\n else:\n assert False, \"The two arrays are of different size!\"\n\n\ndef compare_two_arrays(array1, array2, eps, tolerance, comparison_string, array1_string, array2_string, error_string,\n success_string, template_is_better, just_print=False):\n \"\"\"\n This function is written to print out the performance comparison results for various values that\n we care about. It will return 1 if the values of the two arrays exceed threshold specified in tolerance.\n The actual comparison is performed by calling function equal_two_array.\n\n :param array1: numpy array containing some values of interest\n :param array2: numpy array containing some values of interest that we would like to compare it with array1\n :param eps: significance level that we care about in order to perform the comparison\n :param tolerance: threshold for which we allow the two array elements to be different by\n :param comparison_string: string stating what the comparison is about, e.g. \"Comparing p-values ....\"\n :param array1_string: string stating what is the array1 attribute of interest, e.g. \"H2O p-values: \"\n :param array2_string: string stating what is the array2 attribute of interest, e.g. \"Theoretical p-values: \"\n :param error_string: string stating what you want to say if the difference between array1 and array2\n exceeds tolerance, e.g \"P-values are not equal!\"\n :param success_string: string stating what you want to say if the difference between array1 and array2 does not\n exceed tolerance \"P-values are close enough!\"\n :param template_is_better: bool, True, will return 1 if difference among elements of array1 and array2 exceeds\n tolerance. False, will always return 0 even if difference among elements of array1 and array2 exceeds tolerance.\n In this case, the system under test actually performs better than the template.\n :param just_print: bool if True will print attribute values without doing comparison. 
False will print\n attribute values and perform comparison\n\n :return: if template_is_better = True, return 0 if elements in array1 and array2 are close and 1 otherwise;\n if template_is_better = False, will always return 0 since system under tests performs better than\n template system.\n \"\"\"\n\n # display array1, array2 with proper description\n print(comparison_string)\n print(array1_string, array1)\n print(array2_string, array2)\n\n if just_print: # just print the two values and do no comparison\n return 0\n else: # may need to actually perform comparison\n if template_is_better:\n try:\n assert equal_two_arrays(array1, array2, eps, tolerance), error_string\n print(success_string)\n sys.stdout.flush()\n return 0\n except:\n sys.stdout.flush()\n return 1\n else:\n print(\"Test result is actually better than comparison template!\")\n return 0\n\n\ndef make_Rsandbox_dir(base_dir, test_name, make_dir):\n \"\"\"\n This function will remove directory \"Rsandbox/test_name\" off directory base_dir and contents if it exists.\n If make_dir is True, it will create a clean directory \"Rsandbox/test_name\" off directory base_dir.\n\n :param base_dir: string contains directory path where we want to build our Rsandbox/test_name off from\n :param test_name: string contains unit test name that the Rsandbox is created for\n :param make_dir: bool, True: will create directory baseDir/Rsandbox/test_name, False: will not create\n directory.\n\n :return: syndatasets_dir: string containing the full path of the directory name specified by base_dir, test_name\n \"\"\"\n\n # create the Rsandbox directory path for the test.\n syndatasets_dir = os.path.join(base_dir, \"Rsandbox_\" + test_name)\n if os.path.exists(syndatasets_dir): # remove Rsandbox directory if it exists\n shutil.rmtree(syndatasets_dir)\n\n if make_dir: # create Rsandbox directory if make_dir is True\n os.makedirs(syndatasets_dir)\n\n return syndatasets_dir\n\n\ndef get_train_glm_params(model, what_param, family_type='gaussian'):\n \"\"\"\n This function will grab the various attributes (like coefficients, p-values, and others) off a GLM\n model that has been built.\n\n :param model: GLM model that we want to extract information from\n :param what_param: string indicating the model attribute of interest like 'p-value','weights',...\n :param family_type: string, optional, represents the various distribution families (gaussian, multinomial, binomial)\n supported by our GLM algo\n\n :return: attribute value of interest\n \"\"\"\n coeff_pvalues = model._model_json[\"output\"][\"coefficients_table\"].cell_values\n if what_param == 'p-values':\n if 'gaussian' in family_type.lower():\n p_value_h2o = []\n\n for ind in range(len(coeff_pvalues)):\n p_value_h2o.append(coeff_pvalues[ind][-1])\n return p_value_h2o\n\n else:\n assert False, \"P-values are only available to Gaussian family.\"\n\n elif what_param == 'weights':\n if 'gaussian' in family_type.lower():\n weights = []\n\n for ind in range(len(coeff_pvalues)):\n weights.append(coeff_pvalues[ind][1])\n return weights\n elif ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):\n # for multinomial, the coefficients are organized as features by number of classes for\n # nonstandardized and then standardized weights. 
Need to grab the correct matrix as
            # number of classes by n_features matrix
            num_feature = len(coeff_pvalues)
            num_class = (len(coeff_pvalues[0])-1)//2    # integer division; np.zeros needs an int shape

            coeffs = np.zeros((num_class, num_feature), dtype=float)

            end_index = int(num_class+1)
            for col_index in range(len(coeff_pvalues)):
                coeffs[:, col_index] = coeff_pvalues[col_index][1:end_index]

            return coeffs
    elif what_param == 'best_lambda':
        lambda_str = model._model_json["output"]["model_summary"].cell_values[0][4].split('=')
        return float(str(lambda_str[-2]).split(',')[0])
    elif what_param == 'confusion_matrix':
        if 'multinomial' in family_type.lower():
            return model._model_json["output"]["training_metrics"]._metric_json["cm"]["table"]
        elif 'binomial' in family_type.lower():
            return model.confusion_matrix().table
    else:
        assert False, "parameter value not found in GLM model"


def less_than(val1, val2):
    """
    Simple function that returns True if val1 <= val2 and False otherwise.

    :param val1: first value of interest
    :param val2: second value of interest

    :return: bool: True if val1 <= val2 and False otherwise
    """
    if round(val1, 3) <= round(val2, 3):    # only care to the 3rd position after the decimal point
        return True
    else:
        return False


def replace_nan_with_mean(data_with_nans, nans_row_col_indices, col_means):
    """
    Given a data set with nans, the row and column indices of where the nans are and the col_means, this
    function will replace the nans with the corresponding col_means.

    :param data_with_nans: data set matrix with nans
    :param nans_row_col_indices: matrix containing the row and column indices of where the nans are
    :param col_means: vector containing the column means of data_with_nans

    :return: data_with_nans: data set with nans replaced with column means
    """
    num_NAs = len(nans_row_col_indices[0])

    for ind in range(num_NAs):
        data_with_nans[nans_row_col_indices[0][ind], nans_row_col_indices[1][ind]] = \
            col_means[nans_row_col_indices[1][ind]]

    return data_with_nans


def remove_csv_files(dir_path, suffix=".csv", action='remove', new_dir_path=""):
    """
    Given a directory, this function will gather all files ending with the string specified
    in suffix. Next, it is going to delete those files if action is set to 'remove'. 
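    For example (hypothetical sandbox directory):

        # delete every generated .csv file under the test sandbox directory
        remove_csv_files("/tmp/Rsandbox_my_test", suffix=".csv", action='remove')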
If\n action is set to 'copy', a new_dir_path must be specified where the files ending with suffix\n will be moved to this new directory instead.\n\n :param dir_path: string representing full path to directory of interest\n :param suffix: string representing suffix of filename that are to be found and deleted\n :param action: string, optional, denote the action to perform on files, 'remove' or 'move'\n :param new_dir_path: string, optional, representing full path to new directory\n\n :return: None\n \"\"\"\n filenames = os.listdir(dir_path) # list all files in directory\n\n # only collect files with filename ending with suffix\n to_remove = [filename for filename in filenames if filename.endswith(suffix)]\n\n # delete files ending with suffix\n for fn in to_remove:\n temp_fn = os.path.join(dir_path, fn)\n\n # only remove if file actually exists.\n if os.path.isfile(temp_fn):\n if 'remove' in action:\n remove_files(temp_fn)\n elif 'copy' in action:\n move_files(new_dir_path, temp_fn, fn, action=action)\n else:\n assert False, \"action string can only be 'remove' or 'copy.\"\n\n\ndef extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, end_test_str, want_p_values,\n attr1_bool, attr2_bool, att1_template, att2_template, att3_template,\n att4_template, compare_att1_str, h2o_att1_str, template_att1_str,\n att1_str_fail, att1_str_success, compare_att2_str, h2o_att2_str,\n template_att2_str, att2_str_fail, att2_str_success, compare_att3_str,\n h2o_att3_str, template_att3_str, att3_str_fail, att3_str_success,\n compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,\n att4_str_success, failed_test_number, ignored_eps, allowed_diff,\n noise_var, template_must_be_better, attr3_bool=True, attr4_bool=True):\n \"\"\"\n This function basically will compare four attributes (weight, p-values, training data MSE, test data MSE) of a test\n with a template model. If the difference of comparison exceeds a certain threshold, the test will be determined as\n failed and vice versa. There are times when we do not care about p-values and/or weight comparisons but mainly\n concerned with MSEs. We can set the input parameters to indicate if this is the case.\n\n :param model_h2o: H2O model that we want to evaluate\n :param h2o_model_test_metrics: test performance of H2O model under evaluation\n :param end_test_str: string representing end test banner to be printed\n :param want_p_values: bool True if we want to care about p-values and False if we don't\n :param attr1_bool: bool True if we want to compare weight difference between H2O model and template model\n and False otherwise.\n :param attr2_bool: bool True if we want to compare p-value difference between H2O model and template model\n and False otherwise.\n :param att1_template: value of first template attribute, the weight vector\n :param att2_template: value of second template attribute, the p-value vector\n :param att3_template: value of third template attribute, the training data set MSE\n :param att4_template: value of fourth template attribute, the test data set MSE\n :param compare_att1_str: string describing the comparison of first attribute, e.g. \"Comparing intercept and\n weights ....\"\n :param h2o_att1_str: string describing H2O model first attribute values, e.g. \"H2O intercept and weights: \"\n :param template_att1_str: string describing template first attribute values, e.g. 
\"Theoretical intercept and\n weights: \"\n :param att1_str_fail: string describing message to print out if difference exceeds threshold, e.g.\n \"Intercept and weights are not equal!\"\n :param att1_str_success: string describing message to print out if difference < threshold, e.g.\n \"Intercept and weights are close enough!\"\n :param compare_att2_str: string describing the comparison of first attribute, e.g. \"Comparing p-values ....\"\n :param h2o_att2_str: string describing H2O model first attribute values, e.g. \"H2O p-values: \"\n :param template_att2_str: string describing template first attribute values, e.g. \"Theoretical p-values: \"\n :param att2_str_fail: string describing message to print out if difference exceeds threshold, e.g.\n \"P-values are not equal!\"\n :param att2_str_success: string describing message to print out if difference < threshold, e.g.\n \"P-values are close enough!\"\n :param compare_att3_str: string describing the comparison of first attribute, e.g. \"Comparing training MSEs ....\"\n :param h2o_att3_str: string describing H2O model first attribute values, e.g. \"H2O training MSE: \"\n :param template_att3_str: string describing template first attribute values, e.g. \"Theoretical train MSE: \"\n :param att3_str_fail: string describing message to print out if difference exceeds threshold, e.g.\n \"Training MSEs are not equal!\"\n :param att3_str_success: string describing message to print out if difference < threshold, e.g.\n \"Training MSEs are close enough!\"\n :param compare_att4_str: string describing the comparison of first attribute, e.g. \"Comparing test MSEs ....\"\n :param h2o_att4_str: string describing H2O model first attribute values, e.g. \"H2O test MSE: \"\n :param template_att4_str: string describing template first attribute values, e.g. 
\"Theoretical test MSE: \"\n :param att4_str_fail: string describing message to print out if difference exceeds threshold, e.g.\n \"Test MSEs are not equal!\"\n :param att4_str_success: string describing message to print out if difference < threshold, e.g.\n \"Test MSEs are close enough!\"\n :param failed_test_number: integer denote the number of tests failed\n :param ignored_eps: if value < than this value, no comparison is performed\n :param allowed_diff: threshold if exceeded will fail a test\n :param noise_var: Gaussian noise variance used to generate data set\n :param template_must_be_better: bool: True: template value must be lower, False: don't care\n :param attr3_bool: bool denoting if we should compare attribute 3 values\n :param attr4_bool: bool denoting if we should compare attribute 4 values\n\n\n :return: a tuple containing test h2o model training and test performance metrics that include: weight, pValues,\n mse_train, r2_train, mse_test, r2_test\n \"\"\"\n\n # grab weight from h2o model\n test1_weight = get_train_glm_params(model_h2o, 'weights')\n\n # grab p-values from h2o model\n test1_p_values = []\n if want_p_values:\n test1_p_values = get_train_glm_params(model_h2o, 'p-values')\n\n # grab other performance metrics\n test1_mse_train = model_h2o.mse()\n test1_r2_train = model_h2o.r2()\n test1_mse_test = h2o_model_test_metrics.mse()\n test1_r2_test = h2o_model_test_metrics.r2()\n\n # compare performances of template and h2o model weights\n failed_test_number += compare_two_arrays(test1_weight, att1_template, ignored_eps, allowed_diff*100, compare_att1_str,\n h2o_att1_str, template_att1_str, att1_str_fail, att1_str_success,\n attr1_bool)\n\n # p-values\n if want_p_values:\n if np.isnan(np.asarray(test1_p_values)).any(): # p-values contain nan\n failed_test_number += 1\n\n failed_test_number += compare_two_arrays(test1_p_values, att2_template, ignored_eps, allowed_diff,\n compare_att2_str, h2o_att2_str, template_att2_str, att2_str_fail,\n att2_str_success, attr2_bool)\n\n # Training MSE\n need_to_compare = less_than(att3_template, test1_mse_train)\n\n # in some cases, template value should always be better. 
Training data MSE should always\n # be better without regularization than with regularization\n if (not need_to_compare) and template_must_be_better:\n failed_test_number += 1\n\n failed_test_number += compare_two_arrays([test1_mse_train], [att3_template], ignored_eps, noise_var,\n compare_att3_str, h2o_att3_str,\n template_att3_str, att3_str_fail, att3_str_success, attr3_bool)\n\n # Test MSE\n need_to_compare = less_than(att4_template, test1_mse_test)\n failed_test_number += compare_two_arrays([test1_mse_test], [att4_template], ignored_eps, noise_var,\n compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,\n att4_str_success, need_to_compare, attr4_bool)\n\n # print end test banner\n print(end_test_str)\n print(\"*******************************************************************************************\")\n\n sys.stdout.flush()\n\n return test1_weight, test1_p_values, test1_mse_train, test1_r2_train, test1_mse_test,\\\n test1_r2_test, failed_test_number\n\n\ndef extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics, family_type, end_test_str,\n compare_att_str=[\"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n h2o_att_str=[\"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n template_att_str=[\"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n att_str_fail=[\"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n att_str_success=[\"\", \"\", \"\", \"\", \"\", \"\", \"\"],\n test_model=None, test_model_metric=None, template_params=None,\n can_be_better_than_template=[\n False, False, False, False, False, False],\n just_print=[True, True, True, True, True, True],\n ignored_eps=1e-15, allowed_diff=1e-5, failed_test_number=0):\n \"\"\"\n This function basically will compare and print out six performance metrics of a test with a\n template model. If the difference of comparison exceeds a certain threshold, the test will be determined as\n failed and vice versa. There are times when we do not care about comparisons but mainly concerned with\n logloss/prediction accuracy in determining if a test shall fail. 
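    As an illustration of the expected ordering (hypothetical labels), the seven entries of the string-list
    arguments correspond to weights, training logloss, test logloss, training confusion matrix, test confusion
    matrix, training accuracy and test accuracy:

        compare_att_str = ["Comparing intercept and weights ....", "Comparing training logloss ....",
                           "Comparing test logloss ....", "Comparing training confusion matrix ....",
                           "Comparing test confusion matrix ....", "Comparing training accuracy ....",
                           "Comparing test accuracy ...."]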
We can set the input parameters to indicate\n if this is the case.\n\n :param model_h2o: H2O model that we want to evaluate\n :param h2o_model_test_metrics: test performance of H2O model under evaluation\n :param family_type: string represents the various distribution families (gaussian, multinomial, binomial)\n supported by our GLM algo\n :param end_test_str: string to be printed at the end of a test\n :param compare_att_str: array of strings describing what we are trying to compare\n :param h2o_att_str: array of strings describing each H2O attribute of interest\n :param template_att_str: array of strings describing template attribute of interest\n :param att_str_fail: array of strings to be printed if the comparison failed\n :param att_str_success: array of strings to be printed if comparison succeeded\n :param test_model: template model whose attributes we want to compare our H2O model with\n :param test_model_metric: performance on test data set of template model\n :param template_params: array containing template attribute values that we want to compare our H2O model with\n :param can_be_better_than_template: array of bool: True: template value must be lower, False: don't care\n :param just_print: array of bool for each attribute if True, no comparison is performed, just print the attributes\n and if False, will compare the attributes and print the attributes as well\n :param ignored_eps: if value < than this value, no comparison is performed\n :param allowed_diff: threshold if exceeded will fail a test\n :param failed_test_number: integer denote the number of tests failed so far\n\n :return: accumulated number of tests that have failed so far\n \"\"\"\n\n # grab performance metrics from h2o model\n (h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,\n h2o_confusion_matrix_test, h2o_accuracy_test) = grab_model_params_metrics(model_h2o, h2o_model_test_metrics,\n family_type)\n # grab performance metrics from template model\n if test_model and test_model_metric:\n (template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,\n template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = \\\n grab_model_params_metrics(test_model, test_model_metric, family_type)\n elif template_params:\n # grab template comparison values from somewhere else\n\n (template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train,\n template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = template_params\n else:\n assert False, \"No valid template parameters are given for comparison.\"\n\n # print and/or compare the weights between template and H2O\n compare_index = 0\n failed_test_number += compare_two_arrays(h2o_weight, template_weight, ignored_eps, allowed_diff,\n compare_att_str[compare_index], h2o_att_str[compare_index],\n template_att_str[compare_index], att_str_fail[compare_index],\n att_str_success[compare_index], True, just_print[compare_index])\n compare_index += 1\n # this is logloss from training data set,\n if not(just_print[compare_index]) and not(can_be_better_than_template[compare_index]):\n if (h2o_logloss_train < template_logloss_train) and \\\n (abs(h2o_logloss_train-template_logloss_train) > 1e-5):\n\n # H2O performed better than template which is not allowed\n failed_test_number += 1 # increment failed_test_number and just print the results\n compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps, 
allowed_diff,\n compare_att_str[compare_index], h2o_att_str[compare_index],\n template_att_str[compare_index], att_str_fail[compare_index],\n att_str_success[compare_index], True, True)\n else:\n failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,\n allowed_diff, compare_att_str[compare_index],\n h2o_att_str[compare_index], template_att_str[compare_index],\n att_str_fail[compare_index], att_str_success[compare_index], True,\n False)\n\n else:\n template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],\n h2o_logloss_train, template_logloss_train, False)\n # print and compare the logloss between template and H2O for training data\n failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps,\n allowed_diff, compare_att_str[compare_index],\n h2o_att_str[compare_index], template_att_str[compare_index],\n att_str_fail[compare_index], att_str_success[compare_index],\n template_better, just_print[compare_index])\n compare_index += 1\n template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],\n h2o_logloss_test, template_logloss_test, False)\n # print and compare the logloss between template and H2O for test data\n failed_test_number += compare_two_arrays([h2o_logloss_test], [template_logloss_test], ignored_eps, allowed_diff,\n compare_att_str[compare_index], h2o_att_str[compare_index],\n template_att_str[compare_index], att_str_fail[compare_index],\n att_str_success[compare_index], template_better, just_print[compare_index])\n compare_index += 1\n # print the confusion matrix from training data\n failed_test_number += compare_two_arrays(h2o_confusion_matrix_train, template_confusion_matrix_train, ignored_eps,\n allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],\n template_att_str[compare_index], att_str_fail[compare_index],\n att_str_success[compare_index], True, just_print[compare_index])\n compare_index += 1\n # print the confusion matrix from test data\n failed_test_number += compare_two_arrays(h2o_confusion_matrix_test, template_confusion_matrix_test, ignored_eps,\n allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index],\n template_att_str[compare_index], att_str_fail[compare_index],\n att_str_success[compare_index], True, just_print[compare_index])\n compare_index += 1\n template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],\n h2o_accuracy_train, template_accuracy_train, True)\n # print accuracy from training dataset\n failed_test_number += compare_two_arrays([h2o_accuracy_train], [template_accuracy_train], ignored_eps, allowed_diff,\n compare_att_str[compare_index], h2o_att_str[compare_index],\n template_att_str[compare_index], att_str_fail[compare_index],\n att_str_success[compare_index], template_better, just_print[compare_index])\n compare_index += 1\n # print accuracy from test dataset\n template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index],\n h2o_accuracy_test, template_accuracy_test, True)\n failed_test_number += compare_two_arrays([h2o_accuracy_test], [template_accuracy_test], ignored_eps, allowed_diff,\n compare_att_str[compare_index], h2o_att_str[compare_index],\n template_att_str[compare_index], att_str_fail[compare_index],\n att_str_success[compare_index], template_better, just_print[compare_index])\n # print end test banner\n print(end_test_str)\n 
print(\"*******************************************************************************************\")\n sys.stdout.flush()\n\n return failed_test_number\n\n\ndef is_template_better(just_print, can_be_better_than_template, h2o_att, template_att, bigger_is_better):\n \"\"\"\n This function is written to determine if the system under test performs better than the template model\n performance.\n\n :param just_print: bool representing if we are just interested in printing the attribute values\n :param can_be_better_than_template: bool stating that it is okay in this case for the system under test to perform\n better than the template system.\n :param h2o_att: number representing the h2o attribute under test\n :param template_att: number representing the template attribute\n :param bigger_is_better: bool representing if metric is perceived to be better if its value is higher\n :return: bool indicating if the template attribute is better.\n \"\"\"\n\n if just_print: # not interested in comparison, just want to print attribute values\n return True # does not matter what we return here\n else:\n if bigger_is_better: # metric is better if it is greater\n return not(h2o_att > template_att)\n else: # metric is better if it is less\n return not(h2o_att < template_att)\n\n\ndef grab_model_params_metrics(model_h2o, h2o_model_test_metrics, family_type):\n \"\"\"\n This function will extract and return the various metrics from a H2O GLM model and the corresponding H2O model\n test metrics.\n\n :param model_h2o: GLM H2O model\n :param h2o_model_test_metrics: performance on test data set from H2O GLM model\n :param family_type: string representing 'gaussian', 'binomial' or 'multinomial'\n\n :return: tuple containing weight, logloss/confusion matrix/prediction accuracy calculated from training data set\n and test data set respectively\n \"\"\"\n\n # grab weight from h2o model\n h2o_weight = get_train_glm_params(model_h2o, 'weights', family_type=family_type)\n\n # grab other performance metrics\n h2o_logloss_train = model_h2o.logloss()\n h2o_confusion_matrix_train = get_train_glm_params(model_h2o, 'confusion_matrix', family_type=family_type)\n last_index = len(h2o_confusion_matrix_train.cell_values)-1\n\n h2o_logloss_test = h2o_model_test_metrics.logloss()\n\n if 'multinomial' in family_type.lower():\n h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix()\n h2o_accuracy_train = 1-h2o_confusion_matrix_train.cell_values[last_index][last_index]\n h2o_accuracy_test = 1-h2o_confusion_matrix_test.cell_values[last_index][last_index]\n elif 'binomial' in family_type.lower():\n h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix().table\n real_last_index = last_index+1\n h2o_accuracy_train = 1-float(h2o_confusion_matrix_train.cell_values[last_index][real_last_index])\n h2o_accuracy_test = 1-float(h2o_confusion_matrix_test.cell_values[last_index][real_last_index])\n else:\n assert False, \"Only 'multinomial' and 'binomial' distribution families are supported for \" \\\n \"grab_model_params_metrics function!\"\n\n return h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,\\\n h2o_confusion_matrix_test, h2o_accuracy_test\n\n\ndef prepare_data_sklearn_multinomial(training_data_xy):\n \"\"\"\n Sklearn model requires that the input matrix should contain a column of ones in order for\n it to generate the intercept term. 
In addition, it wants the response vector to be in a\n certain format as well.\n\n :param training_data_xy: matrix containing both the predictors and response column\n\n :return: tuple containing the predictor columns with a column of ones as the first column and\n the response vector in the format that Sklearn wants.\n \"\"\"\n (num_row, num_col) = training_data_xy.shape\n\n # change response to be enum and not real\n y_ind = num_col-1\n training_data_xy[y_ind] = training_data_xy[y_ind].astype(int)\n\n # prepare response column for sklearn logistic regression\n response_y = training_data_xy[:, y_ind]\n response_y = np.ravel(response_y)\n\n training_data = training_data_xy[:, range(0, y_ind)]\n\n # added column of ones into data matrix X_MAT\n temp_ones = np.asmatrix(np.ones(num_row)).transpose()\n x_mat = np.concatenate((temp_ones, training_data), axis=1)\n\n return response_y, x_mat\n\ndef get_gridables(params_in_json):\n \"\"\"\n This function is written to walk through all parameters of a model and grab the parameters, its type and\n its default values as three lists of all the gridable parameters.\n\n :param params_in_json: a list of parameters associated with a H2O model. Each list is a dict containing fields\n of interest like name, type, gridable, default values, ....\n\n :return: three lists: gridable_params, gridable_types and gridable_defaults containing the names of the parameter,\n its associated type like int, float, unicode, bool and default parameter values\n \"\"\"\n\n # grab all gridable parameters and its type\n gridable_parameters = []\n gridable_types = []\n gridable_defaults = []\n\n for each_param in params_in_json:\n if each_param['gridable']:\n gridable_parameters.append(str(each_param[\"name\"]))\n gridable_types.append(each_param[\"type\"])\n\n if type(each_param[\"default_value\"]) == 'unicode': # hyper-parameters cannot be unicode\n gridable_defaults.append(str(each_param[\"default_value\"]))\n else:\n gridable_defaults.append(each_param[\"default_value\"])\n\n return gridable_parameters, gridable_types, gridable_defaults\n\n\ndef add_fold_weights_offset_columns(h2o_frame, nfold_max_weight_offset, column_names, column_type='fold_assignment'):\n \"\"\"\n Add fold_columns to H2O training frame specified in h2o_frame according to nfold. The new added\n columns should use the names in column_names. 
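    A usage sketch (assumes a running h2o cluster and an existing H2OFrame named train):

        train = add_fold_weights_offset_columns(train, 5, ["fold_numbers"],
                                                column_type='fold_assignment')
        # train now carries an extra integer column "fold_numbers" with values in 0..4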
Returns a h2o_frame with newly added fold_columns.\n Copied from Eric's code.\n\n :param h2o_frame: H2O frame containing training data\n :param nfold_max_weight_offset: integer, number of fold in the cross-validation or maximum weight scale or offset\n :param column_names: list of strings denoting the column names for the new fold columns\n :param column_type: optional string denoting whether we are trying to generate fold_assignment or\n weights_column or offset_column\n\n :return: H2O frame with added fold column assignments\n \"\"\"\n\n number_row = h2o_frame.nrow\n\n # copied this part from Eric's code\n for index in range(len(column_names)):\n if 'fold_assignment' in column_type:\n temp_a = np.random.random_integers(0, nfold_max_weight_offset - 1, [number_row, 1]) # inclusive\n elif 'weights_column' in column_type:\n temp_a = np.random.uniform(0, nfold_max_weight_offset, [number_row, 1])\n elif 'offset_column' in column_type:\n temp_a = random.uniform(0, nfold_max_weight_offset)*np.asmatrix(np.ones(number_row)).transpose()\n else:\n assert False, \"column_type must be either 'fold_assignment' or 'weights_column'!\"\n\n fold_assignments = h2o.H2OFrame(temp_a)\n fold_assignments.set_names([column_names[index]])\n h2o_frame = h2o_frame.cbind(fold_assignments)\n\n return h2o_frame\n\n\ndef gen_grid_search(model_params, hyper_params, exclude_parameters, gridable_parameters, gridable_types,\n gridable_defaults, max_int_number, max_int_val, min_int_val, max_real_number, max_real_val,\n min_real_val, quantize_level='1.00000000'):\n \"\"\"\n This function is written to randomly generate griddable parameters for a gridsearch. For parameters already\n found in hyper_params, no random list will be generated. In addition, we will check to make sure that the\n griddable parameters are actually used by the model before adding them to the hyper_params dict.\n\n :param model_params: list of string containing names of argument to the model\n :param hyper_params: dict structure containing a list of gridable parameters names with their list\n :param exclude_parameters: list containing parameter names not to be added to hyper_params\n :param gridable_parameters: list of gridable parameter names\n :param gridable_types: list of gridable parameter types\n :param gridable_defaults: list of gridable parameter default values\n :param max_int_number: integer, size of integer gridable parameter list\n :param max_int_val: integer, maximum integer value for integer gridable parameter\n :param min_int_val: integer, minimum integer value for integer gridable parameter\n :param max_real_number: integer, size of real gridable parameter list\n :param max_real_val: float, maximum real value for real gridable parameter\n :param min_real_val: float, minimum real value for real gridable parameter\n :param quantize_level: string representing the quantization level of floating point values generated randomly.\n\n :return: a tuple of hyper_params: dict of hyper parameters for gridsearch, true_gridable_parameters:\n a list of string containing names of truely gridable parameters, true_gridable_types: a list of string\n denoting parameter types and true_gridable_defaults: default values of those truly gridable parameters\n \"\"\"\n count_index = 0\n true_gridable_parameters = []\n true_gridable_types = []\n true_gridable_defaults = []\n\n for para_name in gridable_parameters:\n # parameter must not in exclusion list\n if (para_name in model_params) and (para_name not in exclude_parameters):\n 
true_gridable_parameters.append(para_name)\n true_gridable_types.append(gridable_types[count_index])\n true_gridable_defaults.append(gridable_defaults[count_index])\n\n if para_name not in hyper_params.keys(): # add default value to user defined parameter list\n # gridable parameter not seen before. Randomly generate values for it\n if ('int' in gridable_types[count_index]) or ('long' in gridable_types[count_index]):\n # make sure integer values are not duplicated, using set action to remove duplicates\n hyper_params[para_name] = list(set([random.randint(min_int_val, max_int_val) for p in\n range(0, max_int_number)]))\n elif ('double' in gridable_types[count_index]) or ('float' in gridable_types[count_index]):\n hyper_params[para_name] = fix_float_precision(list(np.random.uniform(min_real_val, max_real_val,\n max_real_number)), quantize_level=quantize_level)\n\n count_index += 1\n\n return hyper_params, true_gridable_parameters, true_gridable_types, true_gridable_defaults\n\n\ndef fix_float_precision(float_list, quantize_level='1.00000000'):\n \"\"\"\n This function takes in a floating point tuple and attempt to change it to floating point number with fixed\n precision.\n\n :param float_list: tuple/list of floating point numbers\n :param quantize_level: string, optional, represent the number of fix points we care\n\n :return: tuple of floats to the exact precision specified in quantize_level\n \"\"\"\n fixed_float = []\n for num in float_list:\n fixed_float.append(float(Decimal(num).quantize(Decimal(quantize_level))))\n\n return list(set(fixed_float))\n\n\ndef extract_used_params_xval(a_grid_model, model_param_names, params_dict, algo=\"GBM\"):\n \"\"\"\n This function performs similar functions to function extract_used_params. However, for max_runtime_secs,\n we need to go into each cross-valudation model and grab the max_runtime_secs and add them up in order to\n get the correct value. 
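    For illustration (assumes grid_models and hyper_params come from an earlier cross-validated grid search):

        params_dict = {"distribution": "gaussian", "nfolds": 5}
        params_used = extract_used_params_xval(grid_models[0], list(hyper_params), params_dict, algo="GBM")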
In addition, we put your algo model specific parameters into params_dict.\n\n :param a_grid_model: list of models generated by gridsearch\n :param model_param_names: hyper-parameter names that are specified for the gridsearch.\n :param params_dict: dict containing name/value pairs specified to an algo.\n :param algo: string, optional, denoting the algo we are looking at.\n\n :return: params_used: a dict structure containing parameters that take on values as name/value pairs which\n will be used to build a model by hand using the same parameter setting as the model built by gridsearch.\n \"\"\"\n params_used = dict()\n\n # need to extract the max_runtime_secs ONE cross-validation model or the base model\n if a_grid_model._is_xvalidated:\n xv_keys = a_grid_model._xval_keys\n\n for id in xv_keys: # only need to get info from one model\n each_xv_model = h2o.get_model(id) # get each model\n params_used = extract_used_params(model_param_names, each_xv_model.params, params_dict, algo)\n break\n else:\n params_used = extract_used_params(model_param_names, a_grid_model.params, params_dict, algo)\n\n return params_used\n\n\ndef extract_used_params(model_param_names, grid_model_params, params_dict, algo=\"GLM\"):\n \"\"\"\n This function is used to build a dict out of parameters used by our gridsearch to build a H2O model given\n the dict structure that describes the parameters and their values used by gridsearch to build that\n particular mode.\n\n :param model_param_names: list contains parameter names that we are interested in extracting\n :param grid_model_params: dict contains key as names of parameter and values as list of two values: default and\n actual.\n :param params_dict: dict containing extra parameters to add to params_used like family, e.g. 'gaussian',\n 'binomial', ...\n\n :return: params_used: a dict structure containing parameters that take on values as name/value pairs which\n will be used to build a model by hand using the same parameter setting as the model built by gridsearch.\n \"\"\"\n\n params_used = dict()\n grid_model_params_keys = grid_model_params.keys()\n\n for each_parameter in model_param_names:\n parameter_name = str(each_parameter)\n\n if parameter_name in grid_model_params_keys:\n params_used[parameter_name] = grid_model_params[each_parameter]['actual']\n\n if params_dict:\n for key, value in params_dict.items():\n params_used[key] = value # add distribution family to parameters used list\n\n # only for GLM, change lambda to Lambda\n if algo ==\"GLM\":\n if 'lambda' in params_used.keys():\n params_used['Lambda'] = params_used['lambda']\n del params_used['lambda']\n\n return params_used\n\n\ndef insert_error_grid_search(hyper_params, gridable_parameters, gridable_types, error_number):\n \"\"\"\n This function will randomly introduce errors into a copy of hyper_params. 
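    For example (a tiny hyper-parameter space; error_number=1 empties one of the value lists):

        hyper_params = {"alpha": [0.1, 0.5], "lambda": [1e-5, 1e-3]}
        bad_params = insert_error_grid_search(hyper_params, ["alpha", "lambda"],
                                              ["double", "double"], 1)
        # one value list in bad_params is now empty; hyper_params itself is left untouched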
Depending on the random number\n error_number generated, the following errors can be introduced:\n\n error_number = 0: randomly alter the name of a hyper-parameter name;\n error_number = 1: randomly choose a hyper-parameter and remove all elements in its list\n error_number = 2: add randomly generated new hyper-parameter names with random list\n error_number other: randomly choose a hyper-parameter and insert an illegal type into it\n\n :param hyper_params: dict containing all legal hyper-parameters for our grid search\n :param gridable_parameters: name of griddable parameters (some may not be griddable)\n :param gridable_types: type of griddable parameters\n :param error_number: integer representing which errors to introduce into the gridsearch hyper-parameters\n\n :return: new dict with errors in either parameter names or parameter values\n \"\"\"\n error_hyper_params = copy.deepcopy(hyper_params)\n # error_hyper_params = {k : v for k, v in hyper_params.items()}\n\n param_index = random.randint(0, len(hyper_params)-1)\n param_name = list(hyper_params)[param_index]\n param_type = gridable_types[gridable_parameters.index(param_name)]\n\n if error_number == 0: # grab a hyper-param randomly and copy its name twice\n new_name = param_name+param_name\n error_hyper_params[new_name] = error_hyper_params[param_name]\n del error_hyper_params[param_name]\n elif error_number == 1:\n error_hyper_params[param_name] = []\n elif error_number == 2:\n new_param = generate_random_words(random.randint(20,100))\n error_hyper_params[new_param] = error_hyper_params[param_name]\n else:\n error_hyper_params = insert_bad_value(error_hyper_params, param_name, param_type)\n\n return error_hyper_params\n\n\ndef insert_bad_value(error_hyper_params, param_name, param_type):\n \"\"\"\n This function is written to insert a value that is of a different type into an array than the one\n its other elements are for.\n\n :param error_hyper_params: dict containing all hyper-parameters for a grid search\n :param param_name: string denoting the hyper-parameter we want to insert bad element to\n :param param_type: string denoting hyper-parameter type\n\n :return: dict containing new inserted error value\n \"\"\"\n if 'int' in param_type: # insert a real number into integer\n error_hyper_params[param_name].append(random.uniform(-10,10))\n elif 'enum' in param_type: # insert an float into enums\n error_hyper_params[param_name].append(random.uniform(-10,10))\n elif 'double' in param_type: # insert an enum into float\n error_hyper_params[param_name].append(random.uniform(0,1) > 0.5)\n else: # insert a random string for all other cases\n error_hyper_params[param_name].append(generate_random_words(random.randint(20,100)))\n\n return error_hyper_params\n\n\ndef generate_random_words(word_length):\n \"\"\"\n This function will generate a random word consisting of letters, numbers and\n punctuation given the word_length.\n\n :param word_length: integer denoting length of the word\n\n :return: string representing the random word\n \"\"\"\n\n if word_length > 0:\n all_chars = string.ascii_letters + string.digits + string.punctuation\n\n return ''.join((random.choice(all_chars)) for index in range(int(word_length)))\n else:\n assert False, \"word_length must be an integer greater than 0.\"\n\n\ndef generate_redundant_parameters(hyper_params, gridable_parameters, gridable_defaults, error_number):\n \"\"\"\n This function will randomly choose a set of hyper_params and make a dict out of it so we can\n duplicate the parameter specification in 
both the model and grid search.\n\n :param hyper_params: dict containing all griddable parameters as hyper_param to grid search\n :param gridable_parameters: list of gridable parameters (not truly)\n :param gridable_defaults: list of default values for gridable parameters\n :param error_number: int, indicate ways to change the model parameter and the hyper-parameter\n\n Here are the actions performed on the model parameter and hyper-parameters.\n error_number = 0: set model parameter to be a value out of the hyper-parameter value list, should not\n generate error;\n error_number = 1: set model parameter to be default value, should not generate error in this case;\n error_number = 3: make sure model parameter is not set to default and choose a value not in the\n hyper-parameter value list.\n\n :return: 2 dicts containing duplicated parameters with specification, new hyperparameter specification\n \"\"\"\n error_hyper_params = copy.deepcopy(hyper_params)\n # error_hyper_params = {k : v for k, v in hyper_params.items()}\n\n params_dict = {}\n num_params = random.randint(1, len(error_hyper_params))\n params_list = list(error_hyper_params)\n\n # remove default values out of hyper_params\n for key in params_list:\n default_value = gridable_defaults[gridable_parameters.index(key )]\n\n if default_value in error_hyper_params[key]:\n error_hyper_params[key].remove(default_value)\n\n for index in range(num_params):\n param_name = params_list[index]\n\n hyper_params_len = len(error_hyper_params[param_name])\n\n if error_number == 0:\n # randomly assigned the parameter to take one value out of the list\n param_value_index = random.randint(0, len(error_hyper_params[param_name])-1)\n params_dict[param_name] = error_hyper_params[param_name][param_value_index]\n elif error_number == 1:\n param_value_index = gridable_parameters.index(param_name)\n params_dict[param_name] = gridable_defaults[param_value_index]\n else:\n # randomly assign model parameter to one of the hyper-parameter values, should create error condition here\n param_value_index = random.randint(0, hyper_params_len-1)\n params_dict[param_name] = error_hyper_params[param_name][param_value_index]\n\n # final check to make sure lambda is Lambda\n if 'lambda' in list(params_dict):\n params_dict[\"Lambda\"] = params_dict['lambda']\n del params_dict[\"lambda\"]\n\n return params_dict, error_hyper_params\n\n\ndef count_models(hyper_params):\n \"\"\"\n Given a hyper_params dict, this function will return the maximum number of models that can be built out of all\n the combination of hyper-parameters.\n\n :param hyper_params: dict containing parameter name and a list of values to iterate over\n :return: max_model_number: int representing maximum number of models built\n \"\"\"\n max_model_number = 1\n\n for key in list(hyper_params):\n max_model_number *= len(hyper_params[key])\n\n return max_model_number\n\n\ndef error_diff_2_models(grid_table1, grid_table2, metric_name):\n \"\"\"\n This function will take two models generated by gridsearch and calculate the mean absolute differences of\n the metric values specified by the metric_name in the two model. 
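    For illustration (grid_table1 and grid_table2 are assumed to be H2OTwoDimTable summaries of two grid
    searches, both sorted by the same metric):

        mean_gap = error_diff_2_models(grid_table1, grid_table2, "mse")
        assert mean_gap < 1e-6, "the two grid searches should have produced (nearly) identical models"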
It will return the mean differences.\n\n :param grid_table1: first H2OTwoDimTable generated by gridsearch\n :param grid_table2: second H2OTwoDimTable generated by gridsearch\n :param metric_name: string, name of the metric of interest\n\n :return: real number which is the mean absolute metric difference between the two models\n \"\"\"\n num_model = len(grid_table1.cell_values)\n metric_diff = 0\n\n for model_index in range(num_model):\n metric_diff += abs(grid_table1.cell_values[model_index][-1] - grid_table2.cell_values[model_index][-1])\n\n if (num_model > 0):\n return metric_diff/num_model\n else:\n assert False, \"error_diff_2_models: your table contains zero models.\"\n\n\ndef find_grid_runtime(model_list):\n \"\"\"\n This function given a grid_model built by gridsearch will go into the model and calculate the total amount of\n time it took to actually build all the models in second\n\n :param model_list: list of model built by gridsearch, cartesian or randomized with cross-validation\n enabled.\n :return: total_time_sec: total number of time in seconds in building all the models\n \"\"\"\n total_time_sec = 0\n\n for each_model in model_list:\n total_time_sec += each_model._model_json[\"output\"][\"run_time\"] # time in ms\n\n # if cross validation is used, need to add those run time in here too\n if each_model._is_xvalidated:\n xv_keys = each_model._xval_keys\n\n for id in xv_keys:\n each_xv_model = h2o.get_model(id)\n total_time_sec += each_xv_model._model_json[\"output\"][\"run_time\"]\n\n return total_time_sec/1000.0 # return total run time in seconds\n\n\ndef evaluate_metrics_stopping(model_list, metric_name, bigger_is_better, search_criteria, possible_model_number):\n \"\"\"\n This function given a list of dict that contains the value of metric_name will manually go through the\n early stopping condition and see if the randomized grid search will give us the correct number of models\n generated. Note that you cannot assume the model_list is in the order of when a model is built. It actually\n already come sorted which we do not want....\n\n :param model_list: list of models built sequentially that contains metric of interest among other fields\n :param metric_name: string representing name of metric that we want to based our stopping condition on\n :param bigger_is_better: bool indicating if the metric is optimized by getting bigger if True and vice versa\n :param search_criteria: dict structure containing the search criteria for randomized gridsearch\n :param possible_model_number: integer, represent the absolute possible number of models built based on the\n hyper-parameter size\n\n :return: bool indicating if the early topping condition is justified\n \"\"\"\n\n tolerance = search_criteria[\"stopping_tolerance\"]\n stop_round = search_criteria[\"stopping_rounds\"]\n\n min_list_len = 2*stop_round # minimum length of metrics needed before we start early stopping evaluation\n\n metric_list = [] # store metric of optimization\n stop_now = False\n\n # provide metric list sorted by time. 
Oldest model appear first.\n metric_list_time_ordered = sort_model_by_time(model_list, metric_name)\n\n for metric_value in metric_list_time_ordered:\n metric_list.append(metric_value)\n\n if len(metric_list) > min_list_len: # start early stopping evaluation now\n stop_now = evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better)\n\n if stop_now:\n if len(metric_list) < len(model_list): # could have stopped early in randomized gridsearch\n return False\n else: # randomized gridsearch stopped at the correct condition\n return True\n\n if len(metric_list) == possible_model_number: # never meet early stopping condition at end of random gridsearch\n return True # if max number of model built, still ok\n else:\n return False # early stopping condition never met but random gridsearch did not build all models, bad!\n\n\ndef sort_model_by_time(model_list, metric_name):\n \"\"\"\n This function is written to sort the metrics that we care in the order of when the model was built. The\n oldest model metric will be the first element.\n\n :param model_list: list of models built sequentially that contains metric of interest among other fields\n :param metric_name: string representing name of metric that we want to based our stopping condition on\n :return: model_metric_list sorted by time\n \"\"\"\n\n model_num = len(model_list)\n\n model_metric_list = [None] * model_num\n\n for index in range(model_num):\n model_index = int(model_list[index]._id.split('_')[-1])\n model_metric_list[model_index] = \\\n model_list[index]._model_json[\"output\"][\"cross_validation_metrics\"]._metric_json[metric_name]\n\n return model_metric_list\n\n\ndef evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better):\n \"\"\"\n This function mimics the early stopping function as implemented in ScoreKeeper.java. Please see the Java file\n comment to see the explanation of how the early stopping works.\n\n :param metric_list: list containing the optimization metric under consideration for gridsearch model\n :param stop_round: integer, determine averaging length\n :param tolerance: real, tolerance to see if the grid search model has improved enough to keep going\n :param bigger_is_better: bool: True if metric is optimized as it gets bigger and vice versa\n\n :return: bool indicating if we should stop early and sorted metric_list\n \"\"\"\n metric_len = len(metric_list)\n metric_list.sort(reverse=bigger_is_better)\n shortest_len = 2*stop_round\n\n bestInLastK = 1.0*sum(metric_list[0:stop_round])/stop_round\n lastBeforeK = 1.0*sum(metric_list[stop_round:shortest_len])/stop_round\n\n if not(np.sign(bestInLastK) == np.sign(lastBeforeK)):\n return False\n\n ratio = bestInLastK/lastBeforeK\n\n if math.isnan(ratio):\n return False\n\n if bigger_is_better:\n return not (ratio > 1+tolerance)\n else:\n return not (ratio < 1-tolerance)\n\n\ndef check_and_count_models(hyper_params, params_zero_one, params_more_than_zero, params_more_than_one,\n params_zero_positive, max_grid_model):\n \"\"\"\n This function will look at the hyper-parameter space set in hyper_params, generate a new hyper_param space that\n will contain a smaller number of grid_models. It will determine how many models will be built from\n this new hyper_param space. 
In order to arrive at the correct answer, it must discount parameter settings that\n are illegal.\n\n :param hyper_params: dict containing model parameter names and list of values to set it to\n :param params_zero_one: list containing model parameter names whose values must be between 0 and 1\n :param params_more_than_zero: list containing model parameter names whose values must exceed zero\n :param params_more_than_one: list containing model parameter names whose values must exceed one\n :param params_zero_positive: list containing model parameter names whose values must equal to or exceed zero\n :param max_grid_model: maximum number of grid_model that can be generated from the new hyper_params space\n\n :return: total model: integer denoting number of grid models that can be built from all legal parameter settings\n in new hyper_parameter space\n final_hyper_params: dict of new hyper parameter space derived from the original hyper_params\n \"\"\"\n\n total_model = 1\n param_len = 0\n hyper_keys = list(hyper_params)\n shuffle(hyper_keys) # get all hyper_parameter names in random order\n final_hyper_params = dict()\n\n for param in hyper_keys:\n\n # this param should be > 0 and <= 2\n if param == \"col_sample_rate_change_per_level\":\n param_len = len([x for x in hyper_params[\"col_sample_rate_change_per_level\"] if (x > 0)\n and (x <= 2)])\n elif param in params_zero_one:\n param_len = len([x for x in hyper_params[param] if (x >= 0)\n and (x <= 1)])\n elif param in params_more_than_zero:\n param_len = len([x for x in hyper_params[param] if (x > 0)])\n elif param in params_more_than_one:\n param_len = len([x for x in hyper_params[param] if (x > 1)])\n elif param in params_zero_positive:\n param_len = len([x for x in hyper_params[param] if (x >= 0)])\n else:\n param_len = len(hyper_params[param])\n\n if (param_len >= 0) and ((total_model*param_len) <= max_grid_model):\n total_model *= param_len\n final_hyper_params[param] = hyper_params[param]\n elif (total_model*param_len) > max_grid_model:\n break\n\n return total_model, final_hyper_params\n\n\ndef write_hyper_parameters_json(dir1, dir2, json_filename, hyper_parameters):\n \"\"\"\n Write a json file of the hyper_parameters in directories dir1 and dir2 for debugging purposes.\n\n :param dir1: String containing first directory where you want to write the json file to\n :param dir2: String containing second directory where you want to write the json file to\n :param json_filename: String containing json file name\n :param hyper_parameters: dict containing hyper-parameters used\n \"\"\"\n # save hyper-parameter file in test directory\n with open(os.path.join(dir1, json_filename), 'w') as test_file:\n json.dump(hyper_parameters, test_file)\n\n # save hyper-parameter file in sandbox\n with open(os.path.join(dir2, json_filename), 'w') as test_file:\n json.dump(hyper_parameters, test_file)\n\n\ndef compare_frames(frame1, frame2, numElements, tol_time=0, tol_numeric=0, strict=False, compare_NA=True):\n \"\"\"\n This function will compare two H2O frames to make sure their dimension, and values in all cells are the same.\n It will not compare the column names though.\n\n :param frame1: H2O frame to be compared\n :param frame2: H2O frame to be compared\n :param numElements: integer to denote number of rows to compare. 
Done to reduce compare time.\n Set to 0 or negative number if you want to compare all elements.\n :param tol_time: optional parameter to limit time value difference.\n :param tol_numerica: optional parameter to limit numeric value difference.\n :param strict: optional parameter to enforce strict comparison or not. If True, column type must\n match in order to pass the test.\n :param compare_NA: optional parameter to compare NA or not. For csv file generated from orc file, the\n NAs are represented as some other symbol but our CSV will not be able to parse it correctly as NA.\n In this case, do not compare the number of NAs.\n :return: boolean: True, the two frames are equal and False otherwise.\n \"\"\"\n\n # check frame dimensions\n rows1, cols1 = frame1.dim\n rows2, cols2 = frame2.dim\n\n assert rows1 == rows2 and cols1 == cols2, \"failed dim check! frame 1 rows:{0} frame 2 rows:{1} frame 1 cols:{2} \" \\\n \"frame2 cols:{3}\".format(rows1, rows2, cols1, cols2)\n\n na_frame1 = frame1.isna().sum().sum(axis=1)[:,0]\n na_frame2 = frame2.isna().sum().sum(axis=1)[:,0]\n\n if compare_NA: # check number of missing values\n assert na_frame1.flatten() == na_frame2.flatten(), \"failed numbers of NA check! Frame 1 NA number: {0}, frame 2 \" \\\n \"NA number: {1}\".format(na_frame1, na_frame2)\n\n # check column types are the same before proceeding to check each row content.\n for col_ind in range(cols1):\n\n c1_key = frame1.columns[col_ind]\n c2_key = frame2.columns[col_ind]\n c2_type = frame2.types[c2_key]\n c1_type = frame1.types[c1_key]\n\n print(\"###### Comparing column: {0} and column type is {1}.\".format(col_ind, c1_type))\n\n if strict: # every column type must match\n assert c1_type == c2_type, \"failed column type check! frame1 col type: {0}, frame2 col type: \" \\\n \"{1}\".format(c1_type, c2_type)\n else:\n if str(c2_type) == 'enum': # orc files do not have enum column type. We convert it here\n frame1[col_ind].asfactor()\n\n # compare string\n if (str(c1_type) == 'string') or (str(c1_type) == 'enum'):\n compareOneStringColumn(frame1, frame2, col_ind, rows1, numElements)\n else:\n if str(c2_type) == 'time': # compare time columns\n compareOneNumericColumn(frame1, frame2, col_ind, rows1, tol_time, numElements)\n else:\n compareOneNumericColumn(frame1, frame2, col_ind, rows1, tol_numeric, numElements)\n return True\n\n\ndef compareOneStringColumn(frame1, frame2, col_ind, rows, numElements):\n \"\"\"\n This function will compare two String columns of two H2O frames to make sure that they are the same.\n\n :param frame1: H2O frame to be compared\n :param frame2: H2O frame to be compared\n :param col_ind: integer denoting column index to compare the two frames\n :param rows: integer denoting number of rows in the column\n :param numElements: integer to denote number of rows to compare. Done to reduce compare time\n :return: None. Will throw exceptions if comparison failed.\n \"\"\"\n\n row_indices = list(range(rows))\n if numElements > 0:\n random.shuffle(row_indices)\n else:\n numElements = rows\n\n for ele_ind in range(numElements):\n row_ind = row_indices[ele_ind]\n\n val1 = frame1[row_ind, col_ind]\n val2 = frame2[row_ind, col_ind]\n\n assert val1 == val2, \"failed frame values check! 
frame1 value: {0}, frame2 value: {1} at row {2}, column \" \\\n \"{3}\".format(val1, val2, row_ind, col_ind)\n\n\ndef compareOneNumericColumn(frame1, frame2, col_ind, rows, tolerance, numElements):\n \"\"\"\n This function compares two numeric columns of two H2O frames to make sure that they are close.\n\n :param frame1: H2O frame to be compared\n :param frame2: H2O frame to be compared\n :param col_ind: integer denoting column index to compare the two frames\n :param rows: integer denoting number of rows in the column\n :param tolerance: double parameter to limit numerical value difference.\n :param numElements: integer to denote number of rows to compare. Done to reduce compare time.\n :return: None. Will throw exceptions if comparison failed.\n \"\"\"\n\n row_indices = []\n if numElements > 0:\n row_indices = random.sample(range(rows), numElements)\n else:\n numElements = rows # Compare all elements\n row_indices = list(range(rows))\n\n\n for ele_ind in range(numElements):\n row_ind = row_indices[ele_ind]\n\n val1 = frame1[row_ind, col_ind]\n val2 = frame2[row_ind, col_ind]\n\n if not(math.isnan(val1)) and not(math.isnan(val2)): # both frames contain valid elements\n diff = abs(val1-val2)/max(1, abs(val1), abs(val2))\n assert diff <= tolerance, \"failed frame values check! frame1 value = {0}, frame2 value = {1}, \" \\\n \"at row {2}, column {3}. The difference is {4}.\".format(val1, val2, row_ind,\n col_ind, diff)\n elif math.isnan(val1) and math.isnan(val2): # both frame contains missing values\n continue\n else: # something is wrong, one frame got a missing value while the other is fine.\n assert 1 == 2, \"failed frame values check! frame1 value {0}, frame2 value {1} at row {2}, \" \\\n \"column {3}\".format(val1, val2, row_ind, col_ind)\n\nimport warnings\n\ndef expect_warnings(filewithpath, warn_phrase=\"warn\", warn_string_of_interest=\"warn\", number_of_times=1, in_hdfs=False):\n \"\"\"\n This function will execute a command to run and analyze the print outs of\n running the command. 
The goal here is to capture any warnings that we may expect\n out of running those commands.\n\n :param filewithpath: name of file to be parsed with path\n :param warn_phrase: capture the warning header, sometimes it is warn or userwarn.\n :param warn_string_of_interest: specific warning message string\n :param number_of_times: number of warning lines we are expecting.\n :return: True if warning was found and False otherwise\n \"\"\"\n\n number_warngings = 0\n\n buffer = StringIO() # redirect warning messages to string buffer for later analysis\n sys.stderr = buffer\n frame = None\n\n if in_hdfs:\n frame = h2o.import_file(filewithpath)\n else:\n frame = h2o.import_file(path=locate(filewithpath))\n\n sys.stderr = sys.__stderr__ # redirect it back to stdout.\n try: # for python 2.7\n if len(buffer.buflist) > 0:\n for index in range(len(buffer.buflist)):\n print(\"*** captured warning message: {0}\".format(buffer.buflist[index]))\n if (warn_phrase in buffer.buflist[index]) and (warn_string_of_interest in buffer.buflist[index]):\n number_warngings = number_warngings+1\n\n except: # for python 3.\n warns = buffer.getvalue()\n print(\"*** captured warning message: {0}\".format(warns))\n if (warn_phrase in warns) and (warn_string_of_interest in warns):\n number_warngings = number_warngings+1\n\n print(\"Number of warnings found: {0} and number of times that warnings should appear {1}.\".format(number_warngings,\n number_of_times))\n if number_warngings >= number_of_times:\n return True\n else:\n return False\n\n\ndef compare_frame_summary(frame1_summary, frame2_summary, compareNames=False, compareTypes=False):\n \"\"\"\n This method is written to compare the frame summary between two frames.\n\n :param frame1_summary:\n :param frame2_summary:\n :param compareNames:\n :param compareTypes:\n :return:\n \"\"\"\n\n frame1_column_number = len(frame1_summary)\n frame2_column_number = len(frame2_summary)\n\n assert frame1_column_number == frame2_column_number, \"failed column number check! 
Frame 1 column number: {0},\" \\\n \"frame 2 column number: {1}\".format(frame1_column_number,\n frame2_column_number)\n\n for col_index in range(frame1_column_number): # check summary for each column\n for key_val in list(frame1_summary[col_index]):\n\n if not(compareNames) and (str(key_val) == 'label'):\n continue\n\n if not(compareTypes) and (str(key_val) == 'type'):\n continue\n\n if str(key_val) == 'precision': # skip comparing precision\n continue\n\n val1 = frame1_summary[col_index][key_val]\n val2 = frame2_summary[col_index][key_val]\n\n if isinstance(val1, list) or isinstance(val1, dict):\n if isinstance(val1, dict):\n assert val1 == val2, \"failed column summary comparison for column {0} and summary \" \\\n \"type {1}, frame 1 value is {2}, frame 2 value is \" \\\n \"{3}\".format(col_index, str(key_val), val1, val2)\n else:\n if len(val1) > 0:\n # find if elements are float\n float_found = False\n\n for ind in range(len(val1)):\n if isinstance(val1[ind], float):\n float_found = True\n break\n\n if float_found:\n for ind in range(len(val1)):\n if not(str(val1[ind] == 'NaN')):\n assert abs(val1[ind]-val2[ind]) < 1e-5, \"failed column summary comparison for \" \\\n \"column {0} and summary type {1}, frame 1\" \\\n \" value is {2}, frame 2 value is \" \\\n \"{3}\".format(col_index, str(key_val),\n val1[ind], val2[ind])\n else:\n assert val1 == val2, \"failed column summary comparison for column {0} and summary\" \\\n \" type {1}, frame 1 value is {2}, frame 2 value is \" \\\n \"{3}\".format(col_index, str(key_val), val1, val2)\n else:\n if isinstance(val1, float):\n assert abs(val1-val2) < 1e-5, \"failed column summary comparison for column {0} and summary type \" \\\n \"{1}, frame 1 value is {2}, frame 2 value is \" \\\n \"{3}\".format(col_index, str(key_val), val1, val2)\n else:\n assert val1 == val2, \"failed column summary comparison for column {0} and summary type \" \\\n \"{1}, frame 1 value is {2}, frame 2 value is \" \\\n \"{3}\".format(col_index, str(key_val), val1, val2)\n\n\ndef cannaryHDFSTest(hdfs_name_node, file_name):\n \"\"\"\n This function is written to detect if the hive-exec version is too old. It will return\n True if it is too old and false otherwise.\n\n :param hdfs_name_node:\n :param file_name:\n :return:\n \"\"\"\n url_orc = \"hdfs://{0}{1}\".format(hdfs_name_node, file_name)\n\n try:\n tempFrame = h2o.import_file(url_orc)\n h2o.remove(tempFrame)\n print(\"Your hive-exec version is good. 
Parsing success for {0}.\".format(url_orc))\n return False\n except Exception as e:\n print(\"Error exception is {0}\".format(str(e)))\n\n if \"NoSuchFieldError: vector\" in str(e):\n return True\n else: # exception is caused by other reasons.\n return False\n\n\ndef extract_scoring_history_field(aModel, fieldOfInterest, takeFirst=False):\n \"\"\"\n Given a fieldOfInterest that are found in the model scoring history, this function will extract the list\n of field values for you from the model.\n\n :param aModel: H2O model where you want to extract a list of fields from the scoring history\n :param fieldOfInterest: string representing a field of interest.\n :return: List of field values or None if it cannot be found\n \"\"\"\n return extract_from_twoDimTable(aModel._model_json[\"output\"][\"scoring_history\"], fieldOfInterest, takeFirst)\n\n\n\ndef extract_from_twoDimTable(metricOfInterest, fieldOfInterest, takeFirst=False):\n \"\"\"\n Given a fieldOfInterest that are found in the model scoring history, this function will extract the list\n of field values for you from the model.\n\n :param aModel: H2O model where you want to extract a list of fields from the scoring history\n :param fieldOfInterest: string representing a field of interest.\n :return: List of field values or None if it cannot be found\n \"\"\"\n\n allFields = metricOfInterest._col_header\n if fieldOfInterest in allFields:\n cellValues = []\n fieldIndex = allFields.index(fieldOfInterest)\n for eachCell in metricOfInterest.cell_values:\n cellValues.append(eachCell[fieldIndex])\n if takeFirst: # only grab the result from the first iteration.\n break\n return cellValues\n else:\n return None\n\ndef model_run_time_sorted_by_time(model_list):\n \"\"\"\n This function is written to sort the metrics that we care in the order of when the model was built. The\n oldest model metric will be the first element.\n :param model_list: list of models built sequentially that contains metric of interest among other fields\n :return: model run time in secs sorted by order of building\n \"\"\"\n\n model_num = len(model_list)\n\n model_runtime_sec_list = [None] * model_num\n\n\n for index in range(model_num):\n model_index = int(model_list[index]._id.split('_')[-1])\n model_runtime_sec_list[model_index] = \\\n (model_list[index]._model_json[\"output\"][\"run_time\"]/1000.0)\n\n return model_runtime_sec_list\n\n\ndef model_seed_sorted_by_time(model_list):\n \"\"\"\n This function is written to find the seed used by each model in the order of when the model was built. 
The\n oldest model metric will be the first element.\n :param model_list: list of models built sequentially that contains metric of interest among other fields\n :return: model seed sorted by order of building\n \"\"\"\n\n model_num = len(model_list)\n\n model_seed_list = [None] * model_num\n\n\n for index in range(model_num):\n model_index = int(model_list[index]._id.split('_')[-1])\n\n for pIndex in range(len(model_list.models[0]._model_json[\"parameters\"])):\n if model_list.models[index]._model_json[\"parameters\"][pIndex][\"name\"]==\"seed\":\n model_seed_list[model_index]=model_list.models[index]._model_json[\"parameters\"][pIndex][\"actual_value\"]\n break\n\n return model_seed_list\n\n\ndef check_ignore_cols_automl(models,names,x,y):\n models = sum(models.as_data_frame().values.tolist(),[])\n for model in models:\n if \"StackedEnsemble\" in model:\n continue\n else:\n assert set(h2o.get_model(model).params[\"ignored_columns\"][\"actual\"]) == set(names) - {y} - set(x), \\\n \"ignored columns are not honored for model \" + model\n\n\ndef compare_numeric_frames(f1, f2, prob=0.5, tol=1e-6):\n assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), \"The two frames are of different sizes.\"\n temp1 = f1.asnumeric()\n temp2 = f2.asnumeric()\n for colInd in range(f1.ncol):\n for rowInd in range(f2.nrow):\n if (random.uniform(0,1) < prob):\n if (math.isnan(temp1[rowInd, colInd])):\n assert math.isnan(temp2[rowInd, colInd]), \"Failed frame values check at row {2} and column {3}! \" \\\n \"frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd, colInd], temp2[rowInd, colInd], rowInd, colInd)\n else:\n diff = abs(temp1[rowInd, colInd]-temp2[rowInd, colInd])/max(1.0, abs(temp1[rowInd, colInd]),\n abs(temp2[rowInd, colInd]))\n assert diff<=tol, \"Failed frame values check at row {2} and column {3}! 
frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd, colInd], temp2[rowInd, colInd], rowInd, colInd)\n\ndef check_sorted_2_columns(frame1, sorted_column_indices, prob=0.5, ascending=[True, True]):\n for colInd in sorted_column_indices:\n for rowInd in range(0, frame1.nrow-1):\n if (random.uniform(0.0,1.0) < prob):\n if colInd == sorted_column_indices[0]:\n if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])):\n if ascending[colInd]:\n assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], \"Wrong sort order: value at row {0}: {1}, value at \" \\\n \"row {2}: {3}\".format(rowInd, frame1[rowInd,colInd],\n rowInd+1, frame1[rowInd+1,colInd])\n else:\n assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], \"Wrong sort order: value at row {0}: {1}, value at \" \\\n \"row {2}: {3}\".format(rowInd, frame1[rowInd,colInd],\n rowInd+1, frame1[rowInd+1,colInd])\n else: # for second column\n if not(math.isnan(frame1[rowInd, sorted_column_indices[0]])) and not(math.isnan(frame1[rowInd+1,sorted_column_indices[0]])):\n if (frame1[rowInd,sorted_column_indices[0]]==frame1[rowInd+1, sorted_column_indices[0]]): # meaningful to compare row entries then\n if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])):\n if ascending[colInd]:\n assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], \"Wrong sort order: value at row {0}: {1}, value at \" \\\n \"row {2}: {3}\".format(rowInd, frame1[rowInd,colInd],\n rowInd+1, frame1[rowInd+1,colInd])\n else:\n assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], \"Wrong sort order: value at row {0}: {1}, value at \" \\\n \"row {2}: {3}\".format(rowInd, frame1[rowInd,colInd],\n rowInd+1, frame1[rowInd+1,colInd])\n\ndef assert_correct_frame_operation(sourceFrame, h2oResultFrame, operString):\n \"\"\"\n This method checks each element of a numeric H2OFrame and throw an assert error if its value does not\n equal to the same operation carried out by python.\n\n :param sourceFrame: original H2OFrame.\n :param h2oResultFrame: H2OFrame after operation on original H2OFrame is carried out.\n :param operString: str representing one of 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh',\n 'ceil', 'cos', 'cosh', 'cospi', 'cumprod', 'cumsum', 'digamma', 'exp', 'expm1', 'floor', 'round',\n 'sin', 'sign', 'round', 'sinh', 'tan', 'tanh'\n :return: None.\n \"\"\"\n validStrings = ['acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'ceil', 'cos', 'cosh',\n 'exp', 'floor', 'gamma', 'lgamma', 'log', 'log10', 'sin', 'sinh',\n 'sqrt', 'tan', 'tanh', 'trigamma', 'expm1']\n npValidStrings = ['log2', 'sign']\n nativeStrings = ['round', 'abs', 'cumsum']\n multpi = ['cospi', 'sinpi', 'tanpi']\n others = ['log1p', 'signif', 'trigamma', 'digamma', 'cumprod']\n # check for valid operString\n assert operString in validStrings+npValidStrings+nativeStrings+multpi+others, \"Illegal operator \" \\\n \"{0} specified.\".format(operString)\n result_comp = lambda x:x # default method\n\n if operString == \"log1p\":\n result_comp = lambda x:math.log(x+1)\n elif operString == 'signif':\n result_comp = lambda x:round(x, 7)\n elif operString == 'trigamma':\n result_comp = lambda x:scipy.special.polygamma(1, x)\n elif operString == 'digamma':\n result_comp = lambda x:scipy.special.polygamma(0, x)\n elif operString=='cumprod':\n result_comp = lambda x:factorial(x)\n # stringOperations = 'result_val = factorial(sourceFrame[row_ind, col_ind])'\n elif operString in validStrings:\n result_comp = lambda x:getattr(math, 
operString)(x)\n elif operString in nativeStrings:\n result_comp =lambda x:__builtins__.get(operString)(x)\n stringOperations = 'result_val = '+operString+'(sourceFrame[row_ind, col_ind])'\n elif operString in npValidStrings:\n result_comp = lambda x:getattr(np, operString)(x)\n # stringOperations = 'result_val = np.'+operString+'(sourceFrame[row_ind, col_ind])'\n elif operString in multpi:\n result_comp = lambda x:getattr(math, operString.split('p')[0])(x*math.pi)\n #stringOperations = 'result_val = math.'+operString.split('p')[0]+'(sourceFrame[row_ind, col_ind]*math.pi)'\n\n for col_ind in range(sourceFrame.ncols):\n for row_ind in range(sourceFrame.nrows):\n result_val = result_comp(sourceFrame[row_ind, col_ind])\n assert abs(h2oResultFrame[row_ind, col_ind]-result_val) <= 1e-6, \\\n \" command {0}({3}) is not working. Expected: {1}. Received: {2}\".format(operString, result_val,\n h2oResultFrame[row_ind, col_ind], sourceFrame[row_ind, col_ind])\n\ndef factorial(n):\n \"\"\"\n Defined my own factorial just in case using python2.5 or less.\n\n :param n:\n :return:\n \"\"\"\n if n>0 and n<2:\n return 1\n if n>=2:\n return n*factorial(n-1)\n\ndef cumop(items, op, colInd=0): # take in one column only\n res = [None]*len(items)\n for index in range(len(items)):\n res[index] = op(res[index-1], items[index, colInd]) if index > 0 else items[index, colInd]\n return res\n\ndef compare_string_frames_local(f1, f2, prob=0.5):\n temp1 = f1.as_data_frame(use_pandas=False)\n temp2 = f2.as_data_frame(use_pandas=False)\n cname1 = temp1[0]\n cname2 = temp2[0]\n assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), \"The two frames are of different sizes.\"\n for colInd in range(f1.ncol):\n name1 = cname1[colInd]\n for rowInd in range(1, f2.nrow):\n if random.uniform(0,1) < prob:\n assert temp1[rowInd][colInd]==temp2[rowInd][cname2.index(name1)], \"Failed frame values check at row {2} and column {3}! \" \\\n \"frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd)\n\n\ndef check_data_rows(f1, f2, index_list=[], num_rows=10):\n '''\n This method will compare the relationships of the data rows within each frames. In particular, we are\n interested in the relative direction of each row vectors and the relative distances. No assertions will\n be thrown.\n\n :param f1:\n :param f2:\n :param index_list:\n :param num_rows:\n :return:\n '''\n temp1 = f1.as_data_frame(use_pandas=True).as_matrix()\n temp2 = f2.as_data_frame(use_pandas=True).as_matrix()\n if len(index_list)==0:\n index_list = random.sample(range(f1.nrow), num_rows)\n\n maxInnerProduct = 0\n maxDistance = 0\n\n for row_index in range(1, len(index_list)):\n r1 = np.inner(temp1[index_list[row_index-1]], temp1[index_list[row_index]])\n r2 = np.inner(temp2[index_list[row_index-1]], temp2[index_list[row_index]])\n d1 = np.linalg.norm(temp1[index_list[row_index-1]]-temp1[index_list[row_index]])\n d2 = np.linalg.norm(temp2[index_list[row_index-1]]-temp2[index_list[row_index]])\n\n diff1 = min(abs(r1-r2), abs(r1-r2)/max(abs(r1), abs(r2)))\n maxInnerProduct = max(maxInnerProduct, diff1)\n diff2 = min(abs(d1-d2), abs(d1-d2)/max(abs(d1), abs(d2)))\n maxDistance = max(maxDistance, diff2)\n\n print(\"Maximum inner product different is {0}. Maximum distance difference is \"\n \"{1}\".format(maxInnerProduct, maxDistance))\n\n\ndef compare_data_rows(f1, f2, index_list=[], num_rows=10, tol=1e-3):\n '''\n This method will compare the relationships of the data rows within each frames. 
In particular, we are\n interested in the relative direction of each row vectors and the relative distances. An assertion will be\n thrown if they are different beyond a tolerance.\n\n :param f1:\n :param f2:\n :param index_list:\n :param num_rows:\n :return:\n '''\n temp1 = f1.as_data_frame(use_pandas=True).as_matrix()\n temp2 = f2.as_data_frame(use_pandas=True).as_matrix()\n if len(index_list)==0:\n index_list = random.sample(range(f1.nrow), num_rows)\n\n maxInnerProduct = 0\n maxDistance = 0\n for row_index in range(1, len(index_list)):\n r1 = np.inner(temp1[index_list[row_index-1]], temp1[index_list[row_index]])\n r2 = np.inner(temp2[index_list[row_index-1]], temp2[index_list[row_index]])\n d1 = np.linalg.norm(temp1[index_list[row_index-1]]-temp1[index_list[row_index]])\n d2 = np.linalg.norm(temp2[index_list[row_index-1]]-temp2[index_list[row_index]])\n\n diff1 = min(abs(r1-r2), abs(r1-r2)/max(abs(r1), abs(r2)))\n maxInnerProduct = max(maxInnerProduct, diff1)\n diff2 = min(abs(d1-d2), abs(d1-d2)/max(abs(d1), abs(d2)))\n maxDistance = max(maxDistance, diff2)\n\n assert diff1 < tol, \\\n \"relationship between data row {0} and data row {1} are different among the two dataframes. Inner \" \\\n \"product from frame 1 is {2}. Inner product from frame 2 is {3}. The difference between the two is\" \\\n \" {4}\".format(index_list[row_index-1], index_list[row_index], r1, r2, diff1)\n\n\n assert diff2 < tol, \\\n \"distance betwee data row {0} and data row {1} are different among the two dataframes. Distance \" \\\n \"between 2 rows from frame 1 is {2}. Distance between 2 rows from frame 2 is {3}. The difference\" \\\n \" between the two is {4}\".format(index_list[row_index-1], index_list[row_index], d1, d2, diff2)\n print(\"Maximum inner product different is {0}. Maximum distance difference is \"\n \"{1}\".format(maxInnerProduct, maxDistance))\n\ndef compute_frame_diff(f1, f2):\n '''\n This method will take the absolute difference two frames and sum across all elements\n :param f1:\n :param f2:\n :return:\n '''\n\n frameDiff = h2o.H2OFrame.sum(h2o.H2OFrame.sum(h2o.H2OFrame.abs(f1-f2)), axis=1)[0,0]\n return frameDiff\n\ndef compare_frames_local(f1, f2, prob=0.5, tol=1e-6, returnResult=False):\n temp1 = f1.as_data_frame(use_pandas=False)\n temp2 = f2.as_data_frame(use_pandas=False)\n assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), \"The two frames are of different sizes.\"\n for colInd in range(f1.ncol):\n for rowInd in range(1,f2.nrow):\n if (random.uniform(0,1) < prob):\n if (math.isnan(float(temp1[rowInd][colInd]))):\n if returnResult:\n if not(math.isnan(float(temp2[rowInd][colInd]))):\n return False\n assert math.isnan(float(temp2[rowInd][colInd])), \"Failed frame values check at row {2} and column {3}! \" \\\n \"frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd)\n else:\n v1 = float(temp1[rowInd][colInd])\n v2 = float(temp2[rowInd][colInd])\n diff = abs(v1-v2)/max(1.0, abs(v1), abs(v2))\n if returnResult:\n if diff > tol:\n return False\n assert diff<=tol, \"Failed frame values check at row {2} and column {3}! 
frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(v1, v2, rowInd, colInd)\n if returnResult:\n return True\n\n\n# frame compare with NAs in column\ndef compare_frames_local_onecolumn_NA(f1, f2, prob=0.5, tol=1e-6):\n temp1 = f1.as_data_frame(use_pandas=False)\n temp2 = f2.as_data_frame(use_pandas=False)\n assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), \"The two frames are of different sizes.\"\n for colInd in range(f1.ncol):\n for rowInd in range(1,f2.nrow):\n if (random.uniform(0,1) < prob):\n if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:\n assert len(temp1[rowInd]) == len(temp2[rowInd]), \"Failed frame values check at row {2} ! \" \\\n \"frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd], temp2[rowInd], rowInd)\n else:\n v1 = float(temp1[rowInd][colInd])\n v2 = float(temp2[rowInd][colInd])\n diff = abs(v1-v2)/max(1.0, abs(v1), abs(v2))\n assert diff<=tol, \"Failed frame values check at row {2} and column {3}! frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(v1, v2, rowInd, colInd)\n# frame compare with NAs in column\ndef compare_frames_local_onecolumn_NA_enum(f1, f2, prob=0.5, tol=1e-6):\n temp1 = f1.as_data_frame(use_pandas=False)\n temp2 = f2.as_data_frame(use_pandas=False)\n assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), \"The two frames are of different sizes.\"\n for colInd in range(f1.ncol):\n for rowInd in range(1,f2.nrow):\n if (random.uniform(0,1) < prob):\n if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:\n assert len(temp1[rowInd]) == len(temp2[rowInd]), \"Failed frame values check at row {2} ! \" \\\n \"frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd], temp2[rowInd], rowInd)\n else:\n assert temp1[rowInd][colInd]==temp2[rowInd][colInd], \"Failed frame values check at row {2} and column {3}! frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd][colInd], temp1[rowInd][colInd], rowInd, colInd)\n\n# frame compare with NAs in column\ndef compare_frames_local_onecolumn_NA_string(f1, f2, prob=0.5):\n temp1 = f1.as_data_frame(use_pandas=False)\n temp2 = f2.as_data_frame(use_pandas=False)\n assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), \"The two frames are of different sizes.\"\n for colInd in range(f1.ncol):\n for rowInd in range(1,f2.nrow):\n if (random.uniform(0,1) < prob):\n if len(temp1[rowInd]) == 0 or len(temp2[rowInd]) == 0:\n assert len(temp1[rowInd]) == len(temp2[rowInd]), \"Failed frame values check at row {2} ! \" \\\n \"frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd], temp2[rowInd], rowInd)\n else:\n assert temp1[rowInd][colInd]==temp2[rowInd][colInd], \"Failed frame values check at row {2} and column {3}! 
frame1 value: {0}, frame2 value: \" \\\n \"{1}\".format(temp1[rowInd][colInd], temp1[rowInd][colInd], rowInd, colInd)\n\n\ndef build_save_model_GLM(params, x, train, respName):\n # build a model\n model = H2OGeneralizedLinearEstimator(**params)\n model.train(x=x, y=respName, training_frame=train)\n # save model\n regex = re.compile(\"[+\\\\-* !@#$%^&()={}\\\\[\\\\]|;:'\\\"<>,.?/]\")\n MOJONAME = regex.sub(\"_\", model._id)\n\n print(\"Downloading Java prediction model code from H2O\")\n TMPDIR = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath('__file__')), \"..\", \"results\", MOJONAME))\n os.makedirs(TMPDIR)\n model.download_mojo(path=TMPDIR) # save mojo\n return model\n\ndef build_save_model_GBM(params, x, train, respName):\n # build a model\n model = H2OGradientBoostingEstimator(**params)\n model.train(x=x, y=respName, training_frame=train)\n # save model\n regex = re.compile(\"[+\\\\-* !@#$%^&()={}\\\\[\\\\]|;:'\\\"<>,.?/]\")\n MOJONAME = regex.sub(\"_\", model._id)\n\n print(\"Downloading Java prediction model code from H2O\")\n TMPDIR = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath('__file__')), \"..\", \"results\", MOJONAME))\n os.makedirs(TMPDIR)\n model.download_mojo(path=TMPDIR) # save mojo\n return model\n\ndef build_save_model_DRF(params, x, train, respName):\n # build a model\n model = H2ORandomForestEstimator(**params)\n model.train(x=x, y=respName, training_frame=train)\n # save model\n regex = re.compile(\"[+\\\\-* !@#$%^&()={}\\\\[\\\\]|;:'\\\"<>,.?/]\")\n MOJONAME = regex.sub(\"_\", model._id)\n\n print(\"Downloading Java prediction model code from H2O\")\n TMPDIR = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath('__file__')), \"..\", \"results\", MOJONAME))\n os.makedirs(TMPDIR)\n model.download_mojo(path=TMPDIR) # save mojo\n return model\n\n\n# generate random dataset, copied from Pasha\ndef random_dataset(response_type, verbose=True, NTESTROWS=200, missing_fraction=0.0, seed=None):\n \"\"\"Create and return a random dataset.\"\"\"\n if verbose: print(\"\\nCreating a dataset for a %s problem:\" % response_type)\n fractions = {k + \"_fraction\": random.random() for k in \"real categorical integer time string binary\".split()}\n fractions[\"string_fraction\"] = 0 # Right now we are dropping string columns, so no point in having them.\n fractions[\"binary_fraction\"] /= 3\n fractions[\"time_fraction\"] /= 2\n\n sum_fractions = sum(fractions.values())\n for k in fractions:\n fractions[k] /= sum_fractions\n if response_type == 'binomial':\n response_factors = 2\n else:\n response_factors = random.randint(3, 10)\n df = h2o.create_frame(rows=random.randint(15000, 25000) + NTESTROWS, cols=random.randint(3, 20),\n missing_fraction=missing_fraction,\n has_response=True, response_factors=response_factors, positive_response=True, factors=10,\n seed=seed, **fractions)\n if verbose:\n print()\n df.show()\n return df\n\n# generate random dataset of ncolumns of Strings, copied from Pasha\ndef random_dataset_strings_only(nrow, ncol, seed=None):\n \"\"\"Create and return a random dataset.\"\"\"\n fractions = dict()\n fractions[\"real_fraction\"] = 0 # Right now we are dropping string columns, so no point in having them.\n fractions[\"categorical_fraction\"] = 0\n fractions[\"integer_fraction\"] = 0\n fractions[\"time_fraction\"] = 0\n fractions[\"string_fraction\"] = 1 # Right now we are dropping string columns, so no point in having them.\n fractions[\"binary_fraction\"] = 0\n return h2o.create_frame(rows=nrow, cols=ncol, 
missing_fraction=0, has_response=False, seed=seed, **fractions)\n\n# generate random dataset of ncolumns of enums only, copied from Pasha\ndef random_dataset_enums_only(nrow, ncol, factorL=10, misFrac=0.01, randSeed=None):\n \"\"\"Create and return a random dataset.\"\"\"\n fractions = dict()\n fractions[\"real_fraction\"] = 0 # Right now we are dropping string columns, so no point in having them.\n fractions[\"categorical_fraction\"] = 1\n fractions[\"integer_fraction\"] = 0\n fractions[\"time_fraction\"] = 0\n fractions[\"string_fraction\"] = 0 # Right now we are dropping string columns, so no point in having them.\n fractions[\"binary_fraction\"] = 0\n\n df = h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, factors=factorL,\n seed=randSeed, **fractions)\n return df\n\n# generate random dataset of ncolumns of enums only, copied from Pasha\ndef random_dataset_int_only(nrow, ncol, rangeR=10, misFrac=0.01, randSeed=None):\n \"\"\"Create and return a random dataset.\"\"\"\n fractions = dict()\n fractions[\"real_fraction\"] = 0 # Right now we are dropping string columns, so no point in having them.\n fractions[\"categorical_fraction\"] = 0\n fractions[\"integer_fraction\"] = 1\n fractions[\"time_fraction\"] = 0\n fractions[\"string_fraction\"] = 0 # Right now we are dropping string columns, so no point in having them.\n fractions[\"binary_fraction\"] = 0\n\n df = h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, integer_range=rangeR,\n seed=randSeed, **fractions)\n return df\n\n# generate random dataset of ncolumns of integer and reals, copied from Pasha\ndef random_dataset_numeric_only(nrow, ncol, integerR=100, misFrac=0.01, randSeed=None):\n \"\"\"Create and return a random dataset.\"\"\"\n fractions = dict()\n fractions[\"real_fraction\"] = 0.25 # Right now we are dropping string columns, so no point in having them.\n fractions[\"categorical_fraction\"] = 0\n fractions[\"integer_fraction\"] = 0.75\n fractions[\"time_fraction\"] = 0\n fractions[\"string_fraction\"] = 0 # Right now we are dropping string columns, so no point in having them.\n fractions[\"binary_fraction\"] = 0\n\n df = h2o.create_frame(rows=nrow, cols=ncol, missing_fraction=misFrac, has_response=False, integer_range=integerR,\n seed=randSeed, **fractions)\n return df\n\ndef getMojoName(modelID):\n regex = re.compile(\"[+\\\\-* !@#$%^&()={}\\\\[\\\\]|;:'\\\"<>,.?/]\")\n return regex.sub(\"_\", modelID)\n\n\ndef convertH2OFrameToDMatrix(h2oFrame, yresp, enumCols=[]):\n \"\"\"\n This method will convert a H2OFrame containing to a DMatrix that is can be used by native XGBoost. The\n H2OFrame can contain numerical and enum columns. Note that H2O one-hot-encoding introduces a missing(NA)\n column. There can be NAs in any columns.\n\n :param h2oFrame: H2OFrame to be converted to DMatrix\n :param yresp: string denoting the response column name\n :param enumCols: list of enum column names in the H2OFrame\n\n :return: DMatrix\n \"\"\"\n import xgboost as xgb\n\n pandas = __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols);\n\n return xgb.DMatrix(data=pandas[0], label=pandas[1])\n\ndef convertH2OFrameToDMatrixSparse(h2oFrame, yresp, enumCols=[]):\n \"\"\"\n This method will convert a H2OFrame containing to a DMatrix that is can be used by native XGBoost. The\n H2OFrame can contain numerical and enum columns. Note that H2O one-hot-encoding introduces a missing(NA)\n column. 
There can be NAs in any columns.\n\n :param h2oFrame: H2OFrame to be converted to DMatrix\n :param yresp: string denoting the response column name\n :param enumCols: list of enum column names in the H2OFrame\n\n :return: DMatrix\n \"\"\"\n import xgboost as xgb\n\n pandas = __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols);\n\n return xgb.DMatrix(data=csr_matrix(pandas[0]), label=pandas[1])\n\n\ndef __convertH2OFrameToPandas__(h2oFrame, yresp, enumCols=[]):\n \"\"\"\n This method will convert a H2OFrame containing to a DMatrix that is can be used by native XGBoost. The\n H2OFrame can contain numerical and enum columns. Note that H2O one-hot-encoding introduces a missing(NA)\n column. There can be NAs in any columns.\n\n :param h2oFrame: H2OFrame to be converted to DMatrix\n :param yresp: string denoting the response column name\n :param enumCols: list of enum column names in the H2OFrame\n\n :return: DMatrix\n \"\"\"\n import xgboost as xgb\n\n pandaFtrain = h2oFrame.as_data_frame(use_pandas=True, header=True)\n nrows = h2oFrame.nrow\n\n if len(enumCols) > 0: # start with first enum column\n pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows)\n pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)\n\n for colInd in range(1, len(enumCols)):\n cname=enumCols[colInd]\n ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows)\n pandaTrainPart=pd.concat([pandaTrainPart, ctemp], axis=1)\n pandaFtrain.drop([cname], axis=1, inplace=True)\n\n pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)\n\n c0= h2oFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)\n pandaFtrain.drop([yresp], axis=1, inplace=True)\n pandaF = pd.concat([c0, pandaFtrain], axis=1)\n pandaF.rename(columns={c0.columns[0]:yresp}, inplace=True)\n newX = list(pandaFtrain.columns.values)\n data = pandaF.as_matrix(newX)\n label = pandaF.as_matrix([yresp])\n\n return (data,label)\n\ndef generatePandaEnumCols(pandaFtrain, cname, nrows):\n \"\"\"\n For a H2O Enum column, we perform one-hot-encoding here and added one more column \"missing(NA)\" to it.\n\n :param pandaFtrain:\n :param cname:\n :param nrows:\n :return:\n \"\"\"\n cmissingNames=[cname+\".missing(NA)\"]\n tempnp = np.zeros((nrows,1), dtype=np.int)\n # check for nan and assign it correct value\n colVals = pandaFtrain[cname]\n for ind in range(nrows):\n try:\n float(colVals[ind])\n if math.isnan(colVals[ind]):\n tempnp[ind]=1\n except ValueError:\n pass\n zeroFrame = pd.DataFrame(tempnp)\n zeroFrame.columns=cmissingNames\n temp = pd.get_dummies(pandaFtrain[cname], prefix=cname, drop_first=False)\n tempNames = list(temp) # get column names\n colLength = len(tempNames)\n newNames = ['a']*colLength\n newIndics = [0]*colLength\n header = tempNames[0].split('.')[0]\n\n for ind in range(colLength):\n newIndics[ind] = int(tempNames[ind].split('.')[1][1:])\n newIndics.sort()\n\n for ind in range(colLength):\n newNames[ind] = header+'.l'+str(newIndics[ind]) # generate correct order of names\n ftemp = temp[newNames]\n ctemp = pd.concat([ftemp, zeroFrame], axis=1)\n return ctemp\n\ndef summarizeResult_binomial(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,\n nativeScoreTime, tolerance=1e-6):\n '''\n This method will summarize and compare H2OXGBoost and native XGBoost results for binomial classifiers.\n This method will summarize and compare H2OXGBoost and native XGBoost results for binomial classifiers.\n\n :param h2oPredictD:\n :param nativePred:\n :param h2oTrainTimeD:\n :param nativeTrainTime:\n :param 
h2oPredictTimeD:\n :param nativeScoreTime:\n :return:\n '''\n # Result comparison in terms of time\n print(\"H2OXGBoost train time is {0}s. Native XGBoost train time is {1}s.\\n H2OXGBoost scoring time is {2}s.\"\n \" Native XGBoost scoring time is {3}s.\".format(h2oTrainTimeD/1000.0, nativeTrainTime,\n h2oPredictTimeD, nativeScoreTime))\n # Result comparison in terms of actual prediction value between the two\n colnames = h2oPredictD.names\n h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()\n h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)\n\n # compare prediction probability and they should agree if they use the same seed\n for ind in range(h2oPredictD.nrow):\n assert abs(h2oPredictLocalD[colnames[2]][ind]-nativePred[ind])<tolerance, \"H2O prediction prob: {0} and native \" \\\n \"XGBoost prediction prob: {1}. They are \" \\\n \"very different.\".format(h2oPredictLocalD[colnames[2]][ind], nativePred[ind])\n\ndef summarizeResult_multinomial(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,\n nativeScoreTime, tolerance=1e-6):\n # Result comparison in terms of time\n print(\"H2OXGBoost train time is {0}s. Native XGBoost train time is {1}s.\\n H2OGBoost scoring time is {2}s.\"\n \" Native XGBoost scoring time is {3}s.\".format(h2oTrainTimeD/1000.0, nativeTrainTime,\n h2oPredictTimeD, nativeScoreTime))\n # Result comparison in terms of actual prediction value between the two\n h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()\n h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)\n nclass = len(nativePred[0])\n colnames = h2oPredictD.names\n\n # compare prediction probability and they should agree if they use the same seed\n for ind in range(h2oPredictD.nrow):\n for col in range(nclass):\n assert abs(h2oPredictLocalD[colnames[col+1]][ind]-nativePred[ind][col])<tolerance, \\\n \"H2O prediction prob: {0} and native XGBoost prediction prob: {1}. They are very \" \\\n \"different.\".format(h2oPredictLocalD[colnames[col+1]][ind], nativePred[ind][col])\n\ndef genTrainFrame(nrow, ncol, enumCols=0, enumFactors=2, responseLevel=2, miscfrac=0, randseed=None):\n if ncol>0:\n trainFrameNumerics = random_dataset_numeric_only(nrow, ncol, integerR = 1000000, misFrac=miscfrac, randSeed=randseed)\n if enumCols > 0:\n trainFrameEnums = random_dataset_enums_only(nrow, enumCols, factorL=enumFactors, misFrac=miscfrac, randSeed=randseed)\n\n yresponse = random_dataset_enums_only(nrow, 1, factorL=responseLevel, misFrac=0, randSeed=randseed)\n yresponse.set_name(0,'response')\n if enumCols > 0:\n if ncol > 0: # mixed datasets\n trainFrame = trainFrameEnums.cbind(trainFrameNumerics.cbind(yresponse))\n else: # contains enum datasets\n trainFrame = trainFrameEnums.cbind(yresponse)\n else: # contains numerical datasets\n trainFrame = trainFrameNumerics.cbind(yresponse)\n return trainFrame\n\ndef summarizeResult_regression(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD, nativeScoreTime, tolerance=1e-6):\n # Result comparison in terms of time\n print(\"H2OXGBoost train time is {0}ms. 
Native XGBoost train time is {1}s.\\n H2OGBoost scoring time is {2}s.\"\n \" Native XGBoost scoring time is {3}s.\".format(h2oTrainTimeD, nativeTrainTime,\n h2oPredictTimeD, nativeScoreTime))\n # Result comparison in terms of actual prediction value between the two\n h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()\n h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)\n\n\n # compare prediction probability and they should agree if they use the same seed\n for ind in range(h2oPredictD.nrow):\n assert abs((h2oPredictLocalD['predict'][ind]-nativePred[ind])/max(1, abs(h2oPredictLocalD['predict'][ind]), abs(nativePred[ind])))<tolerance, \\\n \"H2O prediction: {0} and native XGBoost prediction: {1}. They are very \" \\\n \"different.\".format(h2oPredictLocalD['predict'][ind], nativePred[ind])\n\ndef summarizeResult_binomial_DS(h2oPredictD, nativePred, h2oTrainTimeD, nativeTrainTime, h2oPredictTimeD,\n nativeScoreTime, h2oPredictS, tolerance=1e-6):\n # Result comparison in terms of time\n print(\"H2OXGBoost train time with sparse DMatrix is {0}s. Native XGBoost train time with dense DMtraix is {1}s.\\n H2OGBoost scoring time is {2}s.\"\n \" Native XGBoost scoring time with dense DMatrix is {3}s.\".format(h2oTrainTimeD/1000.0, nativeTrainTime,\n h2oPredictTimeD, nativeScoreTime))\n # Result comparison in terms of actual prediction value between the two\n h2oPredictD['predict'] = h2oPredictD['predict'].asnumeric()\n h2oPredictLocalD = h2oPredictD.as_data_frame(use_pandas=True, header=True)\n h2oPredictS['predict'] = h2oPredictS['predict'].asnumeric()\n h2oPredictLocalS = h2oPredictS.as_data_frame(use_pandas=True, header=True)\n\n # compare prediction probability and they should agree if they use the same seed\n for ind in range(h2oPredictD.nrow):\n assert abs(h2oPredictLocalD['c0.l1'][ind]-nativePred[ind])<tolerance or \\\n abs(h2oPredictLocalS['c0.l1'][ind]-nativePred[ind])<tolerance, \\\n \"H2O prediction prob: {0} and native XGBoost prediction prob: {1}. 
They are very \" \\\n \"different.\".format(h2oPredictLocalD['c0.l1'][ind], nativePred[ind])\n\n\ndef compare_weightedStats(model, dataframe, xlist, xname, weightV, pdpTDTable, tol=1e-6):\n '''\n This method is used to test the partial dependency plots and is not meant for any other functions.\n \n :param model:\n :param dataframe:\n :param xlist:\n :param xname:\n :param weightV:\n :param pdpTDTable:\n :param tol:\n :return:\n '''\n weightStat = manual_partial_dependence(model, dataframe, xlist, xname, weightV) # calculate theoretical weighted sts\n wMean = extract_col_value_H2OTwoDimTable(pdpTDTable, \"mean_response\") # stats for age predictor\n wStd = extract_col_value_H2OTwoDimTable(pdpTDTable, \"stddev_response\")\n wStdErr = extract_col_value_H2OTwoDimTable(pdpTDTable, \"std_error_mean_response\")\n equal_two_arrays(weightStat[0], wMean, tol, tol, throwError=True)\n equal_two_arrays(weightStat[1], wStd, tol, tol, throwError=True)\n equal_two_arrays(weightStat[2], wStdErr, tol, tol, throwError=True)\n\n\ndef manual_partial_dependence(model, dataframe, xlist, xname, weightV):\n meanV = []\n stdV = []\n stderrV = []\n nRows = dataframe.nrow\n nCols = dataframe.ncol-1\n\n for xval in xlist:\n cons = [xval]*nRows\n if xname in dataframe.names:\n dataframe=dataframe.drop(xname)\n if not((isinstance(xval, string_types) and xval=='NA') or (isinstance(xval, float) and math.isnan(xval))):\n dataframe = dataframe.cbind(h2o.H2OFrame(cons))\n dataframe.set_name(nCols, xname)\n\n pred = model.predict(dataframe).as_data_frame(use_pandas=False, header=False)\n pIndex = len(pred[0])-1\n sumEle = 0.0\n sumEleSq = 0.0\n sumWeight = 0.0\n numNonZeroWeightCount = 0.0\n m = 1.0/math.sqrt(dataframe.nrow*1.0)\n for rindex in range(len(pred)):\n val = float(pred[rindex][pIndex]);\n weight = float(weightV[rindex][0])\n if (abs(weight) > 0) and isinstance(val, float) and not(math.isnan(val)):\n temp = val*weight\n sumEle = sumEle+temp\n sumEleSq = sumEleSq+temp*val\n sumWeight = sumWeight+weight\n numNonZeroWeightCount = numNonZeroWeightCount+1\n wMean = sumEle/sumWeight\n scale = numNonZeroWeightCount*1.0/(numNonZeroWeightCount-1)\n wSTD = math.sqrt((sumEleSq/sumWeight-wMean*wMean)*scale)\n meanV.append(wMean)\n stdV.append(wSTD)\n stderrV.append(wSTD*m)\n\n return meanV, stdV, stderrV\n\ndef compare_frames_equal_names(frame1, frame2):\n '''\n This method will compare two frames with same column names and column types. The current accepted column\n types are enum, int and string.\n\n :param frame1:\n :param frame2:\n :return:\n '''\n cnames = frame1.names\n ctypes = frame1.types\n for cind in range(0, frame1.ncol):\n name1 = cnames[cind]\n type = str(ctypes[name1])\n\n if (type==\"enum\"):\n compare_frames_local_onecolumn_NA_enum(frame1[name1], frame2[name1], prob=1, tol=0)\n elif (type=='string'):\n compare_frames_local_onecolumn_NA_string(frame1[name1], frame2[name1], prob=1)\n else:\n compare_frames_local_onecolumn_NA(frame1[name1], frame2[name1], prob=1, tol=1e-10)\n"
] |
[
[
"numpy.asarray",
"numpy.kron",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.any",
"numpy.exp",
"numpy.where",
"numpy.argmax",
"numpy.ravel",
"numpy.zeros",
"pandas.concat",
"numpy.isnan",
"scipy.sparse.csr_matrix",
"numpy.genfromtxt",
"numpy.random.random_integers",
"numpy.savetxt",
"numpy.array",
"numpy.sum",
"numpy.absolute",
"numpy.inner",
"numpy.random.standard_normal",
"numpy.linalg.norm",
"numpy.ones",
"numpy.sign",
"numpy.random.uniform",
"pandas.get_dummies"
]
] |
jasoriya/HackerEarth-DL-3-Challenge
|
[
"b1bd5b3955913327408541ef4b14c260b9014593"
] |
[
"src/data_prep.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 15 22:12:06 2018\n\n@author: Shreyans\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom keras import applications\nfrom keras.applications.resnet50 import preprocess_input\nfrom keras.preprocessing import image\nimport os\n\ntrain_data = pd.read_csv(\"../data/meta-data/train.csv\")\ntest_data = pd.read_csv(\"../data/meta-data/test.csv\")\n\ndef named_model(name):\n # include_top=False removes the fully connected layer at the end/top of the network\n # This allows us to get the feature vector as opposed to a classification\n if name == 'Xception':\n return applications.xception.Xception(weights='imagenet', include_top=False, pooling='avg')\n\n if name == 'VGG16':\n return applications.vgg16.VGG16(weights='imagenet', include_top=False, pooling='avg')\n\n if name == 'VGG19':\n return applications.vgg19.VGG19(weights='imagenet', include_top=False, pooling='avg')\n\n if name == 'InceptionV3':\n return applications.inception_v3.InceptionV3(weights='imagenet', include_top=False, pooling='avg')\n\n if name == 'MobileNet':\n return applications.mobilenet.MobileNet(weights='imagenet', include_top=False, pooling='avg')\n\n return applications.resnet50.ResNet50(weights='imagenet', include_top=False, pooling='avg')\n\ndef image_vectors(data, model):\n source_dir = os.path.dirname(os.getcwd())\n arr = []\n for i in range(len(data)):\n print(\"Extracting feature data of Image-\" + str(i) + \".jpg\")\n img_path = os.path.join(source_dir, 'data/train_img/' + data.iloc[i])\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n \n x = preprocess_input(x)\n \n features = model.predict(x)[0]\n features_arr = np.char.mod('%f', features)\n arr.append(features_arr)\n return arr\n\nmodel = named_model(\"InceptionV3\")\nfeatures_data = image_vectors(train_data.Image_name, model)\n#np.save(\"../data/train_features\",features_data)\nfeature_test = image_vectors(test_data.Image_name, model)\nnp.save(\"../data/test_features.npy\", feature_test)\n\n\n\n"
] |
[
[
"numpy.char.mod",
"pandas.read_csv",
"numpy.expand_dims",
"numpy.save"
]
] |
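A short sketch of the numpy calls listed above as the row's feature-extraction script uses them (add a batch axis, format the feature vector as strings, save to disk); the file name and shapes are placeholders:

    import numpy as np

    x = np.random.rand(224, 224, 3).astype(np.float32)
    batch = np.expand_dims(x, axis=0)        # (1, 224, 224, 3): the batch axis expected by model.predict

    features = np.random.rand(2048)
    as_text = np.char.mod('%f', features)    # element-wise '%f' formatting, one string per feature

    np.save('features_demo.npy', features)   # np.save appends '.npy' if the extension is missing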
safwanhossain/fairlearn
|
[
"87de12b6f3036fc61efdbf0b8918470064e5b783"
] |
[
"test/unit/metrics/test_create_group_metric_set.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport numpy as np\nimport pytest\n\nfrom fairlearn.metrics import group_accuracy_score, group_roc_auc_score\nfrom fairlearn.metrics import create_group_metric_set\n\nfrom test.unit.input_convertors import conversions_for_1d\n\n\ndef test_bad_model_type():\n with pytest.raises(ValueError) as exception_context:\n create_group_metric_set(\"Something Random\", None, None, None)\n expected = \"model_type 'Something Random' not in ['binary_classification', 'regression']\"\n assert exception_context.value.args[0] == expected\n\n\ndef test_smoke():\n # Single model, single sensitive feature vector, no names\n Y_true = [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0]\n Y_pred = [[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1]]\n sensitive_feature = [['a', 'b', 'b', 'a', 'b', 'b', 'b', 'a', 'b', 'b', 'b']]\n sf_int = [int(x == 'b') for x in sensitive_feature[0]]\n\n result = create_group_metric_set('binary_classification', Y_true, Y_pred, sensitive_feature)\n assert result['predictionType'] == 'binaryClassification'\n assert result['schemaType'] == 'groupMetricSet'\n assert result['schemaVersion'] == 0\n\n assert isinstance(result['trueY'], list)\n assert np.array_equal(result['trueY'], Y_true)\n\n assert isinstance(result['precomputedFeatureBins'], list)\n assert len(result['precomputedFeatureBins']) == 1\n bin_dict = result['precomputedFeatureBins'][0]\n assert isinstance(bin_dict, dict)\n assert np.array_equal(bin_dict['binVector'], sf_int)\n assert np.array_equal(bin_dict['binLabels'], ['a', 'b'])\n\n assert isinstance(result['predictedY'], list)\n assert len(result['predictedY']) == 1\n y_p = result['predictedY'][0]\n assert isinstance(y_p, list)\n assert np.array_equal(y_p, Y_pred[0])\n\n assert isinstance(result['precomputedMetrics'], list)\n assert len(result['precomputedMetrics']) == 1\n metrics_group_0 = result['precomputedMetrics'][0]\n assert isinstance(metrics_group_0, list)\n assert len(metrics_group_0) == 1\n metrics_g0_m0 = metrics_group_0[0]\n assert isinstance(metrics_g0_m0, dict)\n assert len(metrics_g0_m0) == 10\n\n accuracy = metrics_g0_m0['accuracy_score']\n assert isinstance(accuracy, dict)\n gmr = group_accuracy_score(Y_true, Y_pred[0], sensitive_feature[0])\n assert gmr.overall == pytest.approx(accuracy['global'])\n assert isinstance(accuracy['bins'], list)\n assert len(accuracy['bins']) == 2\n assert gmr.by_group['a'] == pytest.approx(accuracy['bins'][0])\n assert gmr.by_group['b'] == pytest.approx(accuracy['bins'][1])\n\n roc_auc = metrics_g0_m0['balanced_accuracy_score']\n assert isinstance(roc_auc, dict)\n gmr = group_roc_auc_score(Y_true, Y_pred[0], sensitive_feature[0])\n assert gmr.overall == pytest.approx(roc_auc['global'])\n assert isinstance(roc_auc['bins'], list)\n assert len(roc_auc['bins']) == 2\n assert gmr.by_group['a'] == pytest.approx(roc_auc['bins'][0])\n assert gmr.by_group['b'] == pytest.approx(roc_auc['bins'][1])\n\n\ndef test_two_models():\n # Two models, single sensitive feature vector, no names\n Y_true = [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1]\n Y_pred = [[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1],\n [1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0]]\n a, b = 'a', 'b'\n sensitive_features = [[b, a, a, b, b, a, a, b, b, a, b, a, b, a, b]]\n sf_int = [int(x == 'b') for x in sensitive_features[0]]\n\n result = create_group_metric_set('binary_classification', Y_true, Y_pred, sensitive_features)\n assert result['predictionType'] == 'binaryClassification'\n assert 
result['schemaType'] == 'groupMetricSet'\n assert result['schemaVersion'] == 0\n\n assert isinstance(result['trueY'], list)\n assert np.array_equal(result['trueY'], Y_true)\n\n assert isinstance(result['precomputedFeatureBins'], list)\n assert len(result['precomputedFeatureBins']) == 1\n bin_dict = result['precomputedFeatureBins'][0]\n assert isinstance(bin_dict, dict)\n assert np.array_equal(bin_dict['binVector'], sf_int)\n assert np.array_equal(bin_dict['binLabels'], ['a', 'b'])\n\n assert isinstance(result['predictedY'], list)\n assert len(result['predictedY']) == 2\n for i in range(2):\n y_p = result['predictedY'][i]\n assert isinstance(y_p, list)\n assert np.array_equal(y_p, Y_pred[i])\n\n assert isinstance(result['precomputedMetrics'], list)\n assert len(result['precomputedMetrics']) == 1\n metrics_group_0 = result['precomputedMetrics'][0]\n assert isinstance(metrics_group_0, list)\n assert len(metrics_group_0) == 2\n for i in range(2):\n metrics_g0_m0 = metrics_group_0[i]\n assert isinstance(metrics_g0_m0, dict)\n assert len(metrics_g0_m0) == 10\n\n accuracy = metrics_g0_m0['accuracy_score']\n assert isinstance(accuracy, dict)\n gmr = group_accuracy_score(Y_true, Y_pred[i], sensitive_features[0])\n assert gmr.overall == pytest.approx(accuracy['global'])\n assert isinstance(accuracy['bins'], list)\n assert len(accuracy['bins']) == 2\n assert gmr.by_group['a'] == pytest.approx(accuracy['bins'][0])\n assert gmr.by_group['b'] == pytest.approx(accuracy['bins'][1])\n\n roc_auc = metrics_g0_m0['balanced_accuracy_score']\n assert isinstance(roc_auc, dict)\n gmr = group_roc_auc_score(Y_true, Y_pred[i], sensitive_features[0])\n assert gmr.overall == pytest.approx(roc_auc['global'])\n assert isinstance(roc_auc['bins'], list)\n assert len(roc_auc['bins']) == 2\n assert gmr.by_group['a'] == pytest.approx(roc_auc['bins'][0])\n assert gmr.by_group['b'] == pytest.approx(roc_auc['bins'][1])\n\n\ndef test_two_sensitive_features():\n # Single model, two sensitive feature vectors, no names\n Y_true = [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0]\n Y_pred = [[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1]]\n # First sensitive feature is just 'a' and 'b'. 
Second is 4, 5 and 6\n sensitive_features = [['b', 'a', 'a', 'a', 'b', 'b', 'b', 'a', 'b', 'b', 'b'],\n [4, 5, 6, 6, 5, 4, 4, 5, 5, 6, 6]]\n sf_int = [int(x == 'b') for x in sensitive_features[0]]\n\n result = create_group_metric_set('binary_classification', Y_true, Y_pred, sensitive_features)\n assert result['predictionType'] == 'binaryClassification'\n assert result['schemaType'] == 'groupMetricSet'\n assert result['schemaVersion'] == 0\n\n assert isinstance(result['trueY'], list)\n assert np.array_equal(result['trueY'], Y_true)\n\n assert isinstance(result['precomputedFeatureBins'], list)\n assert len(result['precomputedFeatureBins']) == 2\n bin_dict0 = result['precomputedFeatureBins'][0]\n assert isinstance(bin_dict0, dict)\n assert np.array_equal(bin_dict0['binVector'], sf_int)\n assert np.array_equal(bin_dict0['binLabels'], ['a', 'b'])\n bin_dict1 = result['precomputedFeatureBins'][1]\n assert isinstance(bin_dict1, dict)\n assert np.array_equal(bin_dict1['binVector'], [x-4 for x in sensitive_features[1]])\n assert np.array_equal(bin_dict1['binLabels'], ['4', '5', '6'])\n\n assert isinstance(result['predictedY'], list)\n assert len(result['predictedY']) == 1\n y_p = result['predictedY'][0]\n assert isinstance(y_p, list)\n assert np.array_equal(y_p, Y_pred[0])\n\n assert isinstance(result['precomputedMetrics'], list)\n assert len(result['precomputedMetrics']) == 2\n\n # Check the first grouping (with alphabetical labels)\n metrics_group_0 = result['precomputedMetrics'][0]\n assert isinstance(metrics_group_0, list)\n assert len(metrics_group_0) == 1\n metrics_g0_m0 = metrics_group_0[0]\n assert isinstance(metrics_g0_m0, dict)\n assert len(metrics_g0_m0) == 10\n\n accuracy = metrics_g0_m0['accuracy_score']\n assert isinstance(accuracy, dict)\n gmr = group_accuracy_score(Y_true, Y_pred[0], sensitive_features[0])\n assert gmr.overall == pytest.approx(accuracy['global'])\n assert isinstance(accuracy['bins'], list)\n assert len(accuracy['bins']) == 2\n assert gmr.by_group['a'] == pytest.approx(accuracy['bins'][0])\n assert gmr.by_group['b'] == pytest.approx(accuracy['bins'][1])\n\n roc_auc = metrics_g0_m0['balanced_accuracy_score']\n assert isinstance(roc_auc, dict)\n gmr = group_roc_auc_score(Y_true, Y_pred[0], sensitive_features[0])\n assert gmr.overall == pytest.approx(roc_auc['global'])\n assert isinstance(roc_auc['bins'], list)\n assert len(roc_auc['bins']) == 2\n assert gmr.by_group['a'] == pytest.approx(roc_auc['bins'][0])\n assert gmr.by_group['b'] == pytest.approx(roc_auc['bins'][1])\n\n # Check the second grouping (three unique numeric labels)\n metrics_group_1 = result['precomputedMetrics'][1]\n assert isinstance(metrics_group_1, list)\n assert len(metrics_group_1) == 1\n metrics_g1_m0 = metrics_group_1[0]\n assert isinstance(metrics_g1_m0, dict)\n assert len(metrics_g1_m0) == 10\n\n accuracy = metrics_g1_m0['accuracy_score']\n assert isinstance(accuracy, dict)\n gmr = group_accuracy_score(Y_true, Y_pred[0], sensitive_features[1])\n assert gmr.overall == pytest.approx(accuracy['global'])\n assert isinstance(accuracy['bins'], list)\n assert len(accuracy['bins']) == 3\n for i in range(3):\n assert gmr.by_group[i+4] == pytest.approx(accuracy['bins'][i])\n\n roc_auc = metrics_g1_m0['balanced_accuracy_score']\n assert isinstance(roc_auc, dict)\n gmr = group_roc_auc_score(Y_true, Y_pred[0], sensitive_features[1])\n assert gmr.overall == pytest.approx(roc_auc['global'])\n assert isinstance(roc_auc['bins'], list)\n assert len(roc_auc['bins']) == 3\n for i in range(3):\n assert 
gmr.by_group[i+4] == pytest.approx(roc_auc['bins'][i])\n\n\ndef test_two_named_sensitive_features():\n # Single model, two sensitive feature vectors, no names\n Y_true = [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0]\n Y_pred = [[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1]]\n # First sensitive feature is just 'a' and 'b'. Second is 4, 5 and 6\n sensitive_features = [['a', 'b', 'b', 'a', 'b', 'b', 'b', 'a', 'b', 'b', 'b'],\n [4, 5, 6, 6, 5, 4, 4, 5, 5, 6, 6]]\n sf_int = [int(x == 'b') for x in sensitive_features[0]]\n sensitive_feature_titles = ['alpha', 'num']\n\n result = create_group_metric_set('binary_classification',\n Y_true, Y_pred, sensitive_features,\n sensitive_feature_names=sensitive_feature_titles)\n assert result['predictionType'] == 'binaryClassification'\n assert result['schemaType'] == 'groupMetricSet'\n assert result['schemaVersion'] == 0\n\n assert isinstance(result['trueY'], list)\n assert np.array_equal(result['trueY'], Y_true)\n\n assert isinstance(result['precomputedFeatureBins'], list)\n assert len(result['precomputedFeatureBins']) == 2\n bin_dict0 = result['precomputedFeatureBins'][0]\n assert isinstance(bin_dict0, dict)\n assert np.array_equal(bin_dict0['binVector'], sf_int)\n assert np.array_equal(bin_dict0['binLabels'], ['a', 'b'])\n assert sensitive_feature_titles[0] == bin_dict0['featureBinName']\n bin_dict1 = result['precomputedFeatureBins'][1]\n assert isinstance(bin_dict1, dict)\n assert np.array_equal(bin_dict1['binVector'], [x-4 for x in sensitive_features[1]])\n assert np.array_equal(bin_dict1['binLabels'], ['4', '5', '6'])\n assert sensitive_feature_titles[1] == bin_dict1['featureBinName']\n\n\ndef test_two_named_models():\n # Two models, single sensitive feature vector, no names\n Y_true = [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0]\n Y_pred = [[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n [1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0]]\n sensitive_features = [['b', 'a', 'a', 'b', 'b', 'b', 'b', 'a', 'b', 'b', 'b']]\n sf_int = [int(x == 'b') for x in sensitive_features[0]]\n model_names = ['firstModel', 'secondModel']\n\n result = create_group_metric_set('binary_classification',\n Y_true, Y_pred, sensitive_features,\n model_titles=model_names)\n assert result['predictionType'] == 'binaryClassification'\n assert result['schemaType'] == 'groupMetricSet'\n assert result['schemaVersion'] == 0\n\n assert isinstance(result['trueY'], list)\n assert np.array_equal(result['trueY'], Y_true)\n\n assert isinstance(result['precomputedFeatureBins'], list)\n assert len(result['precomputedFeatureBins']) == 1\n bin_dict = result['precomputedFeatureBins'][0]\n assert isinstance(bin_dict, dict)\n assert np.array_equal(bin_dict['binVector'], sf_int)\n assert np.array_equal(bin_dict['binLabels'], ['a', 'b'])\n\n assert isinstance(result['modelNames'], list)\n assert np.array_equal(result['modelNames'], model_names)\n\n\ndef test_multiple_models_multiple_sensitive_features():\n # Three models, two sensitive feature vectors, no names\n Y_true = [0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0]\n Y_pred = [[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0]]\n # First group is just 'a' and 'b'. 
Second is 4, 5 and 6\n sensitive_features = [['a', 'b', 'b', 'a', 'b', 'b', 'b', 'a', 'b', 'b', 'b'],\n [4, 5, 6, 6, 5, 4, 4, 5, 5, 6, 6]]\n sf_int = [int(x == 'b') for x in sensitive_features[0]]\n\n result = create_group_metric_set('binary_classification', Y_true, Y_pred, sensitive_features)\n assert result['predictionType'] == 'binaryClassification'\n assert result['schemaType'] == 'groupMetricSet'\n assert result['schemaVersion'] == 0\n\n assert isinstance(result['trueY'], list)\n assert np.array_equal(result['trueY'], Y_true)\n\n assert isinstance(result['precomputedFeatureBins'], list)\n assert len(result['precomputedFeatureBins']) == 2\n bin_dict0 = result['precomputedFeatureBins'][0]\n assert isinstance(bin_dict0, dict)\n assert np.array_equal(bin_dict0['binVector'], sf_int)\n assert np.array_equal(bin_dict0['binLabels'], ['a', 'b'])\n bin_dict1 = result['precomputedFeatureBins'][1]\n assert isinstance(bin_dict1, dict)\n assert np.array_equal(bin_dict1['binVector'], [x-4 for x in sensitive_features[1]])\n assert np.array_equal(bin_dict1['binLabels'], ['4', '5', '6'])\n\n assert isinstance(result['predictedY'], list)\n assert len(result['predictedY']) == 3\n for i in range(3):\n y_p = result['predictedY'][i]\n assert isinstance(y_p, list)\n assert np.array_equal(y_p, Y_pred[i])\n\n assert isinstance(result['precomputedMetrics'], list)\n assert len(result['precomputedMetrics']) == 2\n\n # Check the first grouping (with alphabetical labels)\n metrics_group_0 = result['precomputedMetrics'][0]\n assert isinstance(metrics_group_0, list)\n assert len(metrics_group_0) == 3\n # Loop over the models\n for i in range(3):\n m_g0 = metrics_group_0[i]\n assert isinstance(m_g0, dict)\n assert len(m_g0) == 10\n\n accuracy = m_g0['accuracy_score']\n assert isinstance(accuracy, dict)\n gmr = group_accuracy_score(Y_true, Y_pred[i], sensitive_features[0])\n assert gmr.overall == pytest.approx(accuracy['global'])\n assert isinstance(accuracy['bins'], list)\n assert len(accuracy['bins']) == 2\n assert gmr.by_group['a'] == pytest.approx(accuracy['bins'][0])\n assert gmr.by_group['b'] == pytest.approx(accuracy['bins'][1])\n\n roc_auc = m_g0['balanced_accuracy_score']\n assert isinstance(roc_auc, dict)\n gmr = group_roc_auc_score(Y_true, Y_pred[i], sensitive_features[0])\n assert gmr.overall == pytest.approx(roc_auc['global'])\n assert isinstance(roc_auc['bins'], list)\n assert len(roc_auc['bins']) == 2\n assert gmr.by_group['a'] == pytest.approx(roc_auc['bins'][0])\n assert gmr.by_group['b'] == pytest.approx(roc_auc['bins'][1])\n\n # Check the second grouping (three unique numeric labels)\n metrics_group_1 = result['precomputedMetrics'][1]\n assert isinstance(metrics_group_1, list)\n assert len(metrics_group_1) == 3\n # Loop over the models\n for i in range(3):\n m_g1 = metrics_group_1[i]\n assert isinstance(m_g1, dict)\n assert len(m_g1) == 10\n accuracy = m_g1['accuracy_score']\n assert isinstance(accuracy, dict)\n gmr = group_accuracy_score(Y_true, Y_pred[i], sensitive_features[1])\n assert gmr.overall == pytest.approx(accuracy['global'])\n assert isinstance(accuracy['bins'], list)\n assert len(accuracy['bins']) == 3\n # Use the fact that the groups are integers\n for j in range(3):\n assert gmr.by_group[j+4] == pytest.approx(accuracy['bins'][j])\n\n roc_auc = m_g1['balanced_accuracy_score']\n assert isinstance(roc_auc, dict)\n gmr = group_roc_auc_score(Y_true, Y_pred[i], sensitive_features[1])\n assert gmr.overall == pytest.approx(roc_auc['global'])\n assert isinstance(roc_auc['bins'], list)\n assert 
len(roc_auc['bins']) == 3\n for i in range(3):\n assert gmr.by_group[i+4] == pytest.approx(roc_auc['bins'][i])\n\n\[email protected](\"transform_y_true\", conversions_for_1d)\[email protected](\"transform_y_pred1\", conversions_for_1d)\[email protected](\"transform_group_1\", conversions_for_1d)\ndef test_argument_types(transform_y_true,\n transform_y_pred1,\n transform_group_1):\n # Three models, two groups, no names\n Y_true = transform_y_true([0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0])\n Y_pred = [[0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1],\n transform_y_pred1([1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1]),\n [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0]]\n g = [[0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1],\n [4, 5, 6, 6, 5, 4, 4, 5, 5, 6, 6]]\n Groups = [g[0],\n transform_group_1(g[1])]\n\n result = create_group_metric_set('binary_classification', Y_true, Y_pred, Groups)\n assert result['predictionType'] == 'binaryClassification'\n assert result['schemaType'] == 'groupMetricSet'\n assert result['schemaVersion'] == 0\n\n assert isinstance(result['trueY'], list)\n assert np.array_equal(result['trueY'], Y_true)\n\n assert isinstance(result['precomputedFeatureBins'], list)\n assert len(result['precomputedFeatureBins']) == 2\n bin_dict0 = result['precomputedFeatureBins'][0]\n assert isinstance(bin_dict0, dict)\n assert np.array_equal(bin_dict0['binVector'], g[0])\n assert np.array_equal(bin_dict0['binLabels'], ['0', '1'])\n bin_dict1 = result['precomputedFeatureBins'][1]\n assert isinstance(bin_dict1, dict)\n assert np.array_equal(bin_dict1['binVector'], [x-4 for x in g[1]])\n assert np.array_equal(bin_dict1['binLabels'], ['4', '5', '6'])\n\n assert isinstance(result['predictedY'], list)\n assert len(result['predictedY']) == 3\n for i in range(3):\n y_p = result['predictedY'][i]\n assert isinstance(y_p, list)\n assert np.array_equal(y_p, Y_pred[i])\n\n assert isinstance(result['precomputedMetrics'], list)\n assert len(result['precomputedMetrics']) == 2\n\n # Check the first grouping (with alphabetical labels)\n metrics_group_0 = result['precomputedMetrics'][0]\n assert isinstance(metrics_group_0, list)\n assert len(metrics_group_0) == 3\n # Loop over the models\n for i in range(3):\n m_g0 = metrics_group_0[i]\n assert isinstance(m_g0, dict)\n assert len(m_g0) == 10\n accuracy = m_g0['accuracy_score']\n assert isinstance(accuracy, dict)\n gmr = group_accuracy_score(Y_true, Y_pred[i], Groups[0])\n assert gmr.overall == pytest.approx(accuracy['global'])\n assert isinstance(accuracy['bins'], list)\n assert len(accuracy['bins']) == 2\n assert gmr.by_group[0] == pytest.approx(accuracy['bins'][0])\n assert gmr.by_group[1] == pytest.approx(accuracy['bins'][1])\n\n # Check the second grouping (three unique numeric labels)\n metrics_group_1 = result['precomputedMetrics'][1]\n assert isinstance(metrics_group_1, list)\n assert len(metrics_group_1) == 3\n # Loop over the models\n for i in range(3):\n m_g1 = metrics_group_1[i]\n assert isinstance(m_g1, dict)\n assert len(m_g1) == 10\n accuracy = m_g1['accuracy_score']\n assert isinstance(accuracy, dict)\n gmr = group_accuracy_score(Y_true, Y_pred[i], Groups[1])\n assert gmr.overall == pytest.approx(accuracy['global'])\n assert isinstance(accuracy['bins'], list)\n assert len(accuracy['bins']) == 3\n # Use the fact that the groups are integers\n for j in range(3):\n assert gmr.by_group[j+4] == pytest.approx(accuracy['bins'][j])\n"
] |
[
[
"numpy.array_equal"
]
] |
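The only API recorded for this row is numpy.array_equal, which the tests lean on heavily; a small reminder of its exact-match semantics:

    import numpy as np

    assert np.array_equal([0, 1, 1], np.array([0, 1, 1]))   # lists and arrays compare element-wise
    assert not np.array_equal([0, 1], [0, 1, 1])            # shape mismatch returns False instead of raising
    assert not np.array_equal([0.1], [0.1 + 1e-9])          # exact comparison; use np.allclose for tolerances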
Alessi0X/Graph_Sampling
|
[
"b906c35314ddfecca0132092a21d21ca8542073b"
] |
[
"build/lib/Graph_Sampling/SRW_RWF_ISRW.py"
] |
[
"import random\nimport time\nimport datetime\nimport io\nimport array,re,itertools\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom itertools import groupby\n\nclass SRW_RWF_ISRW:\n\n def __init__(self):\n self.growth_size = 2\n self.T = 100 #number of iterations\n #with a probability (1-fly_back_prob) select a neighbor node\n #with a probability fly_back_prob go back to the initial vertex\n self.fly_back_prob = 0.15\n\n def random_walk_sampling_simple(self,complete_graph, nodes_to_sample):\n complete_graph = nx.convert_node_labels_to_integers(complete_graph, 0, 'default', True)\n # giving unique id to every node same as built-in function id\n for n, data in complete_graph.nodes(data=True):\n complete_graph.node[n]['id'] = n\n\n nr_nodes = len(complete_graph.nodes())\n upper_bound_nr_nodes_to_sample = nodes_to_sample\n index_of_first_random_node = random.randint(0, nr_nodes-1)\n sampled_graph = nx.Graph()\n\n sampled_graph.add_node(complete_graph.node[index_of_first_random_node]['id'])\n\n iteration = 1\n edges_before_t_iter = 0\n curr_node = index_of_first_random_node\n while sampled_graph.number_of_nodes() != upper_bound_nr_nodes_to_sample:\n edges = [n for n in complete_graph.neighbors(curr_node)]\n index_of_edge = random.randint(0, len(edges) - 1)\n chosen_node = edges[index_of_edge]\n sampled_graph.add_node(chosen_node)\n sampled_graph.add_edge(curr_node, chosen_node)\n curr_node = chosen_node\n iteration = iteration+1\n\n if iteration % self.T == 0:\n if ((sampled_graph.number_of_edges() - edges_before_t_iter) < self.growth_size):\n curr_node = random.randint(0, nr_nodes-1)\n edges_before_t_iter = sampled_graph.number_of_edges()\n return sampled_graph\n\n def random_walk_sampling_with_fly_back(self,complete_graph, nodes_to_sample, fly_back_prob):\n complete_graph = nx.convert_node_labels_to_integers(complete_graph, 0, 'default', True)\n # giving unique id to every node same as built-in function id\n for n, data in complete_graph.nodes(data=True):\n complete_graph.node[n]['id'] = n\n\n nr_nodes = len(complete_graph.nodes())\n upper_bound_nr_nodes_to_sample = nodes_to_sample\n\n index_of_first_random_node = random.randint(0, nr_nodes-1)\n sampled_graph = nx.Graph()\n\n sampled_graph.add_node(complete_graph.node[index_of_first_random_node]['id'])\n\n iteration = 1\n edges_before_t_iter = 0\n curr_node = index_of_first_random_node\n while sampled_graph.number_of_nodes() != upper_bound_nr_nodes_to_sample:\n edges = [n for n in complete_graph.neighbors(curr_node)]\n index_of_edge = random.randint(0, len(edges) - 1)\n chosen_node = edges[index_of_edge]\n sampled_graph.add_node(chosen_node)\n sampled_graph.add_edge(curr_node, chosen_node)\n choice = np.random.choice(['prev','neigh'], 1, p=[fly_back_prob,1-fly_back_prob])\n if choice == 'neigh':\n curr_node = chosen_node\n iteration=iteration+1\n\n if iteration % self.T == 0:\n if ((sampled_graph.number_of_edges() - edges_before_t_iter) < self.growth_size):\n curr_node = random.randint(0, nr_nodes-1)\n print (\"Choosing another random node to continue random walk \")\n edges_before_t_iter = sampled_graph.number_of_edges()\n\n return sampled_graph\n\n def random_walk_induced_graph_sampling(self, complete_graph, nodes_to_sample):\n complete_graph = nx.convert_node_labels_to_integers(complete_graph, 0, 'default', True)\n # giving unique id to every node same as built-in function id\n for n, data in complete_graph.nodes(data=True):\n complete_graph.node[n]['id'] = n\n \n nr_nodes = len(complete_graph.nodes())\n 
upper_bound_nr_nodes_to_sample = nodes_to_sample\n index_of_first_random_node = random.randint(0, nr_nodes - 1)\n\n Sampled_nodes = set([complete_graph.node[index_of_first_random_node]['id']])\n\n iteration = 1\n nodes_before_t_iter = 0\n curr_node = index_of_first_random_node\n while len(Sampled_nodes) != upper_bound_nr_nodes_to_sample:\n edges = [n for n in complete_graph.neighbors(curr_node)]\n index_of_edge = random.randint(0, len(edges) - 1)\n chosen_node = edges[index_of_edge]\n Sampled_nodes.add(complete_graph.node[chosen_node]['id'])\n curr_node = chosen_node\n iteration=iteration+1\n\n if iteration % self.T == 0:\n if ((len(Sampled_nodes) - nodes_before_t_iter) < self.growth_size):\n curr_node = random.randint(0, nr_nodes - 1)\n nodes_before_t_iter = len(Sampled_nodes)\n\n sampled_graph = complete_graph.subgraph(Sampled_nodes)\n\n return sampled_graph\n"
] |
[
[
"numpy.random.choice"
]
] |
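numpy.random.choice is the call that drives the fly-back behaviour in the sampled random-walk code above; a minimal sketch of that weighted draw (the probability value is copied from the row, the variable names are illustrative):

    import numpy as np

    fly_back_prob = 0.15
    # 'prev' is drawn with probability 0.15, 'neigh' with probability 0.85.
    choice = np.random.choice(['prev', 'neigh'], size=1, p=[fly_back_prob, 1 - fly_back_prob])
    if choice[0] == 'neigh':
        pass  # advance to the chosen neighbour; otherwise fly back towards the start node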
BaldrLector/NeuralTracking
|
[
"ddf6a629937bc226b35928bea2f158aef833ed72"
] |
[
"render/camera.py"
] |
[
"'''\nMIT License\n\nCopyright (c) 2019 Shunsuke Saito, Zeng Huang, and Ryota Natsume\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\nimport cv2\nimport numpy as np\n\nfrom .glm import ortho\n\n\nclass Camera:\n def __init__(self, width=1600, height=1200):\n # Focal Length\n # equivalent 50mm\n focal = np.sqrt(width * width + height * height)\n self.focal_x = focal\n self.focal_y = focal\n # Principal Point Offset\n self.principal_x = width / 2\n self.principal_y = height / 2\n # Axis Skew\n self.skew = 0\n # Image Size\n self.width = width\n self.height = height\n\n self.near = 1\n self.far = 10\n\n # Camera Center\n self.eye = np.array([0, 0, 1])\n self.center = np.array([0, 0, 0])\n self.direction = np.array([0, 0, -1])\n self.right = np.array([1, 0, 0])\n self.up = np.array([0, 1, 0])\n\n self.ortho_ratio = None\n\n def sanity_check(self):\n self.center = self.center.reshape([-1])\n self.direction = self.direction.reshape([-1])\n self.right = self.right.reshape([-1])\n self.up = self.up.reshape([-1])\n\n assert len(self.center) == 3\n assert len(self.direction) == 3\n assert len(self.right) == 3\n assert len(self.up) == 3\n\n @staticmethod\n def normalize_vector(v):\n v_norm = np.linalg.norm(v)\n return v if v_norm == 0 else v / v_norm\n\n def get_real_z_value(self, z):\n z_near = self.near\n z_far = self.far\n z_n = 2.0 * z - 1.0\n z_e = 2.0 * z_near * z_far / (z_far + z_near - z_n * (z_far - z_near))\n return z_e\n\n def get_rotation_matrix(self):\n rot_mat = np.eye(3)\n d = self.eye - self.center\n d = -self.normalize_vector(d)\n u = self.up\n self.right = -np.cross(u, d)\n u = np.cross(d, self.right)\n rot_mat[0, :] = self.right\n rot_mat[1, :] = u\n rot_mat[2, :] = d\n\n # s = self.right\n # s = self.normalize_vector(s)\n # rot_mat[0, :] = s\n # u = self.up\n # u = self.normalize_vector(u)\n # rot_mat[1, :] = -u\n # rot_mat[2, :] = self.normalize_vector(self.direction)\n\n return rot_mat\n\n def get_translation_vector(self):\n rot_mat = self.get_rotation_matrix()\n trans = -np.dot(rot_mat.T, self.eye)\n return trans\n\n def get_intrinsic_matrix(self):\n int_mat = np.eye(3)\n\n int_mat[0, 0] = self.focal_x\n int_mat[1, 1] = self.focal_y\n int_mat[0, 1] = self.skew\n int_mat[0, 2] = self.principal_x\n int_mat[1, 2] = self.principal_y\n\n return int_mat\n\n def get_projection_matrix(self):\n ext_mat = self.get_extrinsic_matrix()\n int_mat = self.get_intrinsic_matrix()\n\n return np.matmul(int_mat, ext_mat)\n\n def get_extrinsic_matrix(self):\n rot_mat = self.get_rotation_matrix()\n int_mat = 
self.get_intrinsic_matrix()\n trans = self.get_translation_vector()\n\n extrinsic = np.eye(4)\n extrinsic[:3, :3] = rot_mat\n extrinsic[:3, 3] = trans\n\n return extrinsic[:3, :]\n\n def set_rotation_matrix(self, rot_mat):\n self.direction = rot_mat[2, :]\n self.up = -rot_mat[1, :]\n self.right = rot_mat[0, :]\n\n def set_intrinsic_matrix(self, int_mat):\n self.focal_x = int_mat[0, 0]\n self.focal_y = int_mat[1, 1]\n self.skew = int_mat[0, 1]\n self.principal_x = int_mat[0, 2]\n self.principal_y = int_mat[1, 2]\n\n def set_projection_matrix(self, proj_mat):\n res = cv2.decomposeProjectionMatrix(proj_mat)\n int_mat, rot_mat, camera_center_homo = res[0], res[1], res[2]\n camera_center = camera_center_homo[0:3] / camera_center_homo[3]\n camera_center = camera_center.reshape(-1)\n int_mat = int_mat / int_mat[2][2]\n\n self.set_intrinsic_matrix(int_mat)\n self.set_rotation_matrix(rot_mat)\n self.center = camera_center\n\n self.sanity_check()\n\n def get_gl_matrix(self):\n z_near = self.near\n z_far = self.far\n rot_mat = self.get_rotation_matrix()\n int_mat = self.get_intrinsic_matrix()\n trans = self.get_translation_vector()\n\n extrinsic = np.eye(4)\n extrinsic[:3, :3] = rot_mat\n extrinsic[:3, 3] = trans\n axis_adj = np.eye(4)\n axis_adj[2, 2] = -1\n axis_adj[1, 1] = -1\n model_view = np.matmul(axis_adj, extrinsic)\n\n projective = np.zeros([4, 4])\n projective[:2, :2] = int_mat[:2, :2]\n projective[:2, 2:3] = -int_mat[:2, 2:3]\n projective[3, 2] = -1\n projective[2, 2] = (z_near + z_far)\n projective[2, 3] = (z_near * z_far)\n\n if self.ortho_ratio is None:\n ndc = ortho(0, self.width, 0, self.height, z_near, z_far)\n perspective = np.matmul(ndc, projective)\n else:\n perspective = ortho(-self.width * self.ortho_ratio / 2, self.width * self.ortho_ratio / 2,\n -self.height * self.ortho_ratio / 2, self.height * self.ortho_ratio / 2,\n z_near, z_far)\n\n return perspective, model_view\n\n\ndef KRT_from_P(proj_mat, normalize_K=True):\n res = cv2.decomposeProjectionMatrix(proj_mat)\n K, Rot, camera_center_homog = res[0], res[1], res[2]\n camera_center = camera_center_homog[0:3] / camera_center_homog[3]\n trans = -Rot.dot(camera_center)\n if normalize_K:\n K = K / K[2][2]\n return K, Rot, trans\n\n\ndef MVP_from_P(proj_mat, width, height, near=0.1, far=10000):\n '''\n Convert OpenCV camera calibration matrix to OpenGL projection and model view matrix\n :param proj_mat: OpenCV camera projeciton matrix\n :param width: Image width\n :param height: Image height\n :param near: Z near value\n :param far: Z far value\n :return: OpenGL projection matrix and model view matrix\n '''\n res = cv2.decomposeProjectionMatrix(proj_mat)\n K, Rot, camera_center_homog = res[0], res[1], res[2]\n camera_center = camera_center_homog[0:3] / camera_center_homog[3]\n trans = -Rot.dot(camera_center)\n K = K / K[2][2]\n\n extrinsic = np.eye(4)\n extrinsic[:3, :3] = Rot\n extrinsic[:3, 3:4] = trans\n axis_adj = np.eye(4)\n axis_adj[2, 2] = -1\n axis_adj[1, 1] = -1\n model_view = np.matmul(axis_adj, extrinsic)\n\n zFar = far\n zNear = near\n projective = np.zeros([4, 4])\n projective[:2, :2] = K[:2, :2]\n projective[:2, 2:3] = -K[:2, 2:3]\n projective[3, 2] = -1\n projective[2, 2] = (zNear + zFar)\n projective[2, 3] = (zNear * zFar)\n\n ndc = ortho(0, width, 0, height, zNear, zFar)\n\n perspective = np.matmul(ndc, projective)\n\n return perspective, model_view\n\n\nclass ImageCamera(Camera):\n def __init__(self, fx, fy, cx, cy, height, width):\n # Focal Length\n # equivalent 50mm\n focal = np.sqrt(width * width + height * 
height)\n self.focal_x = fx\n self.focal_y = fy\n # Principal Point Offset\n self.principal_x = cx\n self.principal_y = cy\n # Axis Skew\n self.skew = 0\n # Image Size\n self.width = width\n self.height = height\n\n self.near = 1\n self.far = 10\n\n # Camera Center\n self.eye = np.array([0, 0, -3.6])\n self.center = np.array([0, 0, 0])\n self.direction = np.array([0, 0, -1])\n self.right = np.array([1, 0, 0])\n self.up = np.array([0, 1, 0])\n self.ortho_ratio = None\n"
] |
[
[
"numpy.dot",
"numpy.sqrt",
"numpy.eye",
"numpy.matmul",
"numpy.linalg.norm",
"numpy.cross",
"numpy.array",
"numpy.zeros"
]
] |
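The numpy calls listed above implement a look-at style rotation in Camera.get_rotation_matrix; a compact sketch of that kind of construction follows. Sign conventions for the up and viewing axes vary between renderers, so this is an illustrative sketch rather than a drop-in equivalent of the row's code:

    import numpy as np

    def look_at_rotation(eye, center, up):
        d = (center - eye) / np.linalg.norm(center - eye)   # forward axis, unit length
        right = np.cross(d, up)
        right = right / np.linalg.norm(right)
        true_up = np.cross(right, d)

        rot = np.eye(3)
        rot[0, :] = right
        rot[1, :] = true_up
        rot[2, :] = -d                                       # camera looks down its negative z axis
        return rot

    R = look_at_rotation(np.array([0., 0., 1.]), np.zeros(3), np.array([0., 1., 0.]))
    t = -np.dot(R, np.array([0., 0., 1.]))                   # translation part of the extrinsic [R | t]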
cbd-nslc/LungCancerDetector
|
[
"50b2517814c68368a86752162d70b00115f9bd4a"
] |
[
"DSB2017/net_classifier.py"
] |
[
"import torch\nfrom torch import nn\nfrom DSB2017.layers import *\nfrom torch.nn import DataParallel\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset\nfrom scipy.ndimage.interpolation import rotate\nimport numpy as np\nimport os\n\nconfig = {}\nconfig['topk'] = 5\nconfig['resample'] = None\nconfig['datadir'] = '/run/shm/preprocess_1_3/'\nconfig['preload_train'] = True\nconfig['bboxpath'] = '../cpliangming/results/res18_prep3/bbox/'\nconfig['labelfile'] = '../stage1_labels.csv'\nconfig['preload_val'] = True\n\nconfig['padmask'] = False\n\nconfig['crop_size'] = [96, 96, 96]\nconfig['scaleLim'] = [0.85, 1.15]\nconfig['radiusLim'] = [6, 100]\nconfig['jitter_range'] = 0.15\nconfig['isScale'] = True\n\nconfig['random_sample'] = True\nconfig['T'] = 1\nconfig['topk'] = 5\nconfig['stride'] = 4\nconfig['augtype'] = {'flip': True, 'swap': False, 'rotate': False, 'scale': False}\n\nconfig['detect_th'] = 0.05\nconfig['conf_th'] = -1\nconfig['nms_th'] = 0.05\nconfig['filling_value'] = 160\n\nconfig['startepoch'] = 20\nconfig['lr_stage'] = np.array([50, 100, 140, 160])\nconfig['lr'] = [0.01, 0.001, 0.0001, 0.00001]\nconfig['miss_ratio'] = 1\nconfig['miss_thresh'] = 0.03\nconfig['anchors'] = [10, 30, 60]\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # The first few layers consumes the most memory, so use simple convolution to save memory.\n # Call these layers preBlock, i.e., before the residual blocks of later layers.\n self.preBlock = nn.Sequential(\n nn.Conv3d(1, 24, kernel_size=3, padding=1),\n nn.BatchNorm3d(24),\n nn.ReLU(inplace=True),\n nn.Conv3d(24, 24, kernel_size=3, padding=1),\n nn.BatchNorm3d(24),\n nn.ReLU(inplace=True))\n\n # 3 poolings, each pooling downsamples the feature map by a factor 2.\n # 3 groups of blocks. 
The first block of each group has one pooling.\n num_blocks_forw = [2, 2, 3, 3]\n num_blocks_back = [3, 3]\n self.featureNum_forw = [24, 32, 64, 64, 64]\n self.featureNum_back = [128, 64, 64]\n for i in range(len(num_blocks_forw)):\n blocks = []\n for j in range(num_blocks_forw[i]):\n if j == 0:\n blocks.append(PostRes(self.featureNum_forw[i], self.featureNum_forw[i + 1]))\n else:\n blocks.append(PostRes(self.featureNum_forw[i + 1], self.featureNum_forw[i + 1]))\n setattr(self, 'forw' + str(i + 1), nn.Sequential(*blocks))\n\n for i in range(len(num_blocks_back)):\n blocks = []\n for j in range(num_blocks_back[i]):\n if j == 0:\n if i == 0:\n addition = 3\n else:\n addition = 0\n blocks.append(PostRes(self.featureNum_back[i + 1] + self.featureNum_forw[i + 2] + addition,\n self.featureNum_back[i]))\n else:\n blocks.append(PostRes(self.featureNum_back[i], self.featureNum_back[i]))\n setattr(self, 'back' + str(i + 2), nn.Sequential(*blocks))\n\n self.maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)\n self.maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)\n self.maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)\n self.maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)\n self.unmaxpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)\n self.unmaxpool2 = nn.MaxUnpool3d(kernel_size=2, stride=2)\n\n self.path1 = nn.Sequential(\n nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),\n nn.BatchNorm3d(64),\n nn.ReLU(inplace=True))\n self.path2 = nn.Sequential(\n nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),\n nn.BatchNorm3d(64),\n nn.ReLU(inplace=True))\n self.drop = nn.Dropout3d(p=0.2, inplace=False)\n self.output = nn.Sequential(nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),\n nn.ReLU(),\n # nn.Dropout3d(p = 0.3),\n nn.Conv3d(64, 5 * len(config['anchors']), kernel_size=1))\n\n def forward(self, x, coord):\n out = self.preBlock(x) # 16\n out_pool, indices0 = self.maxpool1(out)\n out1 = self.forw1(out_pool) # 32\n out1_pool, indices1 = self.maxpool2(out1)\n out2 = self.forw2(out1_pool) # 64\n # out2 = self.drop(out2)\n out2_pool, indices2 = self.maxpool3(out2)\n out3 = self.forw3(out2_pool) # 96\n out3_pool, indices3 = self.maxpool4(out3)\n out4 = self.forw4(out3_pool) # 96\n # out4 = self.drop(out4)\n\n rev3 = self.path1(out4)\n comb3 = self.back3(torch.cat((rev3, out3), 1)) # 96+96\n # comb3 = self.drop(comb3)\n rev2 = self.path2(comb3)\n\n feat = self.back2(torch.cat((rev2, out2, coord), 1)) # 64+64\n comb2 = self.drop(feat)\n out = self.output(comb2)\n size = out.size()\n out = out.view(out.size(0), out.size(1), -1)\n # out = out.transpose(1, 4).transpose(1, 2).transpose(2, 3).contiguous()\n out = out.transpose(1, 2).contiguous().view(size[0], size[2], size[3], size[4], len(config['anchors']), 5)\n # out = out.view(-1, 5)\n return feat, out\n\n\nclass CaseNet(nn.Module):\n def __init__(self, topk):\n super(CaseNet, self).__init__()\n self.NoduleNet = Net()\n self.fc1 = nn.Linear(128, 64)\n self.fc2 = nn.Linear(64, 1)\n self.pool = nn.MaxPool3d(kernel_size=2)\n self.dropout = nn.Dropout(0.5)\n self.baseline = nn.Parameter(torch.Tensor([-30.0]).float())\n self.Relu = nn.ReLU()\n\n def forward(self, xlist, coordlist):\n # xlist: n x k x 1x 96 x 96 x 96\n # coordlist: n x k x 3 x 24 x 24 x 24\n xsize = xlist.size()\n corrdsize = coordlist.size()\n xlist = xlist.view(-1, xsize[2], xsize[3], xsize[4], xsize[5])\n coordlist = coordlist.view(-1, corrdsize[2], corrdsize[3], corrdsize[4], corrdsize[5])\n\n noduleFeat, 
nodulePred = self.NoduleNet(xlist, coordlist)\n nodulePred = nodulePred.contiguous().view(corrdsize[0], corrdsize[1], -1)\n\n featshape = noduleFeat.size() # nk x 128 x 24 x 24 x24\n centerFeat = self.pool(noduleFeat[:, :, featshape[2] // 2 - 1:featshape[2] // 2 + 1,\n featshape[3] // 2 - 1:featshape[3] // 2 + 1,\n featshape[4] // 2 - 1:featshape[4] // 2 + 1])\n centerFeat = centerFeat[:, :, 0, 0, 0]\n out = self.dropout(centerFeat)\n out = self.Relu(self.fc1(out))\n out = torch.sigmoid(self.fc2(out))\n out = out.view(xsize[0], xsize[1])\n base_prob = torch.sigmoid(self.baseline)\n casePred = 1 - torch.prod(1 - out, dim=1) * (1 - base_prob.expand(out.size()[0]))\n return nodulePred, casePred, out\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.sigmoid",
"torch.Tensor",
"torch.cat",
"torch.nn.ConvTranspose3d",
"torch.nn.Dropout3d",
"torch.nn.Linear",
"torch.nn.MaxPool3d",
"torch.nn.Conv3d",
"torch.nn.BatchNorm3d",
"torch.prod",
"torch.nn.ReLU",
"numpy.array",
"torch.nn.MaxUnpool3d"
]
] |
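The torch calls above end in a noisy-OR style aggregation inside CaseNet.forward (1 - torch.prod(1 - out, dim=1), scaled by a learnable baseline); a minimal sketch with made-up shapes to make that formula concrete:

    import torch

    logits = torch.randn(2, 5)                        # 2 cases, top-k = 5 candidate nodules each
    p_nodule = torch.sigmoid(logits)                  # per-nodule malignancy probability
    baseline = torch.sigmoid(torch.tensor(-30.0))     # near-zero prior, the value the learnable baseline starts from

    # The case is positive unless every nodule (and the baseline term) is negative.
    p_case = 1 - torch.prod(1 - p_nodule, dim=1) * (1 - baseline)
    print(p_case.shape)                               # torch.Size([2])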
ckrogers/aviio_technical_component
|
[
"85cc3209098d335aee404937c0117a869d31650a"
] |
[
"aviio_technical_component/aviio_technical_component.py"
] |
[
"import logging\nimport os\nimport pandas as pd\nfrom pathlib import Path\nimport requests\nfrom requests.packages.urllib3.util.retry import Retry\nfrom requests.adapters import HTTPAdapter\n\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nhandler = logging.FileHandler(\"aviio_technical_component.log\", \"w\", \"utf-8\")\nlogger.addHandler(handler)\n\npackage_dir = Path(__file__).parent.absolute()\nAPI_TOKEN = os.getenv(\"TOKEN\")\nAPI_URL = \"https://atlas.pretio.in/atlas/coding_quiz\"\n\n\ndef get_data_from_api():\n \"\"\"\n Consume API endpoint, which returns a list of offers.\n Allow retry on status 429 after waiting 60s.\n All other http errors will be raised.\n \"\"\"\n retry_strategy = Retry(\n total=1,\n status_forcelist=[429],\n method_whitelist=[\"HEAD\", \"GET\"],\n backoff_factor=60,\n )\n adapter = HTTPAdapter(max_retries=retry_strategy)\n session = requests.Session()\n session.mount(\"https://\", adapter)\n result = session.get(API_URL, headers={\"Authorization\": f\"Bearer {API_TOKEN}\"})\n result.raise_for_status()\n\n return result.json()\n\n\ndef structure_data(data):\n \"\"\"\n Structure json data from API_URL to dataframe.\n Sort data by ascending payout.\n \"\"\"\n offers_list = data[\"rows\"]\n df = pd.DataFrame(offers_list)\n df[[\"cap\", \"payout\"]] = df[[\"cap\", \"payout\"]].apply(pd.to_numeric, errors=\"ignore\")\n df_sorted = df.sort_values(by=\"payout\", ascending=True)\n df_sorted.reset_index(drop=True, inplace=True)\n return df_sorted\n\n\ndef save_to_csv(dataframe, output_dir=None):\n \"\"\"\n Save pandas dataframe to offers.csv.\n Optional argument of output directory, otherwise csv will be saved to\n ./data/offers.csv\n \"\"\"\n csv_filename = \"offers.csv\"\n if not output_dir:\n output_dir = package_dir / \"data\"\n\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n\n csv_path = output_dir / csv_filename\n dataframe.to_csv(csv_path, index=False)\n message = f\"Data saved to path: {csv_path}\"\n print(message)\n logging.info(message)\n\n\nif __name__ == \"__main__\":\n offers_data = get_data_from_api()\n structured_data = structure_data(offers_data)\n save_to_csv(structured_data)\n"
] |
[
[
"pandas.DataFrame"
]
] |
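Only pandas.DataFrame is recorded for this row, but the surrounding calls (pd.to_numeric, sort_values) carry the logic of structure_data; a small sketch of the same pattern. Note that recent pandas releases deprecate errors="ignore" in pd.to_numeric, so "coerce" is used here instead:

    import pandas as pd

    rows = [{"offer": "A", "cap": "10", "payout": "2.5"},
            {"offer": "B", "cap": "7",  "payout": "1.0"}]

    df = pd.DataFrame(rows)
    df[["cap", "payout"]] = df[["cap", "payout"]].apply(pd.to_numeric, errors="coerce")
    df = df.sort_values(by="payout", ascending=True).reset_index(drop=True)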
akhambhati/pyEisen
|
[
"62ec6cb3168ff6f5c9ef81e51b0041b4809d2613"
] |
[
"pyeisen/family.py"
] |
[
"\"\"\"\nFunctions and Wrappers to define families of kernels for signal analysis.\n\nAuthor: Ankit N. Khambhati\nAdapted from: https://github.com/pennmem/ptsa_new/blob/master/ptsa/wavelet.py\nLast Updated: 2018/08/31\n\"\"\"\n\nimport numpy as np\nfrom scipy.signal import morlet as scipy_morlet\n\n\ndef morlet(freqs, cycles, Fs, n_win=7, complete=True):\n \"\"\"\n Calculate Morlet wavelets with the total energy normalized to 1.\n\n Calls the scipy.signal.wavelet.morlet() function to generate\n Morlet wavelets with the specified frequencies, samplerates, and\n widths (in cycles); see the docstring for the scipy morlet function\n for details. These wavelets are normalized before they are returned.\n\n Parameters\n ----------\n freqs: np.ndarray, shape: (n_wavelet, dtype=float)\n The center frequency (e.g. 10 Hz) for each wavelet.\n cycles: np.ndarray, shape (n_wavelet, dtype=float)\n The number of cycles for each wavelet.\n Needs to be same size as freqs.\n Fs: float\n The sampling frequency (e.g. 200 Hz) of the signal to which wavelet\n will be applied.\n n_win: float, (default=7)\n Length of the wavelet that will be sampled (usually >= 7).\n Provides a multiplicative factor for time sampling based on\n the requested cycles at the center frequency.\n complete : bool, (default=True)\n Whether to generate a complete or standard approximation to\n the complete version of a Morlet wavelet. Complete should be True,\n especially for low (<=5) values of width. See\n scipy.signal.wavelet.morlet() for details.\n\n Returns\n -------\n family: np.ndarray, shape: (n_wavelet, n_samples)\n A family of Morlet wavelets equal in number to the frequency/cycle\n pairs provided. Each wavelet entry spans same length but diff. decay.\n \"\"\"\n\n # Check Inputs:\n if len(freqs) < 1:\n raise ValueError('At least one frequency must be specified.')\n if len(cycles) != len(freqs):\n raise ValueError(\n 'Each frequency must have an associated number of cycles.')\n if Fs <= 0:\n raise ValueError('Sampling frequency must be non-negative.')\n\n # Temporal standard deviation of the wavelet (ratio of cycles to frequency)\n st = cycles / (2 * np.pi * freqs)\n\n # Support length for the wavelet (length in samples)\n max_len = int(np.max(np.ceil(st * Fs * n_win)))\n\n # Scaling factor for the wavelet.\n # Depends on frequency, cycles, support length, and sampling frequency.\n scales = (freqs * max_len) / (2 * cycles * Fs)\n\n # generate list of unnormalized wavelets:\n family = [\n scipy_morlet(max_len, w=cycles[i], s=scales[i], complete=complete)\n for i in range(len(scales))\n ]\n\n # generate list of energies for the wavelets:\n energies = [np.sqrt(np.sum(np.abs(wavelet)**2)) for wavelet in family]\n\n # normalize the wavelets by dividing each one by its energy:\n family = [family[i] / energies[i] for i in range(len(family))]\n\n # Convert to an array bank\n family = np.array(family)\n\n # Sort all vals to the scale\n scales = 1 / scales\n scale_ord = np.argsort(scales)\n\n wavelet_family = {\n 'kernel': family[scale_ord],\n 'wavelet': {\n 'scales': scales[scale_ord],\n 'freqs': freqs[scale_ord],\n 'cycles': cycles[scale_ord]\n },\n 'sample': {\n 'time': np.arange(max_len) / Fs\n },\n 'axis_ord': np.array(['wavelet', 'sample'])\n }\n\n return wavelet_family\n"
] |
[
[
"numpy.abs",
"numpy.arange",
"numpy.ceil",
"scipy.signal.morlet",
"numpy.argsort",
"numpy.array"
]
] |
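The core of the morlet() wrapper above is the unit-energy normalisation; a numpy-only sketch of that step (scipy.signal.morlet is deprecated in recent SciPy releases, so a Gaussian-windowed complex exponential stands in for the kernel here):

    import numpy as np

    n, w, s = 256, 7.0, 1.0
    x = np.linspace(-2 * np.pi * s, 2 * np.pi * s, n)
    wavelet = np.exp(1j * w * x) * np.exp(-0.5 * x ** 2) * np.pi ** (-0.25)

    # Divide by sqrt(sum |w|^2) so the kernel has unit total energy, as in the wrapper.
    energy = np.sqrt(np.sum(np.abs(wavelet) ** 2))
    wavelet = wavelet / energy
    assert np.isclose(np.sum(np.abs(wavelet) ** 2), 1.0)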
Bhaskers-Blu-Org1/skills-for-planning
|
[
"98575d963e63d2c84075df9500c74c14f8a8553b"
] |
[
"factops/factops/cgrid.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport time\n\nclass CGridWorld:\n '''Continuous gridworld domain\n '''\n def __init__(self, n_dims=2, discrete_actions=False):\n if discrete_actions:\n self.n_actions = 9\n else:\n self.n_actions = n_dims\n self.discrete_actions = discrete_actions\n self.n_states = n_dims\n self.state = self.random_state()\n\n def reset(self):\n self.state = self.random_state()\n\n def random_state(self):\n return np.random.uniform(0, 1, size=self.n_states)\n\n def random_action(self):\n if self.discrete_actions:\n a = np.random.randint(self.n_actions)\n else:\n a = np.random.uniform(-0.1,0.1, size=self.n_actions)\n return a\n\n def discrete2continuous(self, a):\n assert np.all(a >= 0) and np.all(a < self.n_actions)\n ax = a % 3 - 1\n ay = -1*(a // 3 - 1)\n return 0.1*np.stack([ax, ay]).transpose()\n\n def step(self, action):\n if self.discrete_actions:\n action = self.discrete2continuous(action)\n assert len(action)==self.n_states\n self.state += action + np.random.normal(0, 0.01, size=self.n_states)\n self.state = np.clip(self.state,0,1)\n\n s = self.get_state()\n r = 0\n done = False\n return s, r, done\n\n def get_state(self):\n return np.copy(self.state)\n\n def plot(self, ax=None):\n n_subplots = self.n_states//2 + 3\n if ax is None:\n _, ax = plt.subplots(nrows=1, ncols=(self.n_states//2), figsize=(4,4))\n for i in range(self.n_states//2):\n ax.set_xlim([0,1])\n ax.set_ylim([0,1])\n ax.scatter(self.state[2*i],self.state[2*i+1])\n ax.set_xticks([])\n ax.set_yticks([])\n # ax.set_xlabel('XY({})'.format(i))\n return ax\n\ndef run_agent(env, n_trials=1, n_samples=100, video=False):\n if video:\n ax = env.plot()\n fig = plt.gcf()\n fig.show()\n states = [env.get_state()]\n actions = []\n for trial in range(n_trials):\n for sample in range(n_samples):\n a = env.random_action()\n _, _, done = env.step(a)\n actions.append(a)\n states.append(env.get_state())\n\n if video:\n ax.clear()\n env.plot(ax)\n fig.canvas.draw()\n fig.canvas.flush_events()\n\n if done:\n time.sleep(1)\n env.reset()\n break\n return np.stack(states,axis=0), np.stack(actions,axis=0)\n\n#%%\nif __name__ == '__main__':\n env = CGridWorld()\n run_agent(env, n_samples=100, video=True)\n"
] |
[
[
"numpy.clip",
"matplotlib.pyplot.subplots",
"numpy.stack",
"matplotlib.pyplot.gcf",
"numpy.all",
"numpy.copy",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.random.randint"
]
] |
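A condensed sketch of the CGridWorld step logic implied by the numpy calls above (random action plus Gaussian noise, clipped to the unit square, trajectory collected with np.stack); the constants mirror the row's defaults:

    import numpy as np

    state = np.random.uniform(0.0, 1.0, size=2)           # random start in the unit square
    trajectory = [np.copy(state)]
    for _ in range(100):
        action = np.random.uniform(-0.1, 0.1, size=2)
        state = np.clip(state + action + np.random.normal(0, 0.01, size=2), 0.0, 1.0)
        trajectory.append(np.copy(state))

    states = np.stack(trajectory, axis=0)                 # shape (101, 2)
    assert np.all((states >= 0.0) & (states <= 1.0))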
ianthomas23/spatialpandas
|
[
"b6809e79f615e0be6fda6845b9725b5f87529c56"
] |
[
"spatialpandas/geometry/base.py"
] |
[
"import re\nfrom collections.abc import Container, Iterable\nfrom numbers import Integral\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\nfrom pandas.api.extensions import ExtensionArray, ExtensionDtype\nfrom pandas.api.types import is_array_like\n\nfrom .._optional_imports import gp, sg\nfrom ..spatialindex import HilbertRtree\nfrom ..spatialindex.rtree import _distances_from_bounds\nfrom ..utils import ngjit\n\n\ndef _unwrap_geometry(a, element_dtype):\n try:\n if np.isscalar(a) and np.isnan(a):\n # replace top-level nana with None\n return None\n except (TypeError, ValueError):\n # Not nan, continue\n pass\n if isinstance(a, Geometry):\n return a.data.as_py()\n elif sg and isinstance(a, sg.base.BaseGeometry):\n return element_dtype._shapely_to_coordinates(a)\n else:\n return a\n\n\nclass GeometryDtype(ExtensionDtype):\n _geometry_name = 'geometry'\n base = np.dtype('O')\n _metadata = ('subtype',)\n na_value = np.nan\n\n def __from_arrow__(self, data):\n return self.construct_array_type()(data, dtype=self)\n\n @classmethod\n def _arrow_element_type_from_numpy_subtype(cls, subtype):\n raise NotImplementedError\n\n @classmethod\n def construct_array_type(cls, *args):\n return GeometryArray\n\n @classmethod\n def _parse_subtype(cls, dtype_string):\n # Be case insensitive\n dtype_string = dtype_string.lower()\n subtype_re = re.compile('^' + cls._geometry_name + r\"\\[(?P<subtype>\\w+)\\]$\")\n\n match = subtype_re.match(dtype_string)\n if match:\n subtype_string = match.groupdict()['subtype']\n elif dtype_string == cls._geometry_name.lower():\n subtype_string = 'float64'\n else:\n raise ValueError(\"Cannot parse {dtype_string}\".format(\n dtype_string=dtype_string))\n\n return subtype_string\n\n @classmethod\n def construct_from_string(cls, string):\n # lowercase string\n try:\n string = string.lower()\n if not isinstance(string, str):\n raise AttributeError\n except AttributeError:\n raise TypeError(\n \"'construct_from_string' expects a string, got {typ}\".format(\n typ=type(string)))\n\n msg = \"Cannot construct a '%s' from '{}'\" % cls.__name__\n if string.startswith(cls._geometry_name.lower()):\n # Extract subtype\n try:\n subtype_string = cls._parse_subtype(string)\n return cls(subtype_string)\n except Exception:\n raise TypeError(msg.format(string))\n else:\n raise TypeError(msg.format(string))\n\n def __init__(self, subtype):\n if isinstance(subtype, GeometryDtype):\n self.subtype = subtype.subtype\n else:\n self.subtype = np.dtype(subtype)\n\n # Validate the subtype is numeric\n if self.subtype.kind not in ('i', 'u', 'f'):\n raise ValueError(\"Received non-numeric type of kind '{}'\".format(self.kind))\n\n array_type = self.construct_array_type()\n self.arrow_dtype = array_type._arrow_type_from_numpy_element_dtype(subtype)\n\n def __hash__(self):\n return hash((self.__class__, self.arrow_dtype))\n\n def __str__(self):\n return \"{}[{}]\".format(self._geometry_name, str(self.subtype.name))\n\n def __repr__(self):\n return \"{}({})\".format(self.__class__.__name__, str(self.subtype.name))\n\n @property\n def type(self):\n # type: () -> type\n \"\"\"The scalar type for the array, e.g. ``int``.\n It's expected ``ExtensionArray[item]`` returns an instance\n of ``ExtensionDtype.type`` for scalar ``item``.\n \"\"\"\n return Geometry\n\n @property\n def name(self):\n # type: () -> str\n \"\"\"A string identifying the data type.\n Will be used for display in, e.g. 
``Series.dtype``\n \"\"\"\n return str(self)\n\n\nclass Geometry:\n def __init__(self, data, dtype=None):\n if isinstance(data, pa.Scalar):\n # Use arrow Scalar as is\n self.data = data\n else:\n # Convert to arrow Scalar\n self.data = pa.array([data])[0]\n\n def __repr__(self):\n return \"{}({})\".format(self.__class__.__name__, self.data.as_py())\n\n def __hash__(self):\n return hash((self.__class__, np.array(self.data.as_py()).tobytes()))\n\n def __eq__(self, other):\n if isinstance(other, Container):\n return other == self\n if type(other) is not type(self):\n return False\n return self.data == other.data\n\n @classmethod\n def _shapely_to_coordinates(cls, shape):\n raise NotImplementedError()\n\n @classmethod\n def from_shapely(cls, shape):\n \"\"\"\n Build a spatialpandas geometry object from a shapely shape\n\n Args:\n shape: A shapely shape\n\n Returns:\n spatialpandas geometry object with type of the calling class\n \"\"\"\n shape_parts = cls._shapely_to_coordinates(shape)\n return cls(shape_parts)\n\n def intersects_bounds(self, bounds):\n raise NotImplementedError()\n\n def intersects(self, shape):\n raise NotImplementedError(\n \"intersects not yet implemented for %s objects\" % type(self).__name__\n )\n\n\nclass GeometryArray(ExtensionArray):\n _element_type = Geometry\n _can_hold_na = True\n\n # Import / export methods\n @classmethod\n def from_geopandas(cls, ga):\n \"\"\"\n Build a spatialpandas geometry array from a geopandas GeometryArray or\n GeoSeries.\n\n Args:\n ga: A geopandas GeometryArray or GeoSeries to import\n\n Returns:\n spatialpandas geometry array with type of the calling class\n \"\"\"\n if cls is GeometryArray:\n raise ValueError(\n \"from_geopandas must be called on a subclass of GeometryArray\"\n )\n return cls([\n cls._element_type._shapely_to_coordinates(shape)\n if shape is not None else None\n for shape in ga\n ])\n\n def to_geopandas(self):\n \"\"\"\n Convert a spatialpandas geometry array into a geopandas GeometryArray\n\n Returns:\n geopandas GeometryArray\n \"\"\"\n from geopandas.array import from_shapely\n return from_shapely([el.to_shapely() for el in self])\n\n # Constructor\n def __init__(self, array, dtype=None, copy=None):\n # Choose default dtype for empty arrays\n try:\n if len(array) == 0 and dtype is None:\n dtype = 'float64'\n except:\n # len failed\n pass\n\n # See if we can determine arrow array type\n if isinstance(dtype, GeometryDtype):\n # Use arrow type as-is\n arrow_dtype = dtype.arrow_dtype\n elif isinstance(dtype, pa.DataType):\n arrow_dtype = dtype\n elif dtype is not None and dtype != np.dtype('object'):\n # Scalar element dtype\n arrow_dtype = self._arrow_type_from_numpy_element_dtype(dtype)\n else:\n # Let arrow infer type\n arrow_dtype = None\n\n # Unwrap GeometryList elements to numpy arrays\n if is_array_like(array) or isinstance(array, list):\n array = [_unwrap_geometry(el, self._element_type) for el in array]\n array = pa.array(array, type=arrow_dtype)\n elif isinstance(array, pa.Array):\n # Nothing to do\n pass\n elif isinstance(array, pa.ChunkedArray):\n array = pa.concat_arrays(array.chunks)\n else:\n raise ValueError(\n \"Unsupported type passed for {}: {}\".format(\n self.__class__.__name__, type(array)\n )\n )\n\n # Save off pyarrow array\n self.data = array\n\n # Compute types\n np_type = self._numpy_element_dtype_from_arrow_type(self.data.type)\n self._numpy_element_type = np.dtype(np_type)\n self._dtype = self._dtype_class(np_type)\n\n # Initialize backing property for spatial index\n self._sindex = None\n\n 
@classmethod\n def _arrow_type_from_numpy_element_dtype(cls, dtype):\n raise NotImplementedError\n\n def _numpy_element_dtype_from_arrow_type(self, pyarrow_type):\n raise NotImplementedError\n\n @property\n def _dtype_class(self):\n return GeometryDtype\n\n @property\n def numpy_dtype(self):\n return self._numpy_element_type\n\n # Arrow conversion\n def __arrow_array__(self, type=None):\n return self.data\n\n # ExtensionArray methods\n @property\n def dtype(self):\n return self._dtype\n\n def astype(self, dtype, copy=True):\n if self.dtype == dtype:\n return self.copy() if copy else self\n\n if dtype == np.dtype('object'):\n return np.array(self, dtype='object')\n\n if isinstance(dtype, GeometryDtype):\n dtype = dtype.arrow_dtype.to_pandas_dtype()\n elif isinstance(dtype, pa.DataType):\n dtype = dtype.to_pandas_dtype()\n else:\n dtype = np.dtype(dtype)\n\n return self.__class__(np.asarray(self.data), dtype=dtype)\n\n astype.__doc__ = ExtensionArray.astype.__doc__\n\n @property\n def nbytes(self):\n size = 0\n for buf in self.data.buffers():\n if buf is not None:\n size += buf.size\n return size\n\n def isna(self):\n return _extract_isnull_bytemap(self.data)\n\n isna.__doc__ = ExtensionArray.isna.__doc__\n\n def copy(self):\n return type(self)(self.data, self.dtype)\n\n copy.__doc__ = ExtensionArray.copy.__doc__\n\n def __eq__(self, other):\n if type(other) is type(self):\n if len(other) != len(self):\n raise ValueError(\"\"\"\nCannot check equality of {typ} instances of unequal length\n len(ra1) == {len_a1}\n len(ra2) == {len_a2}\"\"\".format(\n typ=type(self).__name__,\n len_a1=len(self),\n len_a2=len(other)))\n result = np.zeros(len(self), dtype=np.bool_)\n for i in range(len(self)):\n result[i] = self[i] == other[i]\n return result\n if isinstance(other, (self.dtype.type, type(None))):\n result = np.zeros(len(self), dtype=np.bool_)\n for i in range(len(self)):\n result[i] = self[i] == other\n return result\n raise ValueError(\"\"\"\nCannot check equality of {typ} of length {a_len} with:\n {other}\"\"\".format(typ=type(self).__name__, a_len=len(self), other=repr(other)))\n\n def __contains__(self, item) -> bool:\n raise NotImplementedError\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n err_msg = (\"Only integers, slices and integer or boolean\"\n \"arrays are valid indices.\")\n if isinstance(item, tuple) and len(item) == 2:\n if item[0] is Ellipsis:\n item = item[1]\n elif item[1] is Ellipsis:\n item = item[0]\n\n if isinstance(item, Integral):\n item = int(item)\n if item < -len(self) or item >= len(self):\n raise IndexError(\"{item} is out of bounds\".format(item=item))\n else:\n # Convert negative item index\n if item < 0:\n item += len(self)\n\n value = self.data[item].as_py()\n if value is not None:\n return self._element_type(value, self.numpy_dtype)\n else:\n return None\n elif isinstance(item, slice):\n if item.step is None or item.step == 1:\n # pyarrow only supports slice with step of 1\n return self.__class__(self.data[item], dtype=self.dtype)\n else:\n selected_indices = np.arange(len(self))[item]\n return self.take(selected_indices, allow_fill=False)\n elif isinstance(item, Iterable):\n if isinstance(item, (np.ndarray, ExtensionArray)):\n # Leave numpy and pandas arrays alone\n kind = item.dtype.kind\n else:\n item = pd.array(item)\n kind = item.dtype.kind\n\n if len(item) == 0:\n return self.take([], allow_fill=False)\n elif kind == 'b':\n # Check mask length is compatible\n if len(item) != len(self):\n raise IndexError(\n \"Boolean index 
has wrong length: {} instead of {}\"\n .format(len(item), len(self))\n )\n\n # check for NA values\n if any(pd.isna(item)):\n raise ValueError(\n \"Cannot mask with a boolean indexer containing NA values\"\n )\n\n # Convert to unsigned integer array of indices\n indices = np.nonzero(item)[0].astype(np.uint32)\n if len(indices):\n return self.take(indices, allow_fill=False)\n else:\n return self[:0]\n elif kind in ('i', 'u'):\n if any(pd.isna(item)):\n raise ValueError(\n \"Cannot index with an integer indexer containing NA values\"\n )\n return self.take(item, allow_fill=False)\n else:\n raise IndexError(err_msg)\n else:\n raise IndexError(err_msg)\n\n def take(self, indices, allow_fill=False, fill_value=None):\n indices = np.asarray(indices)\n\n # Validate self non-empty (Pandas expects this error when array is empty)\n if (len(self) == 0 and len(indices) > 0 and\n (not allow_fill or any(indices >= 0))):\n raise IndexError(\"cannot do a non-empty take from an empty axes|out of bounds on {typ}\".format(\n typ=self.__class__.__name__,\n ))\n\n # Validate fill values\n if allow_fill and not (\n fill_value is None or\n np.isscalar(fill_value) and np.isnan(fill_value)):\n\n raise ValueError('non-None fill value not supported')\n\n # Validate indices\n invalid_mask = indices >= len(self)\n if not allow_fill:\n invalid_mask |= indices < -len(self)\n\n if any(invalid_mask):\n raise IndexError(\n \"Index value out of bounds for {typ} of length {n}: \"\n \"{idx}\".format(\n typ=self.__class__.__name__,\n n=len(self),\n idx=indices[invalid_mask][0]\n )\n )\n\n if allow_fill:\n invalid_mask = indices < -1\n if any(invalid_mask):\n # ValueError expected by pandas ExtensionArray test suite\n raise ValueError(\n \"Invalid index value for {typ} with allow_fill=True: \"\n \"{idx}\".format(\n typ=self.__class__.__name__,\n idx=indices[invalid_mask][0]\n )\n )\n\n # Build pyarrow array of indices\n indices = pa.array(indices.astype('int'), mask=indices < 0)\n else:\n # Convert negative indices to positive\n negative_mask = indices < 0\n indices[negative_mask] = indices[negative_mask] + len(self)\n\n # Build pyarrow array of indices\n indices = pa.array(indices.astype('int'))\n\n return self.__class__(self.data.take(indices), dtype=self.dtype)\n\n take.__doc__ = ExtensionArray.take.__doc__\n\n @classmethod\n def _from_sequence(cls, scalars, dtype=None, copy=None):\n if isinstance(scalars, cls):\n return scalars\n elif isinstance(scalars, Geometry):\n scalars = [scalars]\n\n return cls([\n None if np.isscalar(v) and np.isnan(v) else v for v in scalars\n ], dtype=dtype)\n\n def _values_for_factorize(self):\n return np.array(self, dtype='object'), None\n\n @classmethod\n def _from_factorized(cls, values, original):\n return cls(values, dtype=original.dtype)\n\n def _values_for_argsort(self):\n return np.array(list(self), dtype='object')\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n return cls(\n pa.concat_arrays(\n [ea.data for ea in to_concat]\n ),\n dtype=to_concat[0].dtype\n )\n\n def fillna(self, value=None, method=None, limit=None):\n from pandas.api.types import is_array_like\n from pandas.core.missing import get_fill_func\n from pandas.util._validators import validate_fillna_kwargs\n\n value, method = validate_fillna_kwargs(value, method)\n\n mask = self.isna()\n\n if is_array_like(value):\n if len(value) != len(self):\n raise ValueError(\n \"Length of 'value' does not match. 
Got ({}) \"\n \" expected {}\".format(len(value), len(self))\n )\n value = value[mask]\n\n if mask.any():\n if method is not None:\n func = get_fill_func(method)\n new_values = func(self.astype(object), limit=limit, mask=mask)\n # From pandas 1.3, get_fill_func also return mask\n new_values = new_values[0] if isinstance(new_values, tuple) else new_values\n new_values = self._from_sequence(new_values, self._dtype)\n else:\n # fill with value\n new_values = np.asarray(self)\n if isinstance(value, Geometry):\n value = [value]\n new_values[mask] = value\n new_values = self.__class__(new_values, dtype=self.dtype)\n else:\n new_values = self.copy()\n return new_values\n\n fillna.__doc__ = ExtensionArray.fillna.__doc__\n\n # Geometry properties/methods\n @property\n def sindex(self):\n if self._sindex is None:\n self.build_sindex()\n return self._sindex\n\n def build_sindex(self, **kwargs):\n if self._sindex is None:\n self._sindex = HilbertRtree(self.bounds, **kwargs)\n return self\n\n @property\n def cx(self):\n \"\"\"\n Geopandas-style spatial indexer to select a subset of the array by intersection\n with a bounding box\n\n Format of input should be ``.cx[xmin:xmax, ymin:ymax]``. Any of\n ``xmin``, ``xmax``, ``ymin``, and ``ymax`` can be provided, but input\n must include a comma separating x and y slices. That is, ``.cx[:, :]``\n will return the full series/frame, but ``.cx[:]`` is not implemented.\n \"\"\"\n return _CoordinateIndexer(self)\n\n @property\n def total_bounds(self):\n raise NotImplementedError\n\n @property\n def total_bounds_x(self):\n raise NotImplementedError\n\n @property\n def total_bounds_y(self):\n raise NotImplementedError\n\n @property\n def bounds(self):\n raise NotImplementedError\n\n def hilbert_distance(self, total_bounds=None, p=10):\n # Handle default total_bounds\n if total_bounds is None:\n total_bounds = list(self.total_bounds)\n\n # Expand zero width bounds\n if total_bounds[0] == total_bounds[2]:\n total_bounds[2] += 1.0\n if total_bounds[1] == total_bounds[3]:\n total_bounds[3] += 1.0\n total_bounds = tuple(total_bounds)\n\n return _distances_from_bounds(self.bounds, total_bounds, p)\n\n def intersects_bounds(self, bounds, inds=None):\n \"\"\"\n Test whether each element in the array intersects with the supplied bounds\n\n Args:\n bounds: Tuple of bounds coordinates of the form (x0, y0, x1, y1)\n inds: Optional array of indices into the array. If supplied, intersection\n calculations will be performed only on the elements selected by this\n array. If not supplied, intersection calculations are performed\n on all elements.\n\n Returns:\n Array of boolean values indicating which elements of the array intersect\n with the supplied bounds\n \"\"\"\n raise NotImplementedError()\n\n def intersects(self, shape, inds=None):\n \"\"\"\n Test whether each element in the array intersects with the supplied shape\n\n Args:\n shape: The spatialpandas shape to compute intersections with\n inds: Optional array of indices into the array. If supplied, intersection\n calculations will be performed only on the elements selected by this\n array. 
If not supplied, intersection calculations are performed\n on all elements.\n\n Returns:\n Array of boolean values indicating which elements of the array intersect\n with the supplied shape\n \"\"\"\n raise NotImplementedError(\n \"intersects not yet implemented for %s objects\" % type(self).__name__\n )\n\n\nclass _BaseCoordinateIndexer:\n def __init__(self, sindex):\n self._sindex = sindex\n\n def _get_bounds(self, key):\n xs, ys = key\n # Handle xs and ys as scalar numeric values\n if type(xs) is not slice:\n xs = slice(xs, xs)\n if type(ys) is not slice:\n ys = slice(ys, ys)\n if xs.step is not None or ys.step is not None:\n raise ValueError(\n \"Slice step not supported. The cx indexer uses slices to represent \"\n \"intervals in continuous coordinate space, and a slice step has no \"\n \"clear interpretation in this context.\"\n )\n if self._sindex:\n xmin, ymin, xmax, ymax = self._sindex.total_bounds\n else:\n xmin, ymin, xmax, ymax = self._obj.total_bounds\n x0, y0, x1, y1 = (\n xs.start if xs.start is not None else xmin,\n ys.start if ys.start is not None else ymin,\n xs.stop if xs.stop is not None else xmax,\n ys.stop if ys.stop is not None else ymax,\n )\n # Handle inverted bounds\n if x1 < x0:\n x0, x1 = x1, x0\n if y1 < y0:\n y0, y1 = y1, y0\n return x0, x1, y0, y1\n\n def __getitem__(self, key):\n x0, x1, y0, y1 = self._get_bounds(key)\n if self._sindex:\n covers_inds, overlaps_inds = self._sindex.covers_overlaps((x0, y0, x1, y1))\n else:\n covers_inds, overlaps_inds = None, None\n return self._perform_get_item(covers_inds, overlaps_inds, x0, x1, y0, y1)\n\n def _perform_get_item(self, covers_inds, overlaps_inds, x0, x1, y0, y1):\n raise NotImplementedError()\n\n\nclass _CoordinateIndexer(_BaseCoordinateIndexer):\n def __init__(self, obj, parent=None):\n super().__init__(obj._sindex)\n self._obj = obj\n self._parent = parent\n\n def _perform_get_item(self, covers_inds, overlaps_inds, x0, x1, y0, y1):\n overlaps_inds_mask = self._obj.intersects_bounds(\n (x0, y0, x1, y1), overlaps_inds\n )\n if covers_inds is not None:\n selected_inds = np.sort(\n np.concatenate([covers_inds, overlaps_inds[overlaps_inds_mask]])\n )\n if self._parent is not None:\n if len(self._parent) > 0:\n return self._parent.iloc[selected_inds]\n else:\n return self._parent\n return self._obj[selected_inds]\n else:\n if self._parent is not None:\n if len(self._parent) > 0:\n return self._parent[overlaps_inds_mask]\n else:\n return self._parent\n return self._obj[overlaps_inds_mask]\n\n\n@ngjit\ndef _perform_extract_isnull_bytemap(bitmap, bitmap_length, bitmap_offset, dst_offset, dst):\n \"\"\"\n Note: Copied from fletcher: See NOTICE for license info\n\n (internal) write the values of a valid bitmap as bytes to a pre-allocatored\n isnull bytemap.\n\n Parameters\n ----------\n bitmap: pyarrow.Buffer\n bitmap where a set bit indicates that a value is valid\n bitmap_length: int\n Number of bits to read from the bitmap\n bitmap_offset: int\n Number of bits to skip from the beginning of the bitmap.\n dst_offset: int\n Number of bytes to skip from the beginning of the output\n dst: numpy.array(dtype=bool)\n Pre-allocated numpy array where a byte is set when a value is null\n \"\"\"\n for i in range(bitmap_length):\n idx = bitmap_offset + i\n byte_idx = idx // 8\n bit_mask = 1 << (idx % 8)\n dst[dst_offset + i] = (bitmap[byte_idx] & bit_mask) == 0\n\n\ndef _extract_isnull_bytemap(list_array):\n \"\"\"\n Note: Copied from fletcher: See NOTICE for license info\n\n Extract the valid bitmaps of a chunked array into 
numpy isnull bytemaps.\n\n Parameters\n ----------\n chunked_array: pyarrow.ChunkedArray\n\n Returns\n -------\n valid_bytemap: numpy.array\n \"\"\"\n result = np.zeros(len(list_array), dtype=bool)\n\n offset = 0\n chunk = list_array\n valid_bitmap = chunk.buffers()[0]\n if valid_bitmap:\n buf = memoryview(valid_bitmap)\n _perform_extract_isnull_bytemap(buf, len(chunk), chunk.offset, offset, result)\n else:\n return np.full(len(list_array), False)\n\n return result\n\n\ndef is_geometry_array(data):\n \"\"\"\n Check if the data is of geometry dtype.\n Does not include object array of GeometryList/shapely scalars\n \"\"\"\n if isinstance(getattr(data, \"dtype\", None), GeometryDtype):\n return True\n else:\n return False\n\n\ndef to_geometry_array(data, dtype=None):\n from . import (LineArray, MultiLineArray, MultiPointArray,\n MultiPolygonArray, PointArray, PolygonArray, RingArray)\n if sg is not None:\n shapely_to_spatialpandas = {\n sg.Point: PointArray,\n sg.MultiPoint: MultiPointArray,\n sg.LineString: LineArray,\n sg.LinearRing: RingArray,\n sg.MultiLineString: MultiLineArray,\n sg.Polygon: PolygonArray,\n sg.MultiPolygon: MultiPolygonArray,\n }\n else:\n shapely_to_spatialpandas = {}\n\n # Normalize dtype from string\n if dtype is not None:\n dtype = pd.array([], dtype=dtype).dtype\n\n err_msg = \"Unable to convert data argument to a GeometryList array\"\n if is_geometry_array(data):\n # Keep data as is\n pass\n elif (is_array_like(data) or\n isinstance(data, (list, tuple))\n or gp and isinstance(data, (gp.GeoSeries, gp.array.GeometryArray))):\n\n if dtype is not None:\n data = dtype.construct_array_type()(data, dtype=dtype)\n elif len(data) == 0:\n raise ValueError(\n \"Cannot infer spatialpandas geometry type from empty collection \"\n \"without dtype.\\n\"\n )\n else:\n # Check for list/array of geometry scalars.\n first_valid = None\n for val in data:\n if val is not None:\n first_valid = val\n break\n if isinstance(first_valid, Geometry):\n # Pass data to constructor of appropriate geometry array\n data = first_valid.construct_array_type()(data)\n elif type(first_valid) in shapely_to_spatialpandas:\n if isinstance(first_valid, sg.LineString):\n # Handle mix of sg.LineString and sg.MultiLineString\n for val in data:\n if isinstance(val, sg.MultiLineString):\n first_valid = val\n break\n elif isinstance(first_valid, sg.Polygon):\n # Handle mix of sg.Polygon and sg.MultiPolygon\n for val in data:\n if isinstance(val, sg.MultiPolygon):\n first_valid = val\n break\n\n array_type = shapely_to_spatialpandas[type(first_valid)]\n data = array_type.from_geopandas(data)\n else:\n raise ValueError(err_msg)\n else:\n raise ValueError(err_msg)\n return data\n"
] |
[
[
"numpy.nonzero",
"numpy.asarray",
"numpy.isnan",
"pandas.array",
"numpy.dtype",
"pandas.util._validators.validate_fillna_kwargs",
"numpy.concatenate",
"pandas.api.types.is_array_like",
"numpy.isscalar",
"pandas.isna",
"numpy.array",
"pandas.core.missing.get_fill_func"
]
] |
siriuslee/modeldb
|
[
"25e8354e126f2ddeb99ff76bb8136544b65dd581"
] |
[
"client/verta/verta/_internal_utils/_utils.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport datetime\nimport glob\nimport inspect\nimport itertools\nimport json\nimport numbers\nimport os\nimport re\nimport string\nimport subprocess\nimport sys\nimport threading\nimport time\n\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom urllib3.util.retry import Retry\n\nfrom google.protobuf import json_format\nfrom google.protobuf.struct_pb2 import Value, ListValue, Struct, NULL_VALUE\n\nfrom ..external import six\nfrom ..external.six.moves.urllib.parse import urljoin # pylint: disable=import-error, no-name-in-module\n\nfrom .._protos.public.common import CommonService_pb2 as _CommonCommonService\n\ntry:\n import pandas as pd\nexcept ImportError: # pandas not installed\n pd = None\n\ntry:\n import tensorflow as tf\nexcept ImportError: # TensorFlow not installed\n tf = None\n\ntry:\n import ipykernel\nexcept ImportError: # Jupyter not installed\n pass\nelse:\n try:\n from IPython.display import Javascript, display\n try: # Python 3\n from notebook.notebookapp import list_running_servers\n except ImportError: # Python 2\n import warnings\n from IPython.utils.shimmodule import ShimWarning\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", category=ShimWarning)\n from IPython.html.notebookapp import list_running_servers\n del warnings, ShimWarning # remove ad hoc imports from scope\n except ImportError: # abnormally nonstandard installation of Jupyter\n pass\n\n\ntry:\n import numpy as np\nexcept ImportError: # NumPy not installed\n np = None\n BOOL_TYPES = (bool,)\nelse:\n BOOL_TYPES = (bool, np.bool_)\n\n_GRPC_PREFIX = \"Grpc-Metadata-\"\n\n_VALID_HTTP_METHODS = {'GET', 'POST', 'PUT', 'DELETE'}\n_VALID_FLAT_KEY_CHARS = set(string.ascii_letters + string.digits + '_-/')\n\nTHREAD_LOCALS = threading.local()\nTHREAD_LOCALS.active_experiment_run = None\n\nSAVED_MODEL_DIR = \"/app/tf_saved_model/\"\n\n# TODO: remove this in favor of _config_utils when #635 is merged\nHOME_VERTA_DIR = os.path.expanduser(os.path.join('~', \".verta\"))\n\n\nclass Connection:\n def __init__(self, scheme=None, socket=None, auth=None, max_retries=0, ignore_conn_err=False):\n \"\"\"\n HTTP connection configuration utility struct.\n\n Parameters\n ----------\n scheme : {'http', 'https'}, optional\n HTTP authentication scheme.\n socket : str, optional\n Hostname and port.\n auth : dict, optional\n Verta authentication headers.\n max_retries : int, default 0\n Maximum number of times to retry a request on a connection failure. This only attempts retries\n on HTTP codes {502, 503, 504} which commonly occur during back end connection lapses.\n ignore_conn_err : bool, default False\n Whether to ignore connection errors and instead return successes with empty contents.\n\n \"\"\"\n self.scheme = scheme\n self.socket = socket\n self.auth = auth\n # TODO: retry on 404s, but only if we're sure it's not legitimate e.g. 
from a GET\n self.retry = Retry(total=max_retries,\n backoff_factor=1, # each retry waits (2**retry_num) seconds\n method_whitelist=False, # retry on all HTTP methods\n status_forcelist=(502, 503, 504), # only retry on these status codes\n raise_on_redirect=False, # return Response instead of raising after max retries\n raise_on_status=False) # return Response instead of raising after max retries\n self.ignore_conn_err = ignore_conn_err\n\n\nclass Configuration:\n def __init__(self, use_git=True, debug=False):\n \"\"\"\n Client behavior configuration utility struct.\n\n Parameters\n ----------\n use_git : bool, default True\n Whether to use a local Git repository for certain operations.\n\n \"\"\"\n self.use_git = use_git\n self.debug = debug\n\n\nclass LazyList(object):\n # number of items to fetch per back end call in __iter__()\n _ITER_PAGE_LIMIT = 100\n\n def __init__(self, conn, conf, msg, endpoint, rest_method):\n self._conn = conn\n self._conf = conf\n self._msg = msg # protobuf msg used to make back end calls\n self._endpoint = endpoint\n self._rest_method = rest_method\n\n def __getitem__(self, index):\n if isinstance(index, int):\n # copy msg to avoid mutating `self`'s state\n msg = self._msg.__class__()\n msg.CopyFrom(self._msg)\n msg.page_limit = 1\n if index >= 0:\n # convert zero-based indexing into page number\n msg.page_number = index + 1\n else:\n # reverse page order to index from end\n msg.ascending = not msg.ascending # pylint: disable=no-member\n msg.page_number = abs(index)\n\n response_msg = self._call_back_end(msg)\n\n records = self._get_records(response_msg)\n if (not records\n and msg.page_number > response_msg.total_records): # pylint: disable=no-member\n raise IndexError(\"index out of range\")\n id_ = records[0].id\n\n return self._create_element(id_)\n else:\n raise TypeError(\"index must be integer, not {}\".format(type(index)))\n\n def __iter__(self):\n # copy msg to avoid mutating `self`'s state\n msg = self._msg.__class__()\n msg.CopyFrom(self._msg)\n msg.page_limit = self._ITER_PAGE_LIMIT\n msg.page_number = 0 # this will be incremented as soon as we enter the loop\n\n seen_ids = set()\n total_records = float('inf')\n while msg.page_limit*msg.page_number < total_records: # pylint: disable=no-member\n msg.page_number += 1 # pylint: disable=no-member\n\n response_msg = self._call_back_end(msg)\n\n total_records = response_msg.total_records\n\n ids = self._get_ids(response_msg)\n for id_ in ids:\n # skip if we've seen the ID before\n if id_ in seen_ids:\n continue\n else:\n seen_ids.add(id_)\n\n yield self._create_element(id_)\n\n def __len__(self):\n # copy msg to avoid mutating `self`'s state\n msg = self._msg.__class__()\n msg.CopyFrom(self._msg)\n msg.page_limit = msg.page_number = 1 # minimal request just to get total_records\n\n response_msg = self._call_back_end(msg)\n\n return response_msg.total_records\n\n def _call_back_end(self, msg):\n data = proto_to_json(msg)\n\n if self._rest_method == \"GET\":\n response = make_request(\n self._rest_method,\n self._endpoint.format(self._conn.scheme, self._conn.socket),\n self._conn, params=data,\n )\n elif self._rest_method == \"POST\":\n response = make_request(\n self._rest_method,\n self._endpoint.format(self._conn.scheme, self._conn.socket),\n self._conn, json=data,\n )\n raise_for_http_error(response)\n\n response_msg = json_to_proto(body_to_json(response), msg.Response)\n return response_msg\n\n def _get_ids(self, response_msg):\n return (record.id for record in self._get_records(response_msg))\n\n def 
_get_records(self, response_msg):\n \"\"\"Get the attribute of `response_msg` that is not `total_records`.\"\"\"\n raise NotImplementedError\n\n def _create_element(self, id_):\n \"\"\"Instantiate element to return to user.\"\"\"\n raise NotImplementedError\n\n\ndef make_request(method, url, conn, stream=False, **kwargs):\n \"\"\"\n Makes a REST request.\n\n Parameters\n ----------\n method : {'GET', 'POST', 'PUT', 'DELETE'}\n HTTP method.\n url : str\n URL.\n conn : Connection\n Connection authentication and configuration.\n stream : bool, default False\n Whether to stream the response contents.\n **kwargs\n Initialization parameters to requests.Request().\n\n Returns\n -------\n requests.Response\n\n \"\"\"\n if method.upper() not in _VALID_HTTP_METHODS:\n raise ValueError(\"`method` must be one of {}\".format(_VALID_HTTP_METHODS))\n\n # add auth to headers\n kwargs.setdefault('headers', {}).update(conn.auth)\n\n with requests.Session() as s:\n s.mount(url, HTTPAdapter(max_retries=conn.retry))\n try:\n request = requests.Request(method, url, **kwargs).prepare()\n response = s.send(request, stream=stream, allow_redirects=False)\n\n # manually inspect initial response and subsequent redirects to stop on 302s\n history = [] # track history because `requests` doesn't since we're redirecting manually\n responses = itertools.chain([response], s.resolve_redirects(response, request))\n for response in responses:\n if response.status_code == 302:\n if not conn.ignore_conn_err:\n raise RuntimeError(\n \"received status 302 from {},\"\n \" which is not supported by the Client\".format(response.url)\n )\n else:\n return fabricate_200()\n\n history.append(response)\n # set full history\n response.history = history[:-1] # last element is this response, so drop it\n except (requests.exceptions.BaseHTTPError,\n requests.exceptions.RequestException) as e:\n if not conn.ignore_conn_err:\n raise e\n # else fall through to fabricate 200 response\n else:\n if response.ok or not conn.ignore_conn_err:\n return response\n # else fall through to fabricate 200 response\n return fabricate_200()\n\n\ndef fabricate_200():\n \"\"\"\n Returns an HTTP response with ``status_code`` 200 and empty JSON contents.\n\n This is used when the Client has ``ignore_conn_err=True``, so that backend responses can be\n spoofed to minimize execution-halting errors.\n\n Returns\n -------\n :class:`requests.Response`\n\n \"\"\"\n response = requests.Response()\n response.status_code = 200 # success\n response._content = six.ensure_binary(\"{}\") # empty contents\n return response\n\n\ndef raise_for_http_error(response):\n \"\"\"\n Raises a potential HTTP error with a back end message if provided, or a default error message otherwise.\n\n Parameters\n ----------\n response : :class:`requests.Response`\n Response object returned from a `requests`-module HTTP request.\n\n Raises\n ------\n :class:`requests.HTTPError`\n If an HTTP error occured.\n\n \"\"\"\n try:\n response.raise_for_status()\n except requests.HTTPError as e:\n # get current time in UTC to display alongside exception\n curr_time = timestamp_to_str(now(), utc=True)\n time_str = \" at {} UTC\".format(curr_time)\n\n try:\n reason = body_to_json(response)['message']\n except (ValueError, # not JSON response\n KeyError): # no 'message' from back end\n e.args = (e.args[0] + time_str,) + e.args[1:] # attach time to error message\n six.raise_from(e, None) # use default reason\n else:\n # replicate https://github.com/psf/requests/blob/428f7a/requests/models.py#L954\n if 400 <= 
response.status_code < 500:\n cause = \"Client\"\n elif 500 <= response.status_code < 600:\n cause = \"Server\"\n else: # should be impossible here, but sure okay\n cause = \"Unexpected\"\n message = \"{} {} Error: {} for url: {}\".format(response.status_code, cause, reason, response.url)\n message += time_str # attach time to error message\n six.raise_from(requests.HTTPError(message, response=response), None)\n\n\ndef body_to_json(response):\n \"\"\"\n Returns the JSON-encoded contents of `response`, raising a detailed error on failure.\n\n Parameters\n ----------\n response : :class:`requests.Response`\n HTTP response.\n\n Returns\n -------\n contents : dict\n JSON-encoded contents of `response`.\n\n Raises\n ------\n ValueError\n If `response`'s contents are not JSON-encoded.\n\n \"\"\"\n try:\n return response.json()\n except ValueError: # not JSON response\n msg = '\\n'.join([\n \"expected JSON response from {}, but instead got:\".format(response.url),\n response.text or \"<empty response>\",\n \"\",\n \"Please notify the Verta development team.\",\n ])\n msg = six.ensure_str(msg)\n six.raise_from(ValueError(msg), None)\n\n\ndef is_hidden(path): # to avoid \"./\".startswith('.')\n return os.path.basename(path.rstrip('/')).startswith('.') and path != \".\"\n\n\ndef find_filepaths(paths, extensions=None, include_hidden=False, include_venv=False):\n \"\"\"\n Unravels a list of file and directory paths into a list of only filepaths by walking through the\n directories.\n\n Parameters\n ----------\n paths : str or list of str\n File and directory paths.\n extensions : str or list of str, optional\n What files to include while walking through directories. If not provided, all files will be\n included.\n include_hidden : bool, default False\n Whether to include hidden files and subdirectories found while walking through directories.\n include_venv : bool, default False\n Whether to include Python virtual environment directories.\n\n Returns\n -------\n filepaths : set\n\n \"\"\"\n if isinstance(paths, six.string_types):\n paths = [paths]\n paths = list(map(os.path.expanduser, paths))\n\n if isinstance(extensions, six.string_types):\n extensions = [extensions]\n if extensions is not None:\n # prepend period to file extensions where missing\n extensions = map(lambda ext: ext if ext.startswith('.') else ('.' 
+ ext), extensions)\n extensions = set(extensions)\n\n filepaths = set()\n for path in paths:\n if os.path.isdir(path):\n for parent_dir, dirnames, filenames in os.walk(path):\n if not include_hidden:\n # skip hidden directories\n dirnames[:] = [dirname for dirname in dirnames if not is_hidden(dirname)]\n # skip hidden files\n filenames[:] = [filename for filename in filenames if not is_hidden(filename)]\n if not include_venv:\n exec_path_glob = os.path.join(parent_dir, \"{}\", \"bin\", \"python*\")\n dirnames[:] = [dirname for dirname in dirnames if not glob.glob(exec_path_glob.format(dirname))]\n for filename in filenames:\n if extensions is None or os.path.splitext(filename)[1] in extensions:\n filepaths.add(os.path.join(parent_dir, filename))\n else:\n filepaths.add(path)\n return filepaths\n\n\ndef proto_to_json(msg):\n \"\"\"\n Converts a `protobuf` `Message` object into a JSON-compliant dictionary.\n\n The output preserves snake_case field names and integer representaions of enum variants.\n\n Parameters\n ----------\n msg : google.protobuf.message.Message\n `protobuf` `Message` object.\n\n Returns\n -------\n dict\n JSON object representing `msg`.\n\n \"\"\"\n return json.loads(json_format.MessageToJson(msg,\n including_default_value_fields=True,\n preserving_proto_field_name=True,\n use_integers_for_enums=True))\n\n\ndef json_to_proto(response_json, response_cls, ignore_unknown_fields=True):\n \"\"\"\n Converts a JSON-compliant dictionary into a `protobuf` `Message` object.\n\n Parameters\n ----------\n response_json : dict\n JSON object representing a Protocol Buffer message.\n response_cls : type\n `protobuf` `Message` subclass, e.g. ``CreateProject.Response``.\n ignore_unknown_fields : bool, default True\n Whether to allow (and ignore) fields in `response_json` that are not defined in\n `response_cls`. 
This is for forward compatibility with the back end; if the Client protos\n are outdated and we get a response with new fields, ``True`` prevents an error.\n\n Returns\n -------\n google.protobuf.message.Message\n `protobuf` `Message` object represented by `response_json`.\n\n \"\"\"\n return json_format.Parse(json.dumps(response_json),\n response_cls(),\n ignore_unknown_fields=ignore_unknown_fields)\n\n\ndef to_builtin(obj):\n \"\"\"\n Tries to coerce `obj` into a built-in type, for JSON serialization.\n\n Parameters\n ----------\n obj\n\n Returns\n -------\n object\n A built-in equivalent of `obj`, or `obj` unchanged if it could not be handled by this function.\n\n \"\"\"\n # jump through ludicrous hoops to avoid having hard dependencies in the Client\n cls_ = obj.__class__\n obj_class = getattr(cls_, '__name__', None)\n obj_module = getattr(cls_, '__module__', None)\n\n # booleans\n if isinstance(obj, BOOL_TYPES):\n return True if obj else False\n\n # NumPy scalars\n if obj_module == \"numpy\" and obj_class.startswith(('int', 'uint', 'float', 'str')):\n return obj.item()\n\n # scientific library collections\n if obj_class == \"ndarray\":\n return obj.tolist()\n if obj_class == \"Series\":\n return obj.values.tolist()\n if obj_class == \"DataFrame\":\n return obj.values.tolist()\n if obj_class == \"Tensor\" and obj_module == \"torch\":\n return obj.detach().numpy().tolist()\n if tf is not None and isinstance(obj, tf.Tensor): # if TensorFlow\n try:\n return obj.numpy().tolist()\n except: # TF 1.X or not-eager execution\n pass\n\n # strings\n if isinstance(obj, six.string_types): # prevent infinite loop with iter\n return obj\n if isinstance(obj, six.binary_type):\n return six.ensure_str(obj)\n\n # dicts and lists\n if isinstance(obj, dict):\n return {to_builtin(key): to_builtin(val) for key, val in six.viewitems(obj)}\n try:\n iter(obj)\n except TypeError:\n pass\n else:\n return [to_builtin(val) for val in obj]\n\n return obj\n\n\ndef python_to_val_proto(raw_val, allow_collection=False):\n \"\"\"\n Converts a Python variable into a `protobuf` `Value` `Message` object.\n\n Parameters\n ----------\n raw_val\n Python variable.\n allow_collection : bool, default False\n Whether to allow ``list``s and ``dict``s as `val`. 
This flag exists because some callers\n ought to not support logging collections, so this function will perform the typecheck on `val`.\n\n Returns\n -------\n google.protobuf.struct_pb2.Value\n `protobuf` `Value` `Message` representing `val`.\n\n \"\"\"\n # TODO: check `allow_collection` before `to_builtin()` to avoid unnecessary processing\n val = to_builtin(raw_val)\n\n if val is None:\n return Value(null_value=NULL_VALUE)\n elif isinstance(val, bool): # did you know that `bool` is a subclass of `int`?\n return Value(bool_value=val)\n elif isinstance(val, numbers.Real):\n return Value(number_value=val)\n elif isinstance(val, six.string_types):\n return Value(string_value=val)\n elif isinstance(val, (list, dict)):\n if allow_collection:\n if isinstance(val, list):\n list_value = ListValue()\n list_value.extend(val) # pylint: disable=no-member\n return Value(list_value=list_value)\n else: # isinstance(val, dict)\n if all([isinstance(key, six.string_types) for key in val.keys()]):\n struct_value = Struct()\n struct_value.update(val) # pylint: disable=no-member\n return Value(struct_value=struct_value)\n else: # protobuf's fault\n raise TypeError(\"struct keys must be strings; consider using log_artifact() instead\")\n else:\n raise TypeError(\"unsupported type {}; consider using log_attribute() instead\".format(type(raw_val)))\n else:\n raise TypeError(\"unsupported type {}; consider using log_artifact() instead\".format(type(raw_val)))\n\n\ndef val_proto_to_python(msg):\n \"\"\"\n Converts a `protobuf` `Value` `Message` object into a Python variable.\n\n Parameters\n ----------\n msg : google.protobuf.struct_pb2.Value\n `protobuf` `Value` `Message` representing a variable.\n\n Returns\n -------\n one of {None, bool, float, int, str}\n Python variable represented by `msg`.\n\n \"\"\"\n value_kind = msg.WhichOneof(\"kind\")\n if value_kind == \"null_value\":\n return None\n elif value_kind == \"bool_value\":\n return msg.bool_value\n elif value_kind == \"number_value\":\n return int(msg.number_value) if msg.number_value.is_integer() else msg.number_value\n elif value_kind == \"string_value\":\n return msg.string_value\n elif value_kind == \"list_value\":\n return [val_proto_to_python(val_msg)\n for val_msg\n in msg.list_value.values]\n elif value_kind == \"struct_value\":\n return {key: val_proto_to_python(val_msg)\n for key, val_msg\n in msg.struct_value.fields.items()}\n else:\n raise NotImplementedError(\"retrieved value type is not supported\")\n\n\ndef unravel_key_values(rpt_key_value_msg):\n \"\"\"\n Converts a repeated KeyValue field of a protobuf message into a dictionary.\n\n Parameters\n ----------\n rpt_key_value_msg : google.protobuf.pyext._message.RepeatedCompositeContainer\n Repeated KeyValue field of a protobuf message.\n\n Returns\n -------\n dict of str to {None, bool, float, int, str}\n Names and values.\n\n \"\"\"\n return {key_value.key: val_proto_to_python(key_value.value)\n for key_value\n in rpt_key_value_msg}\n\n\ndef unravel_artifacts(rpt_artifact_msg):\n \"\"\"\n Converts a repeated Artifact field of a protobuf message into a list of names.\n\n Parameters\n ----------\n rpt_artifact_msg : google.protobuf.pyext._message.RepeatedCompositeContainer\n Repeated Artifact field of a protobuf message.\n\n Returns\n -------\n list of str\n Names of artifacts.\n\n \"\"\"\n return [artifact.key\n for artifact\n in rpt_artifact_msg]\n\n\ndef unravel_observation(obs_msg):\n \"\"\"\n Converts an Observation protobuf message into a more straightforward Python tuple.\n\n This is 
useful because an Observation message has a oneof that's finicky to handle.\n\n Returns\n -------\n str\n Name of observation.\n {None, bool, float, int, str}\n Value of observation.\n str\n Human-readable timestamp.\n\n \"\"\"\n if obs_msg.WhichOneof(\"oneOf\") == \"attribute\":\n key = obs_msg.attribute.key\n value = obs_msg.attribute.value\n elif obs_msg.WhichOneof(\"oneOf\") == \"artifact\":\n key = obs_msg.artifact.key\n value = \"{} artifact\".format(_CommonCommonService.ArtifactTypeEnum.ArtifactType.Name(obs_msg.artifact.artifact_type))\n return (\n key,\n val_proto_to_python(value),\n timestamp_to_str(obs_msg.timestamp),\n int(obs_msg.epoch_number.number_value),\n )\n\n\ndef unravel_observations(rpt_obs_msg):\n \"\"\"\n Converts a repeated Observation field of a protobuf message into a dictionary.\n\n Parameters\n ----------\n rpt_obs_msg : google.protobuf.pyext._message.RepeatedCompositeContainer\n Repeated Observation field of a protobuf message.\n\n Returns\n -------\n dict of str to list of tuples ({None, bool, float, int, str}, str)\n Names and observation sequences.\n\n \"\"\"\n observations = {}\n for obs_msg in rpt_obs_msg:\n obs_tuple = unravel_observation(obs_msg)\n key = obs_tuple[0]\n observations.setdefault(key, []).append(obs_tuple[1:])\n return observations\n\n\ndef validate_flat_key(key):\n \"\"\"\n Checks whether `key` contains invalid characters.\n\n To prevent bugs with querying (which allow dot-delimited nested keys), flat keys (such as those\n used for individual metrics) must not contain periods.\n\n Furthermore, to prevent potential bugs with the back end down the line, keys should be restricted\n to alphanumeric characters, underscores, and dashes until we can verify robustness.\n\n Parameters\n ----------\n key : str\n Name of metadatum.\n\n Raises\n ------\n ValueError\n If `key` contains invalid characters.\n\n \"\"\"\n for c in key:\n if c not in _VALID_FLAT_KEY_CHARS:\n raise ValueError(\"`key` may only contain alphanumeric characters, underscores, dashes,\"\n \" and forward slashes\")\n\n\ndef generate_default_name():\n \"\"\"\n Generates a string that can be used as a default entity name while avoiding collisions.\n\n The generated string is a concatenation of the current process ID and the current Unix timestamp,\n such that a collision should only occur if a single process produces two of an entity at the same\n nanosecond.\n\n Returns\n -------\n name : str\n String generated from the current process ID and Unix timestamp.\n\n \"\"\"\n return \"{}{}\".format(os.getpid(), str(time.time()).replace('.', ''))\n\n\nclass UTC(datetime.tzinfo):\n \"\"\"UTC timezone class for Python 2 timestamp calculations\"\"\"\n def utcoffset(self, dt):\n return datetime.timedelta(0)\n\n def tzname(self, dt):\n return \"UTC\"\n\n def dst(self, dt):\n return datetime.timedelta(0)\n\n\ndef timestamp_to_ms(timestamp):\n \"\"\"\n Converts a Unix timestamp into one with millisecond resolution.\n\n Parameters\n ----------\n timestamp : float or int\n Unix timestamp.\n\n Returns\n -------\n int\n `timestamp` with millisecond resolution (13 integer digits).\n\n \"\"\"\n num_integer_digits = len(str(timestamp).split('.')[0])\n return int(timestamp*10**(13 - num_integer_digits))\n\n\ndef ensure_timestamp(timestamp):\n \"\"\"\n Converts a representation of a datetime into a Unix timestamp with millisecond resolution.\n\n If `timestamp` is provided as a string, this function attempts to use pandas (if installed) to\n parse it into a Unix timestamp, since pandas can interally handle 
many different human-readable\n datetime string representations. If pandas is not installed, this function will only handle an\n ISO 8601 representation.\n\n Parameters\n ----------\n timestamp : str or float or int\n String representation of a datetime or numerical Unix timestamp.\n\n Returns\n -------\n int\n `timestamp` with millisecond resolution (13 integer digits).\n\n \"\"\"\n if isinstance(timestamp, six.string_types):\n try: # attempt with pandas, which can parse many time string formats\n return timestamp_to_ms(pd.Timestamp(timestamp).timestamp())\n except NameError: # pandas not installed\n six.raise_from(ValueError(\"pandas must be installed to parse datetime strings\"),\n None)\n except ValueError: # can't be handled by pandas\n six.raise_from(ValueError(\"unable to parse datetime string \\\"{}\\\"\".format(timestamp)),\n None)\n elif isinstance(timestamp, numbers.Real):\n return timestamp_to_ms(timestamp)\n elif isinstance(timestamp, datetime.datetime):\n if six.PY2:\n # replicate https://docs.python.org/3/library/datetime.html#datetime.datetime.timestamp\n seconds = (timestamp - datetime.datetime(1970, 1, 1, tzinfo=UTC())).total_seconds()\n else: # Python 3\n seconds = timestamp.timestamp()\n return timestamp_to_ms(seconds)\n else:\n raise TypeError(\"unable to parse timestamp of type {}\".format(type(timestamp)))\n\n\ndef timestamp_to_str(timestamp, utc=False):\n \"\"\"\n Converts a Unix timestamp into a human-readable string representation.\n\n Parameters\n ----------\n timestamp : int\n Numerical Unix timestamp.\n\n Returns\n -------\n str\n Human-readable string representation of `timestamp`.\n\n \"\"\"\n num_digits = len(str(timestamp))\n ts_as_sec = timestamp*10**(10 - num_digits)\n if utc:\n datetime_obj = datetime.datetime.utcfromtimestamp(ts_as_sec)\n else:\n datetime_obj = datetime.datetime.fromtimestamp(ts_as_sec)\n return str(datetime_obj)\n\n\ndef now():\n \"\"\"\n Returns the current Unix timestamp with millisecond resolution.\n\n Returns\n -------\n now : int\n Current Unix timestamp in milliseconds.\n\n \"\"\"\n return timestamp_to_ms(time.time())\n\n\ndef get_python_version():\n \"\"\"\n Returns the version number of the locally-installed Python interpreter.\n\n Returns\n -------\n str\n Python version number in the form \"{major}.{minor}.{patch}\".\n\n \"\"\"\n return '.'.join(map(str, sys.version_info[:3]))\n\n\ndef save_notebook(notebook_path=None, timeout=5):\n \"\"\"\n Saves the current notebook on disk and returns its contents after the file has been rewritten.\n\n Parameters\n ----------\n notebook_path : str, optional\n Filepath of the Jupyter Notebook.\n timeout : float, default 5\n Maximum number of seconds to wait for the notebook to save.\n\n Returns\n -------\n notebook_contents : file-like\n An in-memory copy of the notebook's contents at the time this function returns. 
This can\n be ignored, but is nonetheless available to minimize the risk of a race condition caused by\n delaying the read until a later time.\n\n Raises\n ------\n OSError\n If the notebook is not saved within `timeout` seconds.\n\n \"\"\"\n if notebook_path is None:\n notebook_path = get_notebook_filepath()\n modtime = os.path.getmtime(notebook_path)\n\n display(Javascript('''\n require([\"base/js/namespace\"],function(Jupyter) {\n Jupyter.notebook.save_checkpoint();\n });\n '''))\n\n # wait for file to be modified\n start_time = time.time()\n while time.time() - start_time < timeout:\n new_modtime = os.path.getmtime(notebook_path)\n if new_modtime > modtime:\n break\n time.sleep(0.01)\n else:\n raise OSError(\"unable to save notebook\")\n\n # wait for file to be rewritten\n timeout -= (time.time() - start_time) # remaining time\n start_time = time.time()\n while time.time() - start_time < timeout:\n with open(notebook_path, 'r') as f:\n contents = f.read()\n if contents:\n return six.StringIO(contents)\n time.sleep(0.01)\n else:\n raise OSError(\"unable to read saved notebook\")\n\n\ndef get_notebook_filepath():\n \"\"\"\n Returns the filesystem path of the Jupyter notebook running the Client.\n\n This implementation is from https://github.com/jupyter/notebook/issues/1000#issuecomment-359875246.\n\n Returns\n -------\n str\n\n Raises\n ------\n OSError\n If one of the following is true:\n - Jupyter is not installed\n - Client is not being called from a notebook\n - the calling notebook cannot be identified\n\n \"\"\"\n try:\n connection_file = ipykernel.connect.get_connection_file()\n except (NameError, # Jupyter not installed\n RuntimeError): # not in a Notebook\n pass\n else:\n kernel_id = re.search('kernel-(.*).json', connection_file).group(1)\n for server in list_running_servers():\n response = requests.get(urljoin(server['url'], 'api/sessions'),\n params={'token': server.get('token', '')})\n if response.ok:\n for session in body_to_json(response):\n if session['kernel']['id'] == kernel_id:\n relative_path = session['notebook']['path']\n return os.path.join(server['notebook_dir'], relative_path)\n raise OSError(\"unable to find notebook file\")\n\n\ndef get_script_filepath():\n \"\"\"\n Returns the filesystem path of the Python script running the Client.\n\n This function iterates back through the call stack until it finds a non-Verta stack frame and\n returns its filepath.\n\n Returns\n -------\n str\n\n Raises\n ------\n OSError\n If the calling script cannot be identified.\n\n \"\"\"\n for frame_info in inspect.stack():\n module = inspect.getmodule(frame_info[0])\n if module is None or module.__name__.split('.', 1)[0] != \"verta\":\n filepath = frame_info[1]\n if os.path.exists(filepath): # e.g. Jupyter fakes the filename for cells\n return filepath\n else:\n break # continuing might end up returning a built-in\n raise OSError(\"unable to find script file\")\n\n\ndef is_org(workspace_name, conn):\n response = make_request(\n \"GET\",\n \"{}://{}/api/v1/uac-proxy/organization/getOrganizationByName\".format(conn.scheme, conn.socket),\n conn, params={'org_name': workspace_name},\n )\n\n return response.status_code != 404\n\n\ndef as_list_of_str(tags):\n \"\"\"\n Ensures that `tags` is a list of str.\n\n Parameters\n ----------\n tags : str or list of str\n If list of str, return unchanged. 
If str, return wrapped in a list.\n\n Returns\n -------\n tags : list of str\n Tags.\n\n Raises\n ------\n TypeError\n If `tags` is neither str nor list of str.\n\n \"\"\"\n # TODO: make error messages more general so this can be used for any similar var\n if isinstance(tags, six.string_types):\n tags = [tags]\n else:\n if not isinstance(tags, (list, tuple, set)):\n raise TypeError(\"`tags` should be list of str, not {}\".format(type(tags)))\n\n for tag in tags:\n if not isinstance(tag, six.string_types):\n raise TypeError(\"`tags` must be list of str, but found {}\".format(type(tag)))\n\n return tags\n"
] |
[
[
"pandas.Timestamp"
]
] |
RishikeshMagar/ManufacturingNet
|
[
"96e8624f0932123968d599a3b7c6511cd03a349d"
] |
[
"ManufacturingNet/shallow_learning_methods/all_classification_models.py"
] |
[
"from contextlib import redirect_stderr, redirect_stdout\nimport io\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC, NuSVC, LinearSVC\nimport time\nfrom xgboost import XGBClassifier\n\nclass AllClassificationModels:\n \"\"\"\n Wrapper class around all supported classification models: LogisticRegression, MLPClassifier, RandomForest, SVC,\n NuSVC, LinearSVC, and XGBClassifier.\n AllClassificationModels runs every available classification algorithm on the given dataset and outputs the mean\n accuracy, ROC-AUC, and execution time of each successful model when all_classification_models() is run.\n \"\"\"\n def __init__(self, attributes=None, labels=None, test_size=0.25, verbose=False):\n \"\"\"\n Initializes an AllClassificationModels object.\n\n The following parameters are needed to use an AllClassificationModels object:\n\n – attributes: a numpy array of the desired independent variables (Default is None)\n – labels: a numpy array of the classes (Default is None)\n – test_size: the proportion of the dataset to be used for testing the model;\n the proportion of the dataset to be used for training will be the complement of test_size (Default is 0.25)\n – verbose: specifies whether or not to ouput any and all logging during model training (Default is False)\n\n Note: These are the only parameters allowed. All other parameters for each model will use their default\n values. For more granular control, please instantiate each model individually.\n\n The following instance data is found after running all_classification_models() successfully:\n\n – logistic_regression: a reference to the LogisticRegression model\n – MLP: a reference to the MLPClassifier model\n – random_forest: a reference to the RandomForest model\n – SVC: a reference to the SVC model\n – nu_SVC: a reference to the NuSVC model\n – linear_SVC: a reference to the LinearSVC model\n – XGB_classifier: a reference to the XGBClassifier model\n\n After running all_classification_models(), the mean accuracy, ROC-AUC (if available), and execution time for\n each model that ran successfully will be displayed in tabular form. 
Any models that failed to run will be listed.\n \"\"\"\n self.attributes = attributes\n self.labels = labels\n self.test_size = test_size\n self.verbose = verbose\n\n self.logistic_regression = LogisticRegression(verbose=self.verbose)\n self.MLP = MLPClassifier(verbose=self.verbose)\n self.random_forest = RandomForestClassifier(verbose=self.verbose)\n self.SVC = SVC(verbose=self.verbose, probability=True)\n self.nu_SVC = NuSVC(verbose=self.verbose, probability=True)\n self.linear_SVC = LinearSVC(verbose=self.verbose)\n self.XGB_classifier = XGBClassifier(verbosity=int(self.verbose))\n\n self._classification_models = {\"Model\": [\"Accuracy\", \"ROC-AUC\", \"Time\"]}\n self._failures = []\n\n # Accessor methods\n\n def get_attributes(self):\n \"\"\"\n Accessor method for attributes.\n\n If an AllClassificationModels object is initialized without specifying attributes, attributes will be None.\n all_classification_models() cannot be called until attributes is a populated numpy array of independent variables;\n call set_attributes(new_attributes) to fix this.\n \"\"\"\n return self.attributes\n\n def get_labels(self):\n \"\"\"\n Accessor method for labels.\n\n If an AllClassificationModels object is initialized without specifying labels, labels will be None.\n all_classification_models() cannot be called until labels is a populated numpy array of classes;\n call set_labels(new_labels) to fix this.\n \"\"\"\n return self.labels\n\n def get_test_size(self):\n \"\"\"\n Accessor method for test_size.\n\n Should return a number or None.\n \"\"\"\n return self.test_size\n\n def get_verbose(self):\n \"\"\"\n Accessor method for verbose.\n\n Will default to False if not set by the user.\n \"\"\"\n return self.verbose\n\n def get_all_classification_models(self):\n \"\"\"\n Accessor method that returns a list of all models.\n\n All models within the list will be None if all_classification_models() hasn't been called, yet.\n \"\"\"\n return [self.logistic_regression, self.MLP, self.random_forest, self.SVC, self.nu_SVC, self.linear_SVC,\n self.XGB_classifier]\n\n def get_logistic_regression(self):\n \"\"\"\n Accessor method for logistic_regression.\n\n Will return None if all_classification_models() hasn't been called, yet.\n \"\"\"\n return self.logistic_regression\n\n def get_MLP(self):\n \"\"\"\n Accessor method for MLP.\n\n Will return None if all_classification_models() hasn't been called, yet.\n \"\"\"\n return self.MLP\n\n def get_random_forest(self):\n \"\"\"\n Accessor method for random_forest.\n\n Will return None if all_classification_models() hasn't been called, yet.\n \"\"\"\n return self.random_forest\n\n def get_SVC(self):\n \"\"\"\n Accessor method for SVC.\n\n Will return None if all_classification_models() hasn't been called, yet.\n \"\"\"\n return self.SVC\n \n def get_nu_SVC(self):\n \"\"\"\n Accessor method for nu_SVC.\n\n Will return None if all_classification_models() hasn't been called, yet.\n \"\"\"\n return self.nu_SVC\n \n def get_linear_SVC(self):\n \"\"\"\n Accessor method for linear_SVC.\n\n Will return None if all_classification_models() hasn't been called, yet.\n \"\"\"\n return self.linear_SVC\n\n def get_XGB_classifier(self):\n \"\"\"\n Accessor method for XGB_classifier.\n\n Will return None if all_classification_models() hasn't been called, yet.\n \"\"\"\n return self.XGB_classifier\n\n # Modifier methods\n\n def set_attributes(self, new_attributes=None):\n \"\"\"\n Modifier method for attributes.\n\n Input should be a numpy array of independent variables. 
Defaults to None.\n \"\"\"\n self.attributes = new_attributes\n\n def set_labels(self, new_labels=None):\n \"\"\"\n Modifier method for labels.\n\n Input should be a numpy array of classes. Defaults to None.\n \"\"\"\n self.labels = new_labels\n\n def set_test_size(self, new_test_size=0.25):\n \"\"\"\n Modifier method for test_size.\n\n Input should be a number or None. Defaults to 0.25.\n \"\"\"\n self.test_size = new_test_size\n\n def set_verbose(self, new_verbose=False):\n \"\"\"\n Modifier method for verbose.\n\n Input should be a truthy/falsy value. Defaults to False.\n \"\"\"\n self.verbose = new_verbose\n\n # Classification functionality\n\n def all_classification_models(self):\n \"\"\"\n Driver method for running all classification models with given attributes and labels.\n all_classification_models() first trains the models and determines their mean accuracy, ROC-AUC, and execution\n time via _all_classification_models_runner(). Then, all_classification_models() calls _print_results() to\n format and print each successful model's measurements, while also listing any failed models.\n\n If verbose is True, all verbose logging for each model will be enabled.\n If verbose is False, all logging to stdout and stderr will be suppressed.\n \"\"\"\n\n # Call helper method for running all classification models; suppress output, if needed\n if not self.verbose:\n suppress_output = io.StringIO()\n with redirect_stderr(suppress_output), redirect_stdout(suppress_output):\n self._all_classification_models_runner()\n else:\n self._all_classification_models_runner()\n \n # Print results\n self._print_results()\n\n # Helper methods\n\n def _all_classification_models_runner(self):\n \"\"\"\n Helper method that runs all models using the given dataset and all default parameters.\n After running all models, each model is determined to be either a success or failure, and relevant data\n (accuracy, ROC-AUC, execution time) is recorded.\n\n _all_classification_models_runner() may only be called by all_classification_models().\n \"\"\"\n\n # Split dataset\n dataset_X_train, dataset_X_test, dataset_y_train, dataset_y_test =\\\n train_test_split(self.attributes, self.labels, test_size=self.test_size)\n\n # Run and time all models; identify each as success or failure\n try:\n start_time = time.time()\n self.logistic_regression.fit(dataset_X_train, dataset_y_train)\n end_time = time.time()\n self._classification_models[\"LogisticRegression\"] =\\\n [self.logistic_regression.score(dataset_X_test, dataset_y_test),\n roc_auc_score(self.logistic_regression.predict(dataset_X_test),\n self.logistic_regression.predict_proba(dataset_X_test)[::, 1]),\n end_time - start_time]\n except:\n self._failures.append(\"LogisticRegression\")\n\n try: \n start_time = time.time()\n self.MLP.fit(dataset_X_train, dataset_y_train)\n end_time = time.time()\n self._classification_models[\"MLPClassifier\"] =\\\n [self.MLP.score(dataset_X_test, dataset_y_test),\n roc_auc_score(self.MLP.predict(dataset_X_test), self.MLP.predict_proba(dataset_X_test)[::, 1]),\n end_time - start_time]\n except:\n self._failures.append(\"MLPClassifier\")\n\n try: \n start_time = time.time()\n self.random_forest.fit(dataset_X_train, dataset_y_train)\n end_time = time.time()\n self._classification_models[\"RandomForest\"] =\\\n [self.random_forest.score(dataset_X_test, dataset_y_test),\n roc_auc_score(self.random_forest.predict(dataset_X_test),\n self.random_forest.predict_proba(dataset_X_test)[::, 1]),\n end_time - start_time]\n except:\n 
self._failures.append(\"RandomForest\")\n \n try:\n start_time = time.time()\n self.SVC.fit(dataset_X_train, dataset_y_train)\n end_time = time.time()\n self._classification_models[\"SVC\"] =\\\n [self.SVC.score(dataset_X_test, dataset_y_test),\n roc_auc_score(self.SVC.predict(dataset_X_test), self.SVC.predict_proba(dataset_X_test)[::, 1]),\n end_time - start_time]\n except:\n self._failures.append(\"SVC\")\n\n try:\n start_time = time.time()\n self.nu_SVC.fit(dataset_X_train, dataset_y_train)\n end_time = time.time()\n self._classification_models[\"NuSVC\"] =\\\n [self.nu_SVC.score(dataset_X_test, dataset_y_test),\n roc_auc_score(self.nu_SVC.predict(dataset_X_test), self.nu_SVC.predict_proba(dataset_X_test)[::, 1]),\n end_time - start_time]\n except:\n self._failures.append(\"NuSVC\")\n\n try:\n start_time = time.time()\n self.linear_SVC.fit(dataset_X_train, dataset_y_train)\n end_time = time.time()\n self._classification_models[\"LinearSVC\"] =\\\n [self.linear_SVC.score(dataset_X_test, dataset_y_test), \"Not Available\", end_time - start_time]\n except:\n self._failures.append(\"LinearSVC\")\n\n try:\n start_time = time.time()\n self.XGB_classifier.fit(dataset_X_train, dataset_y_train)\n end_time = time.time()\n self._classification_models[\"XGBClassifier\"] =\\\n [self.XGB_classifier.score(dataset_X_test, dataset_y_test),\n roc_auc_score(self.XGB_classifier.predict(dataset_X_test),\n self.XGB_classifier.predict_proba(dataset_X_test)[::, 1]),\n end_time - start_time]\n except:\n self._failures.append(\"XGBClassifier\")\n \n def _print_results(self):\n \"\"\"\n Helper method that prints results of _all_classification_models_runner() in tabular form.\n\n _print_results() may only be called by all_classification_models() after all models have attempted to run.\n \"\"\"\n\n # Print models that didn't fail\n print(\"\\nResults:\\n\")\n\n for model, data in self._classification_models.items():\n print(\"{:<20} {:<20} {:<20} {:<20}\".format(model, data[0], data[1], data[2]))\n\n print()\n\n # Print failures, if any\n if len(self._failures) > 0:\n print(\"The following models failed to run:\\n\")\n\n for entry in self._failures:\n print(entry)\n \n print()"
] |
[
[
"sklearn.neural_network.MLPClassifier",
"sklearn.svm.NuSVC",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.train_test_split",
"sklearn.svm.SVC",
"sklearn.svm.LinearSVC"
]
] |
Vole1/MC-CDP-BraTS2018
|
[
"32430dc87d40c8d1f41092598c839e0b34f32e7c"
] |
[
"src/models/unets_do.py"
] |
[
"from models.nasnet_do import NASNet_large_do\nfrom models.xception_padding import Xception\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.applications import DenseNet169\nfrom tensorflow.keras.layers import UpSampling2D, Conv2D, BatchNormalization, Activation, concatenate, Add\nfrom tensorflow.keras.utils import get_file\n\nfrom . import NetType\n\nresnet_filename = 'ResNet-{}-model.keras.h5'\nresnet_resource = 'https://github.com/fizyr/keras-models/releases/download/v0.0.1/{}'.format(resnet_filename)\n\n\ndef download_resnet_imagenet(v):\n v = int(v.replace('resnet', ''))\n\n filename = resnet_filename.format(v)\n resource = resnet_resource.format(v)\n if v == 50:\n checksum = '3e9f4e4f77bbe2c9bec13b53ee1c2319'\n elif v == 101:\n checksum = '05dc86924389e5b401a9ea0348a3213c'\n elif v == 152:\n checksum = '6ee11ef2b135592f8031058820bb9e71'\n\n return get_file(\n filename,\n resource,\n cache_subdir='models',\n md5_hash=checksum\n )\n\n\ndef conv_bn_relu(input, num_channel, kernel_size, stride, name, padding='same', bn_axis=-1, bn_momentum=0.99,\n bn_scale=True, use_bias=True):\n x = Conv2D(filters=num_channel, kernel_size=(kernel_size, kernel_size),\n strides=stride, padding=padding,\n kernel_initializer=\"he_normal\",\n use_bias=use_bias,\n name=name + \"_conv\")(input)\n x = BatchNormalization(name=name + '_bn', scale=bn_scale, axis=bn_axis, momentum=bn_momentum, epsilon=1.001e-5, )(x)\n x = Activation('relu', name=name + '_relu')(x)\n return x\n\n\ndef conv_relu(input, num_channel, kernel_size, stride, name, padding='same', use_bias=True, activation='relu'):\n x = Conv2D(filters=num_channel, kernel_size=(kernel_size, kernel_size),\n strides=stride, padding=padding,\n kernel_initializer=\"he_normal\",\n use_bias=use_bias,\n name=name + \"_conv\")(input)\n x = Activation(activation, name=name + '_relu')(x)\n return x\n\n\ndef create_pyramid_features(C1, C2, C3, C4, C5, feature_size=256):\n P5 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='P5', kernel_initializer=\"he_normal\")(C5)\n P5_upsampled = UpSampling2D(name='P5_upsampled')(P5)\n\n P4 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced',\n kernel_initializer=\"he_normal\")(C4)\n P4 = Add(name='P4_merged')([P5_upsampled, P4])\n P4 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P4', kernel_initializer=\"he_normal\")(P4)\n P4_upsampled = UpSampling2D(name='P4_upsampled')(P4)\n\n P3 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced',\n kernel_initializer=\"he_normal\")(C3)\n P3 = Add(name='P3_merged')([P4_upsampled, P3])\n P3 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P3', kernel_initializer=\"he_normal\")(P3)\n P3_upsampled = UpSampling2D(name='P3_upsampled')(P3)\n\n P2 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C2_reduced',\n kernel_initializer=\"he_normal\")(C2)\n P2 = Add(name='P2_merged')([P3_upsampled, P2])\n P2 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P2', kernel_initializer=\"he_normal\")(P2)\n P2_upsampled = UpSampling2D(size=(2, 2), name='P2_upsampled')(P2)\n\n P1 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C1_reduced',\n kernel_initializer=\"he_normal\")(C1)\n P1 = Add(name='P1_merged')([P2_upsampled, P1])\n P1 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P1', kernel_initializer=\"he_normal\")(P1)\n\n return P1, P2, P3, P4, P5\n\n\ndef decoder_block_no_bn(input, 
filters, skip, block_name, activation='relu'):\n x = UpSampling2D()(input)\n x = conv_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv1', activation=activation)\n x = concatenate([x, skip], axis=-1, name=block_name + '_concat')\n x = conv_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv2', activation=activation)\n return x\n\n\ndef prediction_fpn_block(x, name, upsample=None):\n x = conv_relu(x, 128, 3, stride=1, name=\"prediction_\" + name + \"_1\")\n x = conv_relu(x, 128, 3, stride=1, name=\"prediction_\" + name + \"_2\")\n if upsample:\n x = UpSampling2D(upsample)(x)\n return x\n\n\ndef xception_fpn(input_shape, channels=1, weights='imagenet', activation=\"sigmoid\"):\n xception = Xception(input_shape=input_shape, weights=weights, include_top=False)\n conv1 = xception.get_layer(\"block1_conv2_act\").output\n conv2 = xception.get_layer(\"block3_sepconv2_bn\").output\n conv3 = xception.get_layer(\"block4_sepconv2_bn\").output\n conv3 = Activation(\"relu\")(conv3)\n conv4 = xception.get_layer(\"block13_sepconv2_bn\").output\n conv4 = Activation(\"relu\")(conv4)\n conv5 = xception.get_layer(\"block14_sepconv2_act\").output\n\n P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)\n x = concatenate(\n [\n prediction_fpn_block(P5, \"P5\", (8, 8)),\n prediction_fpn_block(P4, \"P4\", (4, 4)),\n prediction_fpn_block(P3, \"P3\", (2, 2)),\n prediction_fpn_block(P2, \"P2\"),\n ]\n )\n x = conv_bn_relu(x, 256, 3, (1, 1), name=\"aggregation\")\n x = decoder_block_no_bn(x, 128, conv1, 'up4')\n x = UpSampling2D()(x)\n x = conv_relu(x, 64, 3, (1, 1), name=\"up5_conv1\")\n x = conv_relu(x, 64, 3, (1, 1), name=\"up5_conv2\")\n if activation == 'softmax':\n name = 'mask_softmax'\n x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)\n else:\n x = Conv2D(channels, (1, 1), activation=activation, name=\"mask\")(x)\n model = Model(xception.input, x)\n return model\n\n\ndef densenet_fpn(input_shape, channels=1, activation=\"sigmoid\"):\n densenet = DenseNet169(input_shape=input_shape, include_top=False)\n conv1 = densenet.get_layer(\"conv1/relu\").output\n conv2 = densenet.get_layer(\"pool2_relu\").output\n conv3 = densenet.get_layer(\"pool3_relu\").output\n conv4 = densenet.get_layer(\"pool4_relu\").output\n conv5 = densenet.get_layer(\"bn\").output\n conv5 = Activation(\"relu\", name=\"conv5_relu\")(conv5)\n\n P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)\n x = concatenate(\n [\n prediction_fpn_block(P5, \"P5\", (8, 8)),\n prediction_fpn_block(P4, \"P4\", (4, 4)),\n prediction_fpn_block(P3, \"P3\", (2, 2)),\n prediction_fpn_block(P2, \"P2\"),\n ]\n )\n x = conv_bn_relu(x, 256, 3, (1, 1), name=\"aggregation\")\n x = decoder_block_no_bn(x, 128, conv1, 'up4')\n x = UpSampling2D()(x)\n x = conv_relu(x, 64, 3, (1, 1), name=\"up5_conv1\")\n x = conv_relu(x, 64, 3, (1, 1), name=\"up5_conv2\")\n if activation == 'softmax':\n name = 'mask_softmax'\n x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)\n else:\n x = Conv2D(channels, (1, 1), activation=activation, name=\"mask\")(x)\n model = Model(densenet.input, x)\n return model\n\n\ndef nasnet_fpn_do(input_shape, net_type, channels=1, do_rate=0.3, total_training_steps=None, weights='imagenet',\n activation=\"softmax\"):\n nasnet = NASNet_large_do(input_shape=input_shape, net_type=net_type, do_rate=do_rate, include_top=False,\n total_training_steps=total_training_steps, weights=weights)\n conv1 = nasnet.get_layer(\"activation\").output # 
(\"stem_bn1\").output\n conv2 = nasnet.get_layer(\"reduction_concat_stem_1\").output\n conv3 = nasnet.get_layer(\"activation_134\").output # (\"normal_concat_5\").output\n conv4 = nasnet.get_layer(\"activation_252\").output # (\"normal_concat_12\").output\n conv5 = nasnet.get_layer(\"normal_concat_18\").output # (\"normal_concat_18\").output\n\n P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)\n x = concatenate(\n [\n prediction_fpn_block(P5, \"P5\", (8, 8)),\n prediction_fpn_block(P4, \"P4\", (4, 4)),\n prediction_fpn_block(P3, \"P3\", (2, 2)),\n prediction_fpn_block(P2, \"P2\"),\n ]\n )\n x = conv_bn_relu(x, 256, 3, (1, 1), name=\"aggregation\")\n x = decoder_block_no_bn(x, 128, conv1, 'up4')\n x = UpSampling2D()(x)\n x = conv_relu(x, 64, 3, (1, 1), name=\"up5_conv1\")\n x = conv_relu(x, 64, 3, (1, 1), name=\"up5_conv2\")\n if activation == 'softmax':\n name = 'mask_softmax'\n x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)\n else:\n x = Conv2D(channels, (1, 1), activation=activation, name=\"mask\")(x)\n model = Model(nasnet.input, x)\n return model\n\n\ndef nasnet_cdo_fpn(input_shape, channels=1, weights='imagenet', activation=\"sigmoid\"):\n return nasnet_fpn_do(input_shape, NetType.cdo, channels, weights=weights, activation=activation)\n\n\ndef nasnet_cdp_fpn(input_shape, channels=1, weights='imagenet', activation=\"sigmoid\"):\n return nasnet_fpn_do(input_shape, NetType.cdp, channels, weights=weights, activation=activation)\n\n\ndef nasnet_do_fpn(input_shape, channels=1, do_rate=0.3, weights='imagenet', activation=\"sigmoid\"):\n return nasnet_fpn_do(input_shape, NetType.mc_do, channels, do_rate, weights=weights, activation=activation)\n\n\ndef nasnet_df_fpn(input_shape, channels=1, do_rate=0.3, weights='imagenet', activation=\"sigmoid\"):\n return nasnet_fpn_do(input_shape, NetType.mc_df, channels, do_rate, weights=weights, activation=activation)\n\n\ndef nasnet_sdo_fpn(input_shape, channels=1, do_rate=0.3, total_training_steps=None, weights='imagenet',\n activation=\"sigmoid\"):\n return nasnet_fpn_do(input_shape, NetType.sdo, channels, do_rate, total_training_steps, weights, activation)\n\n\ndef nasnet_sdp_fpn(input_shape, channels=1, do_rate=0.3, total_training_steps=None, weights='imagenet',\n activation=\"sigmoid\"):\n return nasnet_fpn_do(input_shape, NetType.sdp, channels, do_rate, total_training_steps, weights, activation)\n"
] |
[
[
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.applications.DenseNet169",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.Model",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.utils.get_file",
"tensorflow.keras.layers.Add"
]
] |
KeVincenty/single_timestamps_action_recognition
|
[
"35a60bba81d2875a46e94bef247cd3554cc3f08b"
] |
[
"src/tf_model_zoo/bninception/parse_caffe.py"
] |
[
"#!/usr/bin/env python\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Convert a Caffe model and its learned parameters to torch\")\nparser.add_argument('model', help='network spec, usually a ProtoBuf text message')\nparser.add_argument('weights', help='network parameters, usually in a name like *.caffemodel ')\nparser.add_argument('--model_yaml', help=\"translated model spec yaml file\")\nparser.add_argument('--dump_weights', help=\"translated model parameters to be used by torch\")\nparser.add_argument('--model_version', help=\"the version of Caffe's model spec, usually 2\", default=2)\n\nargs = parser.parse_args()\n\nimport caffe_pb2\nfrom google.protobuf import text_format\nfrom pprint import pprint\nimport yaml\nimport numpy as np\nimport torch\n\n\nclass CaffeVendor(object):\n def __init__(self, net_name, weight_name, version=2):\n print(\"loading model spec...\")\n self._net_pb = caffe_pb2.NetParameter()\n text_format.Merge(open(net_name).read(), self._net_pb)\n self._weight_dict = {}\n self._init_dict = []\n\n if weight_name is not None:\n print(\"loading weights...\")\n self._weight_pb = caffe_pb2.NetParameter()\n self._weight_pb.ParseFromString(open(weight_name, 'rb').read())\n for l in self._weight_pb.layer:\n self._weight_dict[l.name] = l\n\n print(\"parsing...\")\n self._parse_net(version)\n\n def _parse_net(self, version):\n self._name = str(self._net_pb.name)\n self._layers = self._net_pb.layer if version == 2 else self._net_pb.layers\n self._parsed_layers = [self._layer2dict(x, version) for x in self._layers]\n\n self._net_dict = {\n 'name': self._name,\n 'inputs': [],\n 'layers': [],\n }\n\n self._weight_array_dict = {}\n\n for info, blob, is_data in self._parsed_layers:\n if not is_data and info is not None:\n self._net_dict['layers'].append(info)\n\n self._weight_array_dict.update(blob)\n\n @staticmethod\n def _parse_blob(blob):\n flat_data = np.array(blob.data)\n shaped_data = flat_data.reshape(list(blob.shape.dim))\n return shaped_data\n\n def _layer2dict(self, layer, version):\n attr_dict = {}\n params = []\n weight_params = []\n fillers = []\n\n for field, value in layer.ListFields():\n if field.name == 'top':\n tops = [v.replace('-', '_').replace('/', '_') for v in value]\n elif field.name == 'name':\n layer_name = str(value).replace('-', '_').replace('/', '_')\n elif field.name == 'bottom':\n bottoms = [v.replace('-', '_').replace('/', '_') for v in value]\n elif field.name == 'include':\n if value[0].phase == 1 and op == 'Data':\n print('found 1 testing data layer')\n return None, dict(), dict(), False\n elif field.name == 'type':\n if version == 2:\n op = value\n else:\n raise NotImplemented\n elif field.name == 'loss_weight':\n pass\n elif field.name == 'param':\n pass\n else:\n # other params\n try:\n for f, v in value.ListFields():\n if 'filler' in f.name:\n pass\n elif f.name == 'pool':\n attr_dict['mode'] = 'max' if v == 0 else 'ave'\n else:\n attr_dict[f.name] = v\n\n except:\n print(field.name, value)\n raise\n\n expr_temp = '{top}<={op}<={input}'\n\n if layer.name in self._weight_dict:\n blobs = [self._parse_blob(x) for x in self._weight_dict[layer.name].blobs]\n else:\n blobs = []\n\n blob_dict = dict()\n if len(blobs) > 0:\n blob_dict['{}.weight'.format(layer_name)] = torch.from_numpy(blobs[0])\n blob_dict['{}.bias'.format(layer_name)] = torch.from_numpy(blobs[1])\n if op == 'BN':\n blob_dict['{}.running_mean'.format(layer_name)] = torch.from_numpy(blobs[2])\n blob_dict['{}.running_var'.format(layer_name)] = 
torch.from_numpy(blobs[3])\n\n expr = expr_temp.format(top=','.join(tops), input=','.join(bottoms), op=op)\n\n out_dict = {\n 'id': layer_name,\n 'expr': expr,\n }\n\n if len(attr_dict) > 0:\n out_dict['attrs'] = attr_dict\n\n return out_dict, blob_dict, False\n\n @property\n def text_form(self):\n return str(self._net_pb)\n\n @property\n def info(self):\n return {\n 'name': self._name,\n 'layers': [x.name for x in self._layers]\n }\n\n @property\n def yaml(self):\n return yaml.dump(self._net_dict)\n\n def dump_weights(self, filename):\n # print self._weight_array_dict.keys()\n torch.save(self._weight_array_dict, open(filename, 'wb'))\n\n# build output\ncv = CaffeVendor(args.model, args.weights, int(args.model_version))\n\nif args.model_yaml is not None:\n open(args.model_yaml, 'w').write(cv.yaml)\n\nif args.dump_weights is not None:\n cv.dump_weights(args.dump_weights)\n"
] |
[
[
"numpy.array",
"torch.from_numpy"
]
] |
Leopard-X/tensorflow
|
[
"e4296aefff97e6edd3d7cee9a09b9dd77da4c034"
] |
[
"tensorflow/python/framework/function.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\"\"\"Python front-end supports for functions.\n\nNOTE: functions are currently experimental and subject to change!\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport hashlib\nimport re\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import function_pb2\nfrom tensorflow.core.framework import op_def_pb2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import op_def_registry\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.util import tf_decorator\nfrom tensorflow.python.util import tf_inspect\n\n\ndef _make_argname_from_tensor_name(name):\n return re.sub(\":0$\", \"\", name).replace(\":\", \"_o\")\n\n\ndef _tensor_to_argdef(t, name=None, used_names=None):\n \"\"\"Convert tensor t to an argdef, with a specified name or a unique name.\"\"\"\n arg = op_def_pb2.OpDef.ArgDef()\n if name is None:\n arg.name = _make_argname_from_tensor_name(t.name)\n if used_names is not None:\n if arg.name in used_names:\n i = 0\n while True:\n new_name = \"%s_U%d\" % (arg.name, i)\n if new_name not in used_names:\n arg.name = new_name\n break\n i += 1\n used_names.add(arg.name)\n else:\n arg.name = name\n arg.type = t.dtype.as_datatype_enum\n return arg\n\n\ndef _get_node_def(op):\n return op._node_def # pylint: disable=protected-access\n\n\ndef _get_op_def(op):\n return op.op_def or op_def_registry.get_registered_ops()[op.type]\n\n\ndef _is_in_placeholders(op, func_arg_placeholders):\n return op.values() and (op.values()[0].name in func_arg_placeholders)\n\n\ndef _create_input_dict(function_graph, func_arg_placeholders):\n \"\"\"Create a mapping from graph tensor names to function tensor names.\"\"\"\n input_dict = {}\n for op in function_graph.get_operations():\n if _is_in_placeholders(op, func_arg_placeholders):\n input_dict[op.values()[0].name] = op.values()[0].name\n input_dict[op.name] = op.name\n else:\n op_def = _get_op_def(op)\n attrs = _get_node_def(op).attr\n o = 0\n for arg_def in op_def.output_arg:\n if arg_def.number_attr:\n num = attrs[arg_def.number_attr].i\n elif arg_def.type_list_attr:\n num = len(attrs[arg_def.type_list_attr].list.type)\n else:\n num = 1\n for i in range(num):\n result = \"%s:%s:%d\" % (op.name, arg_def.name, i)\n input_dict[op.values()[o].name] = result\n if o == 0:\n input_dict[op.name] = result\n o += 1\n return input_dict\n\n\ndef _add_op_node(op, func, input_dict):\n \"\"\"Converts an op to a function def node and add it to `func`.\"\"\"\n # Add an entry in func.node_def\n\n # Note that extend() 
makes a copy in this case, see:\n # https://developers.google.com/protocol-buffers/docs/reference/python-generated#repeated-message-fields\n func.node_def.extend([_get_node_def(op)])\n node_def = func.node_def[-1]\n for i in range(len(node_def.input)):\n if not node_def.input[i].startswith(\"^\"):\n assert node_def.input[i] in input_dict, (\"%s missing from %s\" %\n (node_def.input[i],\n input_dict.items()))\n node_def.input[i] = input_dict[node_def.input[i]]\n\n\ndef _graph_to_function_def(graph, operations, inputs, outputs, out_names=None):\n \"\"\"Returns `graph` as a `FunctionDef` protocol buffer.\n\n This method creates a [`FunctionDef`](\n https://www.tensorflow.org/code/tensorflow/core/framework/function.proto)\n protocol buffer that contains all the ops in `operations`. The\n operations become the body of the function.\n\n The arguments `inputs` and `outputs` will be listed as the inputs\n and outputs tensors of the function. They must be lists of\n tensors present in the graph. The lists can optionally be empty.\n\n Args:\n graph: Graph.\n operations: the operations to put in the function. Must be a subset of\n the operations in the graph.\n inputs: List of tensors. Inputs to the function.\n outputs: List of tensors. Outputs of the function.\n out_names: Optional list of string names for the outputs.\n\n Returns:\n A FunctionDef protocol buffer.\n\n Raises:\n ValueError: if out_names is specified and the wrong length.\n \"\"\"\n func = function_pb2.FunctionDef()\n func.signature.name = \"_\"\n used_names = set()\n func.signature.input_arg.extend(\n [_tensor_to_argdef(i, used_names=used_names) for i in inputs])\n if out_names is None:\n used_names = set()\n func.signature.output_arg.extend(\n [_tensor_to_argdef(o, used_names=used_names) for o in outputs])\n elif len(outputs) != len(out_names):\n raise ValueError(\n \"Length of out_names (%d) does not match number of outputs (%d): %s\" %\n (len(out_names), len(outputs), \", \".join(out_names)))\n elif len(out_names) != len(set(out_names)):\n raise ValueError(\n \"Must not have duplicates in out_names: %s\" % \", \".join(out_names))\n else:\n func.signature.output_arg.extend(\n [_tensor_to_argdef(o, name=n) for o, n in zip(outputs, out_names)])\n func_arg_placeholders = set([i.name for i in inputs])\n input_dict = _create_input_dict(graph, func_arg_placeholders)\n\n for op in operations:\n if _is_in_placeholders(op, func_arg_placeholders):\n continue\n _add_op_node(op, func, input_dict)\n\n if out_names is None:\n for index, o in enumerate(outputs):\n k = func.signature.output_arg[index].name\n func.ret[k] = input_dict[o.name]\n else:\n for o, n in zip(outputs, out_names):\n func.ret[n] = input_dict[o.name]\n\n return func\n\n\ndef _parse_kwargs_as_attrs(func_name, **kwargs):\n \"\"\"Parses **kwargs into a node's attributes.\"\"\"\n attrs = {}\n\n noinline = kwargs.pop(\"noinline\", None)\n if noinline is not None:\n attrs[\"_noinline\"] = attr_value_pb2.AttrValue(b=bool(noinline))\n\n compiled = kwargs.pop(\"compiled\", None)\n separate_compiled_gradients = kwargs.pop(\"separate_compiled_gradients\", None)\n if compiled is not None:\n attrs[\"_XlaCompile\"] = attr_value_pb2.AttrValue(b=bool(compiled))\n attrs[\"_XlaSeparateCompiledGradients\"] = attr_value_pb2.AttrValue(\n b=bool(separate_compiled_gradients))\n attrs[\"_XlaScope\"] = attr_value_pb2.AttrValue(\n s=(\"function_%s\" % func_name).encode())\n\n if kwargs:\n raise ValueError(\"Unknown keyword arguments: %s\" % kwargs.keys())\n return attrs\n\n\ndef _call(sig, *inputs, 
**kwargs):\n \"\"\"Adds a node calling a function.\n\n This adds a `call` op to the default graph that calls the function\n of signature `sig`, passing the tensors in `inputs` as arguments.\n It returns the outputs of the call, which are one or more tensors.\n\n `sig` is OpDefArg.a `_DefinedFunction` object.\n\n You can pass an optional keyword parameter `name=string` to name the\n added operation.\n\n You can pass an optional keyword parameter `noinline=True|False` to\n instruct the runtime not to inline the function body into the call\n site.\n\n Args:\n sig: OpDefArg. The signature of the function.\n *inputs: arguments to the function.\n **kwargs: Optional keyword arguments. Can only contain 'name' or\n 'noinline'.\n\n Returns:\n A 2-element tuple. First element: a Tensor if the function returns a single\n value; a list of Tensors if the function returns multiple value; the\n Operation if the function returns no values. Second element: the Operation.\n\n Raises:\n ValueError: if the arguments are invalid.\n \"\"\"\n if len(inputs) != len(sig.input_arg):\n raise ValueError(\"Expected number of arguments: %d, received: %d\" %\n (len(sig.input_arg), len(inputs)))\n name = kwargs.pop(\"name\", None)\n g = ops.get_default_graph()\n func_name = sig.name\n attrs = _parse_kwargs_as_attrs(func_name, **kwargs)\n output_types = [dtypes.DType(x.type) for x in sig.output_arg]\n with ops.name_scope(name, func_name, inputs) as name:\n op = g.create_op(\n func_name,\n list(inputs),\n output_types,\n name=name,\n attrs=attrs,\n op_def=sig,\n compute_shapes=False)\n if op.outputs:\n if len(op.outputs) == 1:\n ret = op.outputs[0]\n else:\n ret = tuple(op.outputs)\n else:\n ret = op\n return ret, op\n\n\ndef _get_func_name(func):\n _, func = tf_decorator.unwrap(func)\n if callable(func):\n if tf_inspect.isfunction(func):\n return func.__name__\n elif tf_inspect.ismethod(func):\n return \"%s.%s\" % (func.__self__.__name__, func.__name__)\n else: # Probably a class instance with __call__\n return type(func)\n else:\n raise ValueError(\"Argument must be callable\")\n\n\nclass _FuncGraph(ops.Graph):\n \"\"\"A helper for construction a function.\n\n _FuncGraph overrides ops.Graph's create_op() so that we can keep\n track of every inputs into every op created inside the function. If\n any input is from other graphs, we keep track of it in self.capture\n and substitute the input with a place holder.\n\n Each captured input's corresponding place holder is converted into a\n function argument and the caller passes in the captured tensor.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(_FuncGraph, self).__init__(*args, **kwargs)\n self._building_function = True\n self._outer_graph = ops.get_default_graph()\n self._vscope = vs.get_variable_scope()\n self._old_custom_getter = self._vscope.custom_getter\n self._captured = {}\n self.extra_inputs = []\n self.extra_args = []\n self.extra_vars = []\n\n def getvar(\n self,\n getter,\n name,\n shape=None,\n dtype=None,\n initializer=None,\n reuse=None,\n trainable=True,\n collections=None, # pylint: disable=redefined-outer-name\n use_resource=None,\n **kwargs):\n \"\"\"A custom variable getter.\"\"\"\n # Here, we switch the default graph to the outer graph and ask the\n # variable scope in which the function is defined to give us the\n # variable. 
The variable is stashed in extra_vars and returned to\n # the caller.\n #\n # We capture these variables so that the variable definition is\n # hoisted upward to the outer most graph.\n with self._outer_graph.as_default():\n # pylint: disable=protected-access\n var = self._vscope.get_variable(\n vs._get_default_variable_store(),\n name,\n shape=shape,\n dtype=dtype,\n initializer=initializer,\n reuse=reuse,\n trainable=trainable,\n collections=collections,\n use_resource=use_resource)\n self.extra_vars.append(var)\n if isinstance(var, resource_variable_ops.ResourceVariable):\n # For resource-based variables read the variable outside the function\n # and pass in the value. This ensures that the function is pure and\n # differentiable. TODO(apassos) this may have performance problems if\n # the function will only do embedding lookups on the variable.\n return var.value()\n return var\n\n def create_op(self, op_type, inputs, data_types, **kwargs):\n for i, x in enumerate(inputs):\n if x.graph is not self:\n # Referring to a tensor from other graph.\n if x in self._captured:\n # Captured already.\n inputs[i] = self._captured[x]\n else:\n # Substitute with a placeholder.\n self.extra_inputs.append(x)\n ph = array_ops.placeholder(x.dtype, shape=x.get_shape())\n # pylint: disable=protected-access\n ph._handle_shape = x._handle_shape\n ph._handle_dtype = x._handle_dtype\n # pylint: enable=protected-access\n inputs[i] = ph\n self._captured[x] = ph\n self.extra_args.append(ph)\n return super(_FuncGraph, self).create_op(op_type, inputs, data_types,\n **kwargs)\n\n\ndef get_extra_vars():\n \"\"\"Returns the captured variables by the function.\n\n Returns:\n If the default graph is being used to define a function, the\n returned list of variables are those created inside the function\n body so far. Otherwise, returns an empty list.\n \"\"\"\n g = ops.get_default_graph()\n if isinstance(g, _FuncGraph):\n return g.extra_vars\n else:\n return []\n\n\ndef get_extra_inputs():\n \"\"\"Returns the captured input tensors by the function.\n\n Returns:\n If the default graph is being used to define a function, the\n returned list of tensors are those accessed inside the function body\n but defined outside the function body so far. Otherwise, returns an\n empty list.\n \"\"\"\n g = ops.get_default_graph()\n if isinstance(g, _FuncGraph):\n return g.extra_inputs\n else:\n return []\n\n\ndef get_extra_args():\n \"\"\"Returns the corresponding function arguments for the captured inputs.\n\n Returns:\n If the default graph is being used to define a function, the\n returned list of place holders are those used inside the function\n body corresponding those returned by get_extra_inputs(). Otherwise,\n returns an empty list.\n \"\"\"\n g = ops.get_default_graph()\n if isinstance(g, _FuncGraph):\n return g.extra_args\n else:\n return []\n\n\nclass _DefinedFunction(object):\n \"\"\"_DefinedFunction encapsulates a function definition and its properties.\n\n Attributes:\n name: The function name.\n definition: The definition of this function. 
A FunctionDef proto.\n grad_func_name: If not None, the name of this function's gradient function.\n python_grad_func: A python callable implementing the gradient of\n the function python-side.\n \"\"\"\n\n def __init__(self,\n func,\n argnames,\n input_types,\n func_name=None,\n grad_func=None,\n python_grad_func=None,\n out_names=None,\n shape_func=None,\n **kwargs):\n \"\"\"Creates _DefinedFunction.\n\n Args:\n func: A python callable which constructs a tf function body.\n argnames: A list of strings for function argument names.\n input_types: The function's argument types. Can be a tuple, list of\n tf data types.\n func_name: The function name. Defaults to None, in which derives from\n 'func'.\n grad_func: This function's gradient function, if not None. Defaults\n to None.\n python_grad_func: A python callable implementing the gradient of\n the function python-side.\n out_names: An optional list of strings for the function return value\n names.\n shape_func: An optional function mapping an op to a list of static\n output shapes.\n **kwargs: The keyword arguments. **kwargs is passed to every call\n site of this function.\n\n Raises:\n ValueError: The function definition is invalid.\n\n \"\"\"\n self._func = func\n self._input_types = input_types\n self._func_name = func_name\n self._grad_func = grad_func\n self._python_grad_func = python_grad_func\n self._out_names = out_names\n self._shape_func = shape_func\n self._extra_kwargs = kwargs\n self._definition = None # Constructed lazily.\n self._sub_functions = dict() # Constructed with definition.\n\n self._args = []\n assert isinstance(input_types, (list, tuple))\n for i in range(len(input_types)):\n argname = argnames[i] if i < len(argnames) else (\"arg%d\" % i)\n argtype = input_types[i]\n self._args.append((argname, argtype))\n\n @property\n def name(self):\n \"\"\"Function name.\"\"\"\n self._create_definition_if_needed()\n return self._func_name\n\n @property\n def definition(self):\n \"\"\"Function definition proto.\"\"\"\n self._create_definition_if_needed()\n return self._definition\n\n def set_grad_func(self, grad_func):\n \"\"\"Specifies the gradient function of this function.\"\"\"\n assert not self._grad_func\n assert isinstance(grad_func, _DefinedFunction)\n self._grad_func = grad_func\n\n @property\n def grad_func_name(self):\n \"\"\"Its gradient function's name.\"\"\"\n return self._grad_func.name if self._grad_func else None\n\n @property\n def python_grad_func(self):\n \"\"\"Python gradient function callable.\"\"\"\n return self._python_grad_func\n\n @property\n def declared_input_types(self):\n \"\"\"Returns the list of data types of explicit declared inputs.\"\"\"\n return self._input_types\n\n @property\n def captured_inputs(self):\n \"\"\"Returns the list of implicitly captured inputs.\"\"\"\n self._create_definition_if_needed()\n return self._extra_inputs\n\n def _create_definition_if_needed(self):\n \"\"\"Creates the function definition if it's not created yet.\"\"\"\n\n if self._definition is not None:\n return\n\n # Create the func_def object.\n temp_graph = _FuncGraph()\n with temp_graph.as_default():\n # List of placeholders for the function_def.\n inputs = []\n for (argname, argtype) in self._args:\n argholder = array_ops.placeholder(argtype, name=argname)\n inputs.append(argholder)\n # Call func and gather the output tensors.\n with vs.variable_scope(\"\", custom_getter=temp_graph.getvar):\n outputs = self._func(*inputs)\n # If func only returned one value, make it a tuple.\n if not isinstance(outputs, (list, 
tuple)):\n outputs = (outputs,)\n if any([_ is None for _ in outputs]):\n raise ValueError(\"Function can not return None.\")\n # Ensures each output is a Tensor.\n outputs = [ops.convert_to_tensor(_) for _ in outputs]\n self._extra_inputs = temp_graph.extra_inputs\n inputs.extend(temp_graph.extra_args)\n # pylint: disable=protected-access\n self._sub_functions = temp_graph._functions\n # pylint: enable=protected-access\n\n # Build the FunctionDef\n self._definition = _graph_to_function_def(\n temp_graph,\n temp_graph.get_operations(),\n inputs,\n outputs,\n out_names=self._out_names)\n\n # Extra kwargs are treated as attrs on the function def.\n sig_pre_func_name = self._func_name or _get_func_name(self._func)\n kwargs_attr = _parse_kwargs_as_attrs(sig_pre_func_name,\n **self._extra_kwargs)\n for k in kwargs_attr:\n self._definition.attr[k].CopyFrom(kwargs_attr[k])\n\n # Hash the definition and its dependencies.\n self._hash_str = self._create_hash_str(\n self._definition.signature.input_arg,\n self._definition.signature.output_arg, self._definition.node_def)\n\n # Finally, we decide the function name to use. If not specified,\n # make up something which is almost certainly unique (but deterministic).\n if not self._func_name:\n self._func_name = \"_\".join([_get_func_name(self._func), self._hash_str])\n self._definition.signature.name = self._func_name\n if self._func.__doc__:\n self._definition.signature.description = self._func.__doc__\n\n def _create_hash_str(self, input_arg, output_arg, node_def):\n \"\"\"Creates an 8-character string unique to this input.\n\n Args:\n input_arg: the input_arg field of an OpDef\n (e.g. self._definition.signature.input_arg)\n output_arg: the output_arg field of an OpDef\n (e.g. self._definition.signature.output_arg)\n node_def: the node_def field of a FunctionDef\n (e.g. 
self._definition.node_def)\n\n Returns:\n The unique string for this input\n \"\"\"\n hasher = hashlib.sha1()\n\n def update_num(n):\n hasher.update(compat.as_bytes(\"%x\" % n))\n\n def update_str(s):\n update_num(len(s))\n hasher.update(compat.as_bytes(s))\n\n def update_strs(slist):\n update_num(len(slist))\n for s in slist:\n update_str(s)\n\n for adef in input_arg:\n update_str(adef.SerializeToString())\n\n for adef in output_arg:\n update_str(adef.SerializeToString())\n\n for n in sorted(node_def, key=lambda n: n.name):\n update_str(n.name)\n update_str(n.op)\n update_strs(n.input)\n update_num(len(n.attr))\n # NOTE: protobuf map serialization does not guarantee ordering.\n for k in sorted(n.attr):\n update_str(k)\n update_str(n.attr[k].SerializeToString())\n\n return hasher.hexdigest()[:8]\n\n def add_to_graph(self, g):\n \"\"\"Adds this function into the graph g.\"\"\"\n self._create_definition_if_needed()\n\n # pylint: disable=protected-access\n # If 'g' has an identical function already, do nothing.\n prev = g._get_function(self.name)\n if prev and (prev._hash_str == self._hash_str):\n return\n\n # Adds this function into 'g'.\n g._add_function(self)\n # pylint: enable=protected-access\n\n # Ensures related sub-routines are defined in 'g', too.\n for f in self._sub_functions.values():\n f.add_to_graph(g)\n\n # Adds its gradient function, too.\n if self._grad_func:\n self._grad_func.add_to_graph(g)\n\n def __call__(self, *args, **kwargs):\n self.add_to_graph(ops.get_default_graph())\n args = [ops.convert_to_tensor(_) for _ in args] + self._extra_inputs\n ret, op = _call(self._definition.signature, *args, **kwargs)\n if self._shape_func is not None:\n shapes = self._shape_func(op)\n if len(shapes) != len(op.outputs):\n raise ValueError(\"shape_func produced %d shapes for %d outputs\" %\n (len(shapes), len(op.outputs)))\n for (t, shape) in zip(op.outputs, shapes):\n t.set_shape(shape)\n return ret\n\n\ndef _from_definition(fdef, grad_func=None):\n \"\"\"Creates a _DefinedFunction initialized from a FunctionDef proto.\n\n Args:\n fdef: a FunctionDef\n grad_func: a _DefinedFunction or None\n\n Returns:\n A _DefinedFunction representing fdef\n \"\"\"\n # The Python callable is only needed to create a FunctionDef. Since we have\n # the FunctionDef here, we don't need to set _DefinedFunction._func (nor do we\n # have access to such a callable here).\n func = None\n argnames = [arg.name for arg in fdef.signature.input_arg]\n input_types = tuple(\n dtypes.as_dtype(arg.type) for arg in fdef.signature.input_arg)\n func_name = fdef.signature.name\n # Note: FunctionDefs do not include python gradient functions, so if the\n # original _DefinedFunction included one it will not be reflected here.\n python_grad_func = None\n out_names = [arg.name for arg in fdef.signature.output_arg]\n result = _DefinedFunction(func, argnames, input_types, func_name, grad_func,\n python_grad_func, out_names)\n # pylint: disable=protected-access\n result._definition = fdef\n # Captured inputs are added as regular inputs to a function when it's\n # serialized, i.e. 
any extra inputs from the original function are now\n # included in `result`._args\n result._extra_inputs = []\n result._hash_str = result._create_hash_str(\n result._definition.signature.input_arg,\n result._definition.signature.output_arg, result._definition.node_def)\n # pylint: enable=protected-access\n return result\n\n\ndef _from_library(lib):\n \"\"\"Creates _DefinedFunctions initialized from a FunctionDefLibrary proto.\n\n This method handles assigning the correct gradient functions to each\n function.\n\n Args:\n lib: a FunctionDefLibrary\n\n Returns:\n A list of _DefinedFunctions\n\n Raises:\n ValueError: `lib` is invalid\n \"\"\"\n if not lib.function and not lib.gradient:\n return []\n\n # function name -> FunctionDef proto\n funcs = {fdef.signature.name: fdef for fdef in lib.function}\n\n # Validate that all references function names have function defs\n for g in lib.gradient:\n if g.function_name not in funcs:\n raise ValueError(\"FunctionDefLibrary missing '%s' FunctionDef\\n%s\" %\n (g.function_name, str(lib)))\n if g.gradient_func not in funcs:\n raise ValueError(\"FunctionDefLibrary missing '%s' FunctionDef\\n%s\" %\n (g.gradient_func, str(lib)))\n\n # function name -> gradient function name\n func_to_grad = collections.defaultdict(lambda: None)\n # gradient function name -> names of functions having that grad function\n grad_to_funcs = collections.defaultdict(list)\n\n for gdef in lib.gradient:\n func_to_grad[gdef.function_name] = gdef.gradient_func\n grad_to_funcs[gdef.gradient_func].append(gdef.function_name)\n\n # Start with functions without gradients\n ready = [\n fdef for fdef in lib.function if func_to_grad[fdef.signature.name] is None\n ]\n if not ready:\n raise ValueError(\"FunctionDefLibrary contains cyclic gradient functions!\\n\"\n + str(lib))\n # function name -> _DefinedFunction\n initialized = {}\n\n while ready:\n fdef = ready.pop()\n name = fdef.signature.name\n\n grad = initialized.get(func_to_grad[name])\n if func_to_grad[name]:\n assert grad\n defined_func = _from_definition(fdef, grad_func=grad)\n initialized[name] = defined_func\n\n ready.extend(funcs[f] for f in grad_to_funcs[name])\n\n return initialized.values()\n\n\n# NOTE: The list needs to be extended when more data types are added.\n_DTYPE_TO_STR = {\n dtypes.float16: \"f16\",\n dtypes.float32: \"f32\",\n dtypes.float64: \"f64\",\n dtypes.int32: \"i32\",\n dtypes.uint8: \"i8\",\n dtypes.uint16: \"u16\",\n dtypes.int16: \"i16\",\n dtypes.int8: \"i8\",\n dtypes.string: \"s\",\n dtypes.complex64: \"c64\",\n dtypes.complex128: \"c128\",\n dtypes.int64: \"i64\",\n dtypes.bool: \"b\",\n dtypes.qint8: \"qi8\",\n dtypes.quint8: \"qu8\",\n dtypes.qint16: \"qi16\",\n dtypes.quint16: \"qu16\",\n dtypes.qint32: \"qi32\",\n dtypes.bfloat16: \"b16\"\n}\n\n\ndef _type_list_to_str(types):\n if any([_ not in _DTYPE_TO_STR for _ in types]):\n raise ValueError(\"Unsupported dtypes: %s\" % types)\n return \"\".join([_DTYPE_TO_STR[_] for _ in types])\n\n\nclass _OverloadedFunction(object):\n \"\"\"_OverloadedFunction encapsulates an overloaded function.\n\n _OverloadedFunction maintains a mapping from input types to\n instantiated _DefinedFunction in self._overload.\n\n \"\"\"\n\n def __init__(self,\n func,\n argnames,\n func_name=None,\n grad_func=None,\n python_grad_func=None,\n out_names=None,\n **kwargs):\n \"\"\"Creates _DefinedFunction.\n\n Args:\n func: A python callable which constructs a tf function body.\n argnames: A list of strings for function argument names.\n func_name: The function name. 
Defaults to None, in which derives from\n 'func'.\n grad_func: This function's gradient function, if not None. Defaults\n to None.\n python_grad_func: A python callable implementing the gradient of\n the function python-side.\n out_names: A list of strings for the function return value names.\n **kwargs: The keyword arguments. **kwargs is passed to every call\n site of this function.\n\n Raises:\n ValueError: The function definition is invalid.\n\n \"\"\"\n self._func = func\n self._argnames = argnames\n self._func_name = func_name\n assert grad_func is None or isinstance(grad_func, _OverloadedFunction)\n self._grad_func = grad_func\n self._python_grad_func = python_grad_func\n self._out_names = out_names\n self._extra_kwargs = kwargs\n self._overload = {}\n\n def instantiate(self, input_types):\n \"\"\"Instantiate this function given input argument types.\n\n Args:\n input_types: A list of data types for the inputs.\n\n Returns:\n _DefinedFunction for the given input types.\n\n \"\"\"\n # Stringify the type list.\n key = _type_list_to_str(input_types)\n defined = self._overload.get(key)\n if not defined:\n # If not defined yet, define the function given the input types.\n name = self._func_name\n if name is not None:\n name = \"_\".join([name, key])\n defined = _DefinedFunction(\n self._func,\n self._argnames,\n input_types,\n name,\n None,\n self._python_grad_func,\n out_names=self._out_names,\n **self._extra_kwargs)\n _ = defined.name # Fully instantiate the function definition.\n if self._grad_func:\n # If _grad_func is given, it is another\n # _OverloadedFunction. We need to instantiate it with the\n # right input types.\n output_types = [\n dtypes.DType(_.type)\n for _ in defined.definition.signature.output_arg\n ]\n # pylint: disable=protected-access\n defined._grad_func = self._grad_func.instantiate(\n input_types + output_types)\n # pylint: enable=protected-access\n self._overload[key] = defined\n return defined\n\n def __call__(self, *args, **kwargs):\n input_types = []\n args = list(args)\n for (i, x) in enumerate(args):\n x = ops.convert_to_tensor(x)\n if not isinstance(x, ops.Tensor):\n raise ValueError(\"Expect a Tensor but get \", x)\n input_types.append(x.dtype)\n args[i] = x\n return self.instantiate(input_types)(*args, **kwargs)\n\n\nclass Defun(object):\n \"\"\"Decorator used to define TensorFlow functions.\n\n Use this decorator to make a Python function usable directly as a TensorFlow\n function.\n\n The decorated function must add ops to the default graph and return zero or\n more `Tensor` objects. Call the decorator with named arguments, one for each\n argument of the function to decorate, with the expected type of the argument\n as value.\n\n For example if the function to decorate accepts two `tf.float32` arguments\n named `x` and `y`, call the decorator with:\n\n @Defun(tf.float32, tf.float32)\n def foo(x, y):\n ...\n\n When you call the decorated function it will add `call` ops to the\n default graph and adds the definition of the function into the\n default graph. Because the addition of the function into the graph\n is deferred, the decorator can be used anywhere in the program.\n\n Any variables created inside of the function are hoisted into the outer graph.\n Note that the variables are created in the variable scope that was active\n during the first call to the function. Subsequent function calls will refer to\n the same set of variables.\n\n Definitions of functions are frozen in a graph as soon as the graph is used to\n create a session. 
Therefore, nodes using the function must be created in the\n graph before the corresponding session is created.\n\n Example, but also see the [How To on functions](link_needed).\n\n ```python\n # Defining the function.\n @tf.Defun(tf.float32, tf.float32)\n def MyFunc(x, y):\n return x + y, x - y\n\n # Building the graph.\n a = tf.Constant([1.0])\n b = tf.Constant([2.0])\n c, d = MyFunc(a, b, name='mycall')\n ```\n \"\"\"\n\n def __init__(self, *input_types, **kwargs):\n \"\"\"Create a `Defun` decorator.\n\n Args:\n *input_types: A list of `tf.DType`\n **kwargs: Optional keyword arguments, including\n func_name - (optional). A python string, the name to use to\n declare this `Function` in the graph.\n\n grad_func - (optional). A function implementing the gradient\n of the function-to-register. This is either a\n `_DefinedFunction` or a `Declare` object. The gradient\n function must satisify the criterion defined in\n function.proto:GradientDef.\n\n python_grad_func - (optional). A function implementing the\n gradient of the function python-side. This function must\n take the current op and the gradients w.r.t. its outputs,\n and return the gradients w.r.t. the inputs. That is it must\n implement the interface expected by `tf.RegisterGradient`).\n This will be called by tf.gradients to add the gradient ops\n to the graph. At most one of grad_func and python_grad_func\n can be specified.\n\n out_names = (optional). A list of strings, one per output\n tensor.\n\n shape_func - (optional). A function taking the op and returning a list\n of static shapes to set for the function's outputs.\n \"\"\"\n self._input_types = input_types\n self._func_name = kwargs.pop(\"func_name\", None)\n self._grad_func = kwargs.pop(\"grad_func\", None)\n self._python_grad_func = kwargs.pop(\"python_grad_func\", None)\n self._out_names = kwargs.pop(\"out_names\", None)\n self._extra_kwargs = kwargs\n\n def __call__(self, func):\n # Various sanity checks on the callable func.\n if not callable(func):\n raise ValueError(\"func %s must be callable\" % func)\n\n # Func should not use kwargs and defaults.\n argspec = tf_inspect.getargspec(func)\n if argspec.keywords or argspec.defaults:\n raise ValueError(\"Functions with argument defaults or keyword \"\n \"arguments are not supported.\")\n\n # Computes how many arguments 'func' has.\n min_args = len(argspec.args)\n max_args = min_args\n if argspec.varargs:\n max_args = 1000000\n argnames = argspec.args\n if tf_inspect.ismethod(func):\n # 1st argument is the \"class\" type.\n min_args -= 1\n argnames = argnames[1:]\n\n if self._input_types:\n # If Defun is given a list of types for the inputs, the number\n # of input types should be compatible with 'func'.\n num = len(self._input_types)\n if num < min_args or num > max_args:\n raise ValueError(\n \"The function has fewer arguments than the number of specified \"\n \"input types.\")\n return _DefinedFunction(\n func,\n argnames,\n self._input_types,\n self._func_name,\n self._grad_func,\n self._python_grad_func,\n out_names=self._out_names,\n **self._extra_kwargs)\n\n # 'func' expects no arguments and input types is an empty list.\n if min_args == 0 and max_args == 0:\n return _DefinedFunction(\n func, [], [],\n self._func_name,\n self._grad_func,\n self._python_grad_func,\n out_names=self._out_names,\n **self._extra_kwargs)\n\n # Input types are unknown. 
It's an overloaded function and hence\n # its definition needs to be deferred until it's called.\n return _OverloadedFunction(\n func,\n argnames,\n self._func_name,\n self._grad_func,\n self._python_grad_func,\n out_names=self._out_names,\n **self._extra_kwargs)\n\n\nclass Declare(object):\n \"\"\"Declares a TensorFlow function.\n\n The object represents a TensorFlow function which will be defined\n later during a graph construction.\n\n For example,\n # Declares a function Foo, which takes a tf.int32 named \"n\" and a\n # tf.float32 named \"x\" as inputs and returns a tf.float32 named \"z\"\n # as its output.\n foo = Declare(\"Foo\", [(\"n\", tf.int32), (\"x\", tf.float32)],\n [(\"z\", tf.float32)])\n\n # Defines a function Bar calls Foo.\n @tf.Defun(tf.float32)\n def Bar(x):\n return foo(6, x)\n\n # Defines Foo, with output named \"z\".\n @tf.Defun(tf.int32, tf.float32, out_names=[\"z\"])\n def Foo(n, x):\n ... # Calculation.\n return result\n \"\"\"\n\n def __init__(self, func_name, inputs, outputs):\n \"\"\"Creates a `Declare` object.\n\n Args:\n func_name: The name of the function.\n inputs: A list of (name, data type) pairs of function arguments.\n outputs: A list of (name, data type) pairs of function return values.\n \"\"\"\n self._sig = op_def_pb2.OpDef()\n self._sig.name = func_name\n\n def _to_argdef_list(args):\n names = [n for n, t in args]\n if len(names) != len(set(names)):\n raise ValueError(\"Expected names to all be unique: %s\" % str(names))\n return [\n op_def_pb2.OpDef.ArgDef(type=t.as_datatype_enum, name=n)\n for n, t in args\n ]\n\n self._sig.input_arg.extend(_to_argdef_list(inputs))\n self._sig.output_arg.extend(_to_argdef_list(outputs))\n\n def __call__(self, *inputs, **kwargs):\n inputs = [ops.convert_to_tensor(_) for _ in inputs]\n return _call(self._sig, *inputs, **kwargs)[0]\n"
] |
[
[
"tensorflow.core.framework.function_pb2.FunctionDef",
"tensorflow.python.util.tf_inspect.ismethod",
"tensorflow.core.framework.op_def_pb2.OpDef",
"tensorflow.python.ops.variable_scope.get_variable_scope",
"tensorflow.python.ops.variable_scope._get_default_variable_store",
"tensorflow.python.util.tf_inspect.isfunction",
"tensorflow.core.framework.op_def_pb2.OpDef.ArgDef",
"tensorflow.python.framework.op_def_registry.get_registered_ops",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.framework.dtypes.as_dtype",
"tensorflow.python.ops.array_ops.placeholder",
"tensorflow.python.util.compat.as_bytes",
"tensorflow.python.framework.dtypes.DType",
"tensorflow.python.util.tf_inspect.getargspec",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.util.tf_decorator.unwrap"
]
] |
Tyelab/sleap
|
[
"67b4b3e762571e70beadf940a64ed62d9481dafe"
] |
[
"sleap/nn/model.py"
] |
[
"\"\"\"This module defines the main SLEAP model class for defining a trainable model.\n\nThis is a higher level wrapper around `tf.keras.Model` that holds all the configuration\nparameters required to construct the actual model. This allows for easy querying of the\nmodel configuration without actually instantiating the model itself.\n\"\"\"\nimport tensorflow as tf\n\nimport attr\nfrom typing import List, TypeVar, Optional, Text, Tuple\n\nimport sleap\nfrom sleap.nn.architectures import (\n LeapCNN,\n UNet,\n Hourglass,\n ResNetv1,\n ResNet50,\n ResNet101,\n ResNet152,\n UnetPretrainedEncoder,\n IntermediateFeature,\n)\nfrom sleap.nn.heads import (\n Head,\n CentroidConfmapsHead,\n SingleInstanceConfmapsHead,\n CenteredInstanceConfmapsHead,\n MultiInstanceConfmapsHead,\n PartAffinityFieldsHead,\n ClassMapsHead,\n ClassVectorsHead,\n OffsetRefinementHead,\n)\nfrom sleap.nn.config import (\n LEAPConfig,\n UNetConfig,\n HourglassConfig,\n ResNetConfig,\n PretrainedEncoderConfig,\n SingleInstanceConfmapsHeadConfig,\n CentroidsHeadConfig,\n CenteredInstanceConfmapsHeadConfig,\n MultiInstanceConfig,\n MultiClassBottomUpConfig,\n MultiClassTopDownConfig,\n BackboneConfig,\n HeadsConfig,\n ModelConfig,\n)\nfrom sleap.nn.data.utils import ensure_list\n\n\nARCHITECTURES = [\n LeapCNN,\n UNet,\n Hourglass,\n ResNetv1,\n ResNet50,\n ResNet101,\n ResNet152,\n UnetPretrainedEncoder,\n]\nARCHITECTURE_NAMES = [cls.__name__ for cls in ARCHITECTURES]\nArchitecture = TypeVar(\"Architecture\", *ARCHITECTURES)\n\nBACKBONE_CONFIG_TO_CLS = {\n LEAPConfig: LeapCNN,\n UNetConfig: UNet,\n HourglassConfig: Hourglass,\n ResNetConfig: ResNetv1,\n PretrainedEncoderConfig: UnetPretrainedEncoder,\n}\n\nHEADS = [\n Head,\n CentroidConfmapsHead,\n SingleInstanceConfmapsHead,\n CenteredInstanceConfmapsHead,\n MultiInstanceConfmapsHead,\n PartAffinityFieldsHead,\n ClassMapsHead,\n ClassVectorsHead,\n OffsetRefinementHead,\n]\nHead = TypeVar(\"Head\", *HEADS)\n\n\[email protected](auto_attribs=True)\nclass Model:\n \"\"\"SLEAP model that describes an architecture and output types.\n\n Attributes:\n backbone: An `Architecture` class that provides methods for building a\n tf.keras.Model given an input.\n heads: List of `Head`s that define the outputs of the network.\n keras_model: The current `tf.keras.Model` instance if one has been created.\n \"\"\"\n\n backbone: Architecture\n heads: List[Head] = attr.ib(converter=ensure_list)\n keras_model: Optional[tf.keras.Model] = None\n\n @classmethod\n def from_config(\n cls,\n config: ModelConfig,\n skeleton: Optional[sleap.Skeleton] = None,\n tracks: Optional[List[sleap.Track]] = None,\n update_config: bool = False,\n ) -> \"Model\":\n \"\"\"Create a SLEAP model from configurations.\n\n Arguments:\n config: The configurations as a `ModelConfig` instance.\n skeleton: A `sleap.Skeleton` to use if not provided in the config.\n update_config: If `True`, the input model configuration will be updated with\n values inferred from other fields.\n\n Returns:\n An instance of `Model` built with the specified configurations.\n \"\"\"\n # Figure out which backbone class to use.\n backbone_config = config.backbone.which_oneof()\n backbone_cls = BACKBONE_CONFIG_TO_CLS.get(type(backbone_config), None)\n if backbone_cls is None:\n raise ValueError(\n \"Backbone architecture (config.model.backbone) was not specified.\"\n )\n\n # Figure out which head class to use.\n head_config = config.heads.which_oneof()\n if isinstance(head_config, SingleInstanceConfmapsHeadConfig):\n part_names = 
head_config.part_names\n if part_names is None:\n if skeleton is None:\n raise ValueError(\n \"Skeleton must be provided when the head configuration is \"\n \"incomplete.\"\n )\n part_names = skeleton.node_names\n if update_config:\n head_config.part_names = part_names\n heads = [\n SingleInstanceConfmapsHead.from_config(\n head_config, part_names=part_names\n )\n ]\n output_stride = heads[0].output_stride\n if head_config.offset_refinement:\n heads.append(\n OffsetRefinementHead.from_config(head_config, part_names=part_names)\n )\n\n elif isinstance(head_config, CentroidsHeadConfig):\n heads = [CentroidConfmapsHead.from_config(head_config)]\n output_stride = heads[0].output_stride\n if head_config.offset_refinement:\n heads.append(OffsetRefinementHead.from_config(head_config))\n\n elif isinstance(head_config, CenteredInstanceConfmapsHeadConfig):\n part_names = head_config.part_names\n if part_names is None:\n if skeleton is None:\n raise ValueError(\n \"Skeleton must be provided when the head configuration is \"\n \"incomplete.\"\n )\n part_names = skeleton.node_names\n if update_config:\n head_config.part_names = part_names\n heads = [\n CenteredInstanceConfmapsHead.from_config(\n head_config, part_names=part_names\n )\n ]\n output_stride = heads[0].output_stride\n if head_config.offset_refinement:\n heads.append(\n OffsetRefinementHead.from_config(head_config, part_names=part_names)\n )\n\n elif isinstance(head_config, MultiInstanceConfig):\n part_names = head_config.confmaps.part_names\n if part_names is None:\n if skeleton is None:\n raise ValueError(\n \"Skeleton must be provided when the head configuration is \"\n \"incomplete.\"\n )\n part_names = skeleton.node_names\n if update_config:\n head_config.confmaps.part_names = part_names\n\n edges = head_config.pafs.edges\n if edges is None:\n if skeleton is None:\n raise ValueError(\n \"Skeleton must be provided when the head configuration is \"\n \"incomplete.\"\n )\n edges = skeleton.edge_names\n if update_config:\n head_config.pafs.edges = edges\n\n heads = [\n MultiInstanceConfmapsHead.from_config(\n head_config.confmaps, part_names=part_names\n ),\n PartAffinityFieldsHead.from_config(head_config.pafs, edges=edges),\n ]\n output_stride = min(heads[0].output_stride, heads[1].output_stride)\n output_stride = heads[0].output_stride\n if head_config.confmaps.offset_refinement:\n heads.append(\n OffsetRefinementHead.from_config(\n head_config.confmaps, part_names=part_names\n )\n )\n\n elif isinstance(head_config, MultiClassBottomUpConfig):\n part_names = head_config.confmaps.part_names\n if part_names is None:\n if skeleton is None:\n raise ValueError(\n \"Skeleton must be provided when the head configuration is \"\n \"incomplete.\"\n )\n part_names = skeleton.node_names\n if update_config:\n head_config.confmaps.part_names = part_names\n\n classes = head_config.class_maps.classes\n if classes is None:\n if tracks is None:\n raise ValueError(\n \"Classes must be provided when the head configuration is \"\n \"incomplete.\"\n )\n classes = [t.name for t in tracks]\n if update_config:\n head_config.class_maps.classes = classes\n\n heads = [\n MultiInstanceConfmapsHead.from_config(\n head_config.confmaps, part_names=part_names\n ),\n ClassMapsHead.from_config(head_config.class_maps, classes=classes),\n ]\n output_stride = min(heads[0].output_stride, heads[1].output_stride)\n output_stride = heads[0].output_stride\n if head_config.confmaps.offset_refinement:\n heads.append(\n OffsetRefinementHead.from_config(\n head_config.confmaps, 
part_names=part_names\n )\n )\n\n elif isinstance(head_config, MultiClassTopDownConfig):\n part_names = head_config.confmaps.part_names\n if part_names is None:\n if skeleton is None:\n raise ValueError(\n \"Skeleton must be provided when the head configuration is \"\n \"incomplete.\"\n )\n part_names = skeleton.node_names\n if update_config:\n head_config.confmaps.part_names = part_names\n\n classes = head_config.class_vectors.classes\n if classes is None:\n if tracks is None:\n raise ValueError(\n \"Classes must be provided when the head configuration is \"\n \"incomplete.\"\n )\n classes = [t.name for t in tracks]\n if update_config:\n head_config.class_vectors.classes = classes\n\n heads = [\n CenteredInstanceConfmapsHead.from_config(\n head_config.confmaps, part_names=part_names\n ),\n ClassVectorsHead.from_config(\n head_config.class_vectors, classes=classes\n ),\n ]\n output_stride = min(heads[0].output_stride, heads[1].output_stride)\n output_stride = heads[0].output_stride\n if head_config.confmaps.offset_refinement:\n heads.append(\n OffsetRefinementHead.from_config(\n head_config.confmaps, part_names=part_names\n )\n )\n else:\n raise ValueError(\n \"Head configuration (config.model.heads) was not specified.\"\n )\n\n backbone_config.output_stride = output_stride\n\n return cls(backbone=backbone_cls.from_config(backbone_config), heads=heads)\n\n @property\n def maximum_stride(self) -> int:\n \"\"\"Return the maximum stride of the model backbone.\"\"\"\n return self.backbone.maximum_stride\n\n def make_model(self, input_shape: Tuple[int, int, int]) -> tf.keras.Model:\n \"\"\"Create a trainable model by connecting the backbone with the heads.\n\n Args:\n input_shape: Tuple of (height, width, channels) specifying the shape of the\n inputs before preprocessing.\n\n Returns:\n An instantiated `tf.keras.Model`.\n \"\"\"\n # Create input layer.\n x_in = tf.keras.layers.Input(input_shape, name=\"input\")\n\n # Create backbone.\n x_main, x_mid = self.backbone.make_backbone(x_in=x_in)\n\n # Make sure main and intermediate feature outputs are lists.\n if type(x_main) != list:\n x_main = [x_main]\n if len(x_mid) > 0 and isinstance(x_mid[0], IntermediateFeature):\n x_mid = [x_mid]\n\n # Build output layers for each head.\n x_outs = []\n for output in self.heads:\n x_head = []\n if output.output_stride == self.backbone.output_stride:\n # The main output has the same stride as the head, so build output layer\n # from that tensor.\n for i, x in enumerate(x_main):\n x_head.append(output.make_head(x))\n\n else:\n # Look for an intermediate activation that has the correct stride.\n for feats in zip(*x_mid):\n # TODO: Test for this assumption?\n assert all([feat.stride == feats[0].stride for feat in feats])\n if feats[0].stride == output.output_stride:\n for i, feat in enumerate(feats):\n x_head.append(output.make_head(feat.tensor))\n break\n\n if len(x_head) == 0:\n raise ValueError(\n f\"Could not find a feature activation for output at stride \"\n f\"{output.output_stride}.\"\n )\n x_outs.extend(x_head)\n # TODO: Warn/error if x_main was not connected to any heads?\n\n # Create model.\n self.keras_model = tf.keras.Model(inputs=x_in, outputs=x_outs)\n return self.keras_model\n"
] |
[
[
"tensorflow.keras.Model",
"tensorflow.keras.layers.Input"
]
] |
cog-imperial/galini
|
[
"b27e62b4e981818624f8dc315f0cadee2f7cbed2"
] |
[
"tests/unit/triangle/test_triangle.py"
] |
[
"import pytest\nimport numpy as np\nimport pyomo.environ as aml\nfrom suspect.pyomo import create_connected_model\nfrom coramin.relaxations.iterators import relaxation_data_objects\nfrom coramin.utils.coramin_enums import RelaxationSide\nfrom coramin.relaxations.mccormick import PWMcCormickRelaxation\nfrom coramin.relaxations.univariate import PWXSquaredRelaxation\nfrom galini.galini import Galini\nfrom galini.branch_and_cut.algorithm import BranchAndCutAlgorithm\nfrom galini.triangle.cuts_generator import TriangleCutsGenerator\nfrom galini.solvers.solution import OptimalObjective, OptimalVariable, Solution, Status, load_solution_from_model\nfrom galini.relaxations.relax import relax\n\n\nclass FakeSolver:\n name = 'branch_and_cut'\n config = {\n 'obbt_simplex_maxiter': 100,\n }\n\n\nclass FakeStatus(Status):\n def is_success(self):\n return True\n\n def is_infeasible(self):\n return False\n\n def is_unbounded(self):\n return FakeSolver\n\n def description(self):\n return ''\n\nQ = [[28.0, 23.0, 0.0, 0.0, 0.0, 2.0, 0.0, 24.0],\n [23.0, 0.0, -23.0, -44.0, 10.0, 0.0, 7.0, -7.0],\n [0.0, -23.0, 18.0, 41.0, 0.0, -3.0, -5.0, 2.0],\n [0.0, -44.0, 41.0, -5.0, 5.0, -1.0, 16.0, -50.0],\n [0.0, 10.0, 0.0, 5.0, 0.0, -2.0, -4.0, 21.0],\n [2.0, 0.0, -3.0, -1.0, -2.0, 34.0, -9.0, 20.0],\n [0.0, 7.0, -5.0, 16.0, -4.0, -9.0, 0.0, 0.0],\n [24.0, -7.0, 2.0, -50.0, 21.0, 20.0, 0.0, -45.0]]\n\nC = [-44, -48, 10, 45, 0, 2, 3, 4, 5]\n\nQc = [\n [-28, 13, 5],\n [13, 0, 0],\n [0, 0, 0],\n]\n\n\[email protected]()\ndef problem():\n m = aml.ConcreteModel(\"model_1\")\n m.I = range(8)\n m.x = aml.Var(m.I, bounds=(0, 1))\n m.f = aml.Objective(\n expr=sum(-Q[i][j] * m.x[i] * m.x[j] for i in m.I for j in m.I) + sum(-C[i] * m.x[i] for i in m.I))\n m.c = aml.Constraint(expr=sum(Qc[i][j] * m.x[i] * m.x[j] for i in m.I[0:3] for j in m.I[0:3]) >= -10)\n\n cm, _ = create_connected_model(m)\n return cm\n\n\[email protected]\ndef galini():\n galini_ = Galini()\n galini_.update_configuration({\n 'cuts_generator': {\n 'generators': ['triangle'],\n 'triangle': {\n 'domain_eps': 1e-3,\n 'thres_triangle_viol': 1e-7,\n 'max_tri_cuts_per_round': 10e3,\n 'selection_size': 2,\n 'min_tri_cuts_per_round': 0,\n },\n }\n })\n return galini_\n\n\ndef test_adjacency_matrix(galini, problem):\n linear_model, _, _ = relax(problem)\n galini.timelimit.start_now()\n\n triangle_cuts_gen = TriangleCutsGenerator(galini, galini._config.cuts_generator.triangle)\n triangle_cuts_gen.before_start_at_root(problem, linear_model)\n lower_bounds, upper_bounds, domains, aux_vars, var_by_id, edges = \\\n triangle_cuts_gen._detect_bilinear_terms(linear_model)\n\n expected_adj = [\n [1, 1, 1, 0, 0, 1, 0, 1],\n [1, 0, 1, 1, 1, 0, 1, 1],\n [1, 1, 1, 1, 0, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 1, 1],\n [0, 1, 0, 1, 0, 1, 1, 1],\n [1, 0, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 1]\n ]\n\n var_to_idx = aml.ComponentMap()\n\n for i in problem.I:\n x = linear_model.x[i]\n var_to_idx[x] = i\n\n for (x_id, y_id) in edges:\n x = var_by_id[x_id]\n y = var_by_id[y_id]\n x_idx = var_to_idx[x]\n y_idx = var_to_idx[y]\n assert expected_adj[x_idx][y_idx] == 1\n # Mark edge as visited\n expected_adj[x_idx][y_idx] = 0\n\n # Check we visited all edges\n assert np.all(np.isclose(expected_adj, 0))\n\n\ndef test_triange_cut_violations(galini, problem):\n linear_model, _, _ = relax(problem)\n galini.timelimit.start_now()\n triangle_cuts_gen = TriangleCutsGenerator(galini, galini._config.cuts_generator.triangle)\n triangle_cuts_gen.before_start_at_root(problem, 
linear_model)\n\n _, _, _, aux_vars, _, _ = \\\n triangle_cuts_gen._detect_bilinear_terms(linear_model)\n\n linear_model.x[0].set_value(0.5)\n linear_model.x[1].set_value(0.5)\n linear_model.x[2].set_value(0.5)\n linear_model.x[3].set_value(0.5)\n linear_model.x[4].set_value(0.5)\n linear_model.x[5].set_value(1.0)\n linear_model.x[6].set_value(0.5)\n linear_model.x[7].set_value(0.5)\n\n aux_vars_sol = {\n (4, 7): 0.5,\n (5, 6): 0.5,\n (3, 7): 0.0,\n (5, 7): 0.5,\n (3, 6): 0.5,\n (0, 7): 0.5,\n (1, 3): 0.0,\n (3, 4): 0.5,\n (1, 2): 0.0,\n (0, 5): 0.5,\n (0, 0): 0.5,\n (0, 1): 0.5,\n (1, 4): 0.5,\n (3, 3): 0.0,\n (7, 7): 0.0,\n (3, 5): 0.5,\n (1, 6): 0.5,\n (5, 5): 1.0,\n (1, 7): 0.0,\n (2, 2): 0.5,\n (2, 3): 0.5,\n (4, 6): 0.0,\n (2, 5): 0.5,\n (4, 5): 0.5,\n (2, 6): 0.0,\n (2, 7): 0.5,\n (0, 2): 0.5,\n }\n\n for (i, j), v in aux_vars_sol.items():\n x = linear_model.x[i]\n y = linear_model.x[j]\n w = aux_vars[id(x), id(y)]\n w.set_value(v)\n\n triangle_viol = triangle_cuts_gen._get_triangle_violations()\n\n def make_clique(i, j, k):\n return [linear_model.x[i], linear_model.x[j], linear_model.x[k]]\n\n cliques = [\n make_clique(0, 1, 2), # 0\n make_clique(0, 1, 7), # 1\n make_clique(0, 2, 5), # 2\n make_clique(0, 2, 7), # 3\n make_clique(0, 5, 7), # 4\n make_clique(1, 2, 3), # 5\n make_clique(1, 2, 6), # 6\n make_clique(1, 2, 7), # 7\n make_clique(1, 3, 4), # 8\n make_clique(1, 3, 6), # 9\n make_clique(1, 3, 7), # 10\n make_clique(1, 4, 6), # 11\n make_clique(1, 4, 7), # 12\n make_clique(2, 3, 5), # 13\n make_clique(2, 3, 6), # 14\n make_clique(2, 3, 7), # 15\n make_clique(2, 5, 6), # 16\n make_clique(2, 5, 7), # 17\n make_clique(3, 4, 5), # 18\n make_clique(3, 4, 6), # 19\n make_clique(3, 4, 7), # 20\n make_clique(3, 5, 6), # 21\n make_clique(3, 5, 7), # 22\n make_clique(4, 5, 6), # 23\n make_clique(4, 5, 7), # 24\n ]\n\n expected_triangle_viol = [\n [[0.5, -0.5, -0.5], -0.5],\n [[0.5, -0.5, -0.5], -0.5],\n [[0.0, 0.0, -0.5], -0.5],\n [[0.0, 0.0, 0.0], -1.0],\n [[0.0, -0.5, 0.0], -0.5],\n [[-1.0, 0.0, 0.0], 0.0],\n [[0.0, -1.0, 0.0], 0.0],\n [[-1.0, 0.0, 0.0], 0.0],\n [[-0.5, -0.5, 0.5], -0.5],\n [[-0.5, -0.5, 0.5], -0.5],\n [[-0.5, -0.5, -0.5], 0.5],\n [[0.5, -0.5, -0.5], -0.5],\n [[-0.5, 0.5, -0.5], -0.5],\n [[0.0, 0.0, -0.5], -0.5],\n [[-0.5, 0.5, -0.5], -0.5],\n [[0.5, -0.5, -0.5], -0.5],\n [[-0.5, 0.0, -0.5], 0.0],\n [[0.0, -0.5, 0.0], -0.5],\n [[0.0, 0.0, -0.5], -0.5],\n [[0.5, -0.5, -0.5], -0.5],\n [[-0.5, 0.5, -0.5], -0.5],\n [[0.0, -0.5, 0.0], -0.5],\n [[-0.5, 0.0, -0.5], 0.0],\n [[-0.5, 0.0, -0.5], 0.0],\n [[0.0, -0.5, 0.0], -0.5],\n ]\n\n assert len(triangle_viol) == len(expected_triangle_viol) * 4\n\n for actual_vars, actual_ineq_type, actual_viol in triangle_viol:\n clique_idx = None\n for i, clique in enumerate(cliques):\n if set([id(c) for c in clique]) == set([id(c) for c in actual_vars]):\n clique_idx = i\n break\n\n assert clique_idx is not None\n expected_res = expected_triangle_viol[clique_idx]\n if actual_ineq_type == 3:\n assert np.isclose(actual_viol, expected_res[1])\n else:\n any_match = False\n for i, viol in enumerate(expected_res[0]):\n if np.isclose(viol, actual_viol):\n any_match = True\n expected_res[0][i] = np.inf\n break\n\n assert any_match\n"
] |
[
[
"numpy.isclose"
]
] |
dariusarnold/quadpy
|
[
"9dc7c1ebff99d15ae57ed9195cde94d97a599be8",
"9dc7c1ebff99d15ae57ed9195cde94d97a599be8"
] |
[
"tools/lebedev/import_lebedev.py",
"quadpy/enr/_stroud_secrest.py"
] |
[
"\"\"\"\nThis little helper takes Lebedev point and weight data from [1] and produces JSON files.\n\n[1]\nhttps://people.sc.fsu.edu/~jburkardt/datasets/sphere_lebedev_rule/sphere_lebedev_rule.html\n\"\"\"\nimport os\nimport re\n\nimport numpy\n\n\ndef read(filename):\n data = numpy.loadtxt(filename)\n azimuthal_polar = data[:, :2] / 180.0\n weights = data[:, 2]\n return azimuthal_polar, weights\n\n\ndef chunk_data(weights):\n # divide the weight vector into chunks of 6, 8, 12, 24, or 48\n chunks = []\n k = 0\n ref_weight = 0.0\n tol = 1.0e-12\n while k < len(weights):\n if len(chunks) > 0 and abs(weights[k] - ref_weight) < tol:\n chunks[-1].append(k)\n else:\n chunks.append([k])\n ref_weight = weights[k]\n k += 1\n return chunks\n\n\ndef sort_into_symmetry_classes(weights, azimuthal_polar):\n data = {\"a1\": [], \"a2\": [], \"a3\": [], \"pq0\": [], \"llm\": [], \"rsw\": []}\n for c in chunks:\n if len(c) == 6:\n data[\"a1\"].append([weights[c[0]]])\n elif len(c) == 12:\n data[\"a2\"].append([weights[c[0]]])\n elif len(c) == 8:\n data[\"a3\"].append([weights[c[0]]])\n elif len(c) == 24:\n if any(abs(azimuthal_polar[c, 1] - 0.5) < 1.0e-12):\n # polar == pi/2 => X == [p, q, 0].\n # Find the smallest positive phi that's paired with `polar ==\n # pi/2`; the symmetry is fully characterized by that phi.\n k = numpy.where(abs(azimuthal_polar[c, 1] - 0.5) < 1.0e-12)[0]\n assert len(k) == 8\n k2 = numpy.where(azimuthal_polar[c, 0][k] > 0.0)[0]\n azimuthal_min = numpy.min(azimuthal_polar[c, 0][k][k2])\n data[\"pq0\"].append([weights[c[0]], azimuthal_min])\n else:\n # X = [l, l, m].\n # In this case, there must by exactly two phi with the value\n # pi/4. Take the value of the smaller corresponding `polar`;\n # all points are characterized by it.\n k = numpy.where(abs(azimuthal_polar[c, 0] - 0.25) < 1.0e-12)[0]\n assert len(k) == 2\n k2 = numpy.where(azimuthal_polar[c, 1][k] > 0.0)[0]\n polar_min = numpy.min(azimuthal_polar[c, 1][k][k2])\n data[\"llm\"].append([weights[c[0]], polar_min])\n else:\n assert len(c) == 48\n # This most general symmetry is characterized by two angles; one\n # could take any two here.\n # To make things easier later on, out of the 6 smallest polar\n # angle, take the one with the smallest positive phi.\n min_polar = numpy.min(azimuthal_polar[c, 1])\n k = numpy.where(abs(azimuthal_polar[c, 1] - min_polar) < 1.0e-12)[0]\n k2 = numpy.where(azimuthal_polar[c, 0][k] > 0.0)[0]\n min_azimuthal = numpy.min(azimuthal_polar[c, 0][k][k2])\n data[\"rsw\"].append([weights[c[0]], min_azimuthal, min_polar])\n\n return data\n\n\ndef write_json(filename, d):\n # Getting floats in scientific notation in python.json is almost impossible, so do\n # some work here. 
Compare with <https://stackoverflow.com/a/1733105/353337>.\n class PrettyFloat(float):\n def __repr__(self):\n return \"{:.16e}\".format(self)\n\n def pretty_floats(obj):\n if isinstance(obj, float):\n return PrettyFloat(obj)\n elif isinstance(obj, dict):\n return dict((k, pretty_floats(v)) for k, v in obj.items())\n elif isinstance(obj, (list, tuple)):\n return list(map(pretty_floats, obj))\n return obj\n\n with open(filename, \"w\") as f:\n string = (\n pretty_floats(d)\n .__repr__()\n .replace(\"'\", '\"')\n .replace(\"{\", \"{\\n \")\n .replace(\"[[\", \"[\\n [\")\n .replace(\"], [\", \"],\\n [\")\n .replace(']], \"', ']\\n ],\\n \"')\n .replace(\"}\", \"\\n}\")\n .replace(\"]]\", \"]\\n ]\")\n )\n f.write(string)\n\n return\n\n\nif __name__ == \"__main__\":\n directory = \"data/\"\n for k, file in enumerate(os.listdir(directory)):\n filename = os.fsdecode(file)\n m = re.match(\"lebedev_([0-9]+)\\\\.txt\", filename)\n degree = int(m.group(1))\n azimuthal_polar, weights = read(os.path.join(\"data\", filename))\n chunks = chunk_data(weights)\n data = sort_into_symmetry_classes(weights, azimuthal_polar)\n\n delete_list = []\n for key in data:\n if len(data[key]) == 0:\n delete_list.append(key)\n for key in delete_list:\n data.pop(key)\n data[\"degree\"] = degree\n\n write_json(\"lebedev_{:03d}.json\".format(degree), data)\n",
"import numpy\nfrom sympy import Rational as frac\nfrom sympy import gamma, pi, sqrt\n\nfrom ..enr2._stroud_secrest import _nsimplex\nfrom ..helpers import article, fsd, pm, untangle\nfrom ._helpers import EnrScheme\n\ncitation = article(\n authors=[\"A.H. Stroud\", \"D. Secrest\"],\n title=\"Approximate integration formulas for certain spherically symmetric regions\",\n journal=\"Math. Comp.\",\n volume=\"17\",\n year=\"1963\",\n pages=\"105-135\",\n url=\"https://doi.org/10.1090/S0025-5718-1963-0161473-0\",\n)\n\n\ndef stroud_secrest_1(n):\n data = [(frac(1, n + 1), sqrt(n + 1) * _nsimplex(n))]\n points, weights = untangle(data)\n weights *= 2 * sqrt(pi) ** n * gamma(n) / gamma(frac(n, 2))\n return EnrScheme(\"Stroud-Secrest I\", n, weights, points, 2, citation)\n\n\ndef stroud_secrest_2(n):\n nu = sqrt(n * (n + 1))\n data = [(frac(1, 2 * n), fsd(n, (nu, 1)))]\n points, weights = untangle(data)\n weights *= 2 * sqrt(pi) ** n * gamma(n) / gamma(frac(n, 2))\n return EnrScheme(\"Stroud-Secrest II\", n, weights, points, 3, citation)\n\n\ndef stroud_secrest_3(n):\n nu = sqrt(n + 1)\n data = [(frac(1, 2 ** n), pm(n, nu))]\n points, weights = untangle(data)\n weights *= 2 * sqrt(pi) ** n * gamma(n) / gamma(frac(n, 2))\n return EnrScheme(\"Stroud-Secrest III\", n, weights, points, 3, citation)\n\n\ndef stroud_secrest_4(n):\n nu = sqrt((n + 2) * (n + 3))\n xi = sqrt(frac((n + 2) * (n + 3), 2))\n A = frac(2 * (2 * n + 3), (n + 2) * (n + 3))\n B = frac((4 - n) * (n + 1), 2 * (n + 2) ** 2 * (n + 3))\n C = frac(n + 1, (n + 2) ** 2 * (n + 3))\n\n data = [(A, numpy.full((1, n), 0)), (B, fsd(n, (nu, 1))), (C, fsd(n, (xi, 2)))]\n points, weights = untangle(data)\n weights *= 2 * sqrt(pi) ** n * gamma(n) / gamma(frac(n, 2))\n return EnrScheme(\"Stroud-Secrest IV\", n, weights, points, 5, citation)\n"
] |
[
[
"numpy.where",
"numpy.loadtxt",
"numpy.min"
],
[
"numpy.full"
]
] |
jasenjackson/FatFlies_scRNA
|
[
"f132ad94072c46d5b5e3778ee44154e189935da0"
] |
[
"sample_sheet_parser.py"
] |
[
"#!/usr/bin/env python3\n\"\"\"\n@author: Timothy Baker\n\nsamplesheet_parser.py\n\n04/01/2019:\n - Broke out original parse_sample_sheet() method to smaller\n parsing functions.\n - included new attributes for downstream configuration for zUMI\n and differential expression inputs\n - need to handle parsing a lot better; perhaps break into subclasses\n\"\"\"\n\nimport pandas as pd\nimport csv \n\nclass SampleSheetParser:\n \"\"\"\n\n A SampleSheetParser object that parses the input sample sheet in specific\n format and holds all instantiated paths.\n\n TODO:\n - fix diff_exp input, can only accept concatenated numbers\n and turns them into a list of strings\n - implement logging\n - implement more secure error handling especially if sample sheet changes\n - devise better parsing methods\n - configure how this object talks with the docker container\n\n params:\n sample_sheet : ab/path/to/sample_sheet.csv\n\n attributes:\n offset_pos : dict of offset positions for better troubleshooting of each [VALUE]\n header_info : dict of header info date, run_name, library_prep, basename\n path_info : dict of paths fastq_r1, fastq_r2, ref.fa, transcript.gtf\n zumi_input : dict of zumi params that will be added to config file, include\n bc_filter_num_bases, bc_filter_phred, bc_ham_dist\n umi_filter_num_bases, umi_filter_phred, umi_ham_dist\n zum_start_stage\n diff_input : dict of condition groups to use during differential expression\n adapters: dict of 5' and 3' adapter sequences for trimming\n cell_data : pandas dataframe of experiment design information and barcodes\n\n methods:\n locate_offsets() : finds each [VALUE] offset position\n parse_sample_sheet_header() : stores header info\n parse_sample_sheet_zumi() : stores zumi input config info\n parse_sample_sheet_diffexp() : stores info for diff expression\n parse_sample_sheet_adapters() : stores adapter info for trimming\n parse_sample_sheet_data() : stores cell data\n run_parsing_methods() : runs all instance methods\n create_adapter_whitelist() : creates the barcode white list for zUMI from barcode_seq dict\n create_cell_data_csv() : creates the csv file containing metadata for each cell\n create_design_csv() : creates experiment design csv file for differential expression\n return_offsets() : returns offset position dict\n return_header_info() : returns header info\n return_zumi_input() : returns zumi config input info\n return_diff_input() : returns diff expression input info\n return_path_info() : returns path_info attr dict\n return_adapters() : return adapters dict for trimming\n return_cell_data() : returns pandas dataframe of cell info\n\n \"\"\"\n\n def __init__(self, sample_sheet):\n self.sample_sheet = sample_sheet\n self.offset_pos = {}\n self.header_info = {}\n self.path_info = {}\n self.zumi_input = {}\n self.diff_input = {}\n self.adapter = {}\n self.cell_data = None\n\n\n def locate_offsets(self):\n \"\"\" parse sample sheet and grab offset positions of bracketed names \"\"\"\n with open(self.sample_sheet, 'r') as csv_handle:\n\n line = csv_handle.readline()\n\n while line:\n\n # grabbing header position\n if line.startswith('[HEADER]'):\n self.offset_pos['header_offset'] = csv_handle.tell()\n\n # grabbing zumi position\n if line.startswith('[ZUMI]'):\n self.offset_pos['zumi_offset'] = csv_handle.tell()\n\n # grabbing diff_exp position\n if line.startswith('[DIFF_EXP]'):\n self.offset_pos['diff_offset'] = csv_handle.tell()\n\n # grabbing setting position\n if line.startswith('[ADAPTERS]'):\n self.offset_pos['adapters_offset'] = 
csv_handle.tell()\n\n # grabbing data position\n if line.startswith('[DATA]'):\n self.offset_pos['data_offset'] = csv_handle.tell()\n\n line = csv_handle.readline()\n\n\n def parse_sample_sheet_header(self):\n \"\"\" parsing the header section of the sample sheet \"\"\"\n\n with open(self.sample_sheet, 'r') as csv_handle:\n\n header_byte_load = (\\\n self.offset_pos['zumi_offset'] - self.offset_pos['header_offset']\\\n ) - 28\n\n csv_handle.seek(self.offset_pos['header_offset'])\n\n for line in csv_handle.readlines(header_byte_load):\n line_lst = line.split(',')\n\n # parsing overall project information to be used for logging, &\n # filenaming\n if line_lst[0].lower() == 'date':\n self.header_info['date'] = line_lst[1]\n\n if line_lst[0].lower() == 'run_name':\n self.header_info['run_name'] = line_lst[1]\n\n if line_lst[0].lower() == 'library_prep':\n self.header_info['library_prep'] = line_lst[1]\n\n if line_lst[0].lower() == 'basename':\n self.header_info['basename'] = line_lst[1]\n\n # parsing path information from header\n # grouping relevant information together\n if line_lst[0].lower() == 'fastq_read1':\n self.path_info['fastq_read1'] = line_lst[1]\n\n if line_lst[0].lower() == 'fastq_read2':\n self.path_info['fastq_read2'] = line_lst[1]\n\n if line_lst[0].lower() == 'ref_genome':\n self.path_info['ref_genome'] = line_lst[1]\n\n if line_lst[0].lower() == 'annotation':\n self.path_info['annotation'] = line_lst[1]\n\n\n # setting trimmed fastq file paths\n # may need to fix paths\n # trimmed fastq goes to current directory, will need to find it\n # may not be able to use relative paths in docker\n self.path_info['trimmed_r1'] = '/{}.trimmed.R1.fastq.gz'.format(\\\n self.header_info['basename'])\n\n self.path_info['trimmed_r2'] = '/{}.trimmed.R2.fastq.gz'.format(\\\n self.header_info['basename'])\n\n\n def parse_sample_sheet_zumi(self):\n \"\"\" parsing the zumi section of the sample sheet \"\"\"\n\n with open(self.sample_sheet, 'r') as csv_handle:\n\n zumi_byte_load = (\\\n self.offset_pos['diff_offset'] - self.offset_pos['zumi_offset']\\\n ) - 28\n\n # changing file position to zumi offset\n csv_handle.seek(self.offset_pos['zumi_offset'])\n\n # setting up zumi dict for yaml config file generation\n for line in csv_handle.readlines(zumi_byte_load):\n line_lst = line.split(',')\n\n if line_lst[0].lower() == 'bc_filter_num_bases':\n self.zumi_input['bc_filter_num_bases'] = line_lst[1]\n\n if line_lst[0].lower() == 'bc_filter_phred':\n self.zumi_input['bc_filter_phred'] = line_lst[1]\n\n if line_lst[0].lower() == 'bc_ham_dist':\n self.zumi_input['bc_ham_dist'] = line_lst[1]\n\n if line_lst[0].lower() == 'umi_filter_num_bases':\n self.zumi_input['umi_filter_num_bases'] = line_lst[1]\n\n if line_lst[0].lower() == 'umi_filter_phred':\n self.zumi_input['umi_filter_phred'] = line_lst[1]\n\n if line_lst[0].lower() == 'umi_ham_dist':\n self.zumi_input['umi_ham_dist'] = line_lst[1]\n\n if line_lst[0].lower() == 'zum_start_stage':\n self.zumi_input['zum_start_stage'] = line_lst[1]\n\n\n def parse_sample_sheet_diffexp(self):\n \"\"\" parsing the diff_expression section of the sample sheet \"\"\"\n with open(self.sample_sheet, 'r') as csv_handle:\n \n diffexp_byte_load = (\\\n self.offset_pos['adapters_offset'] - self.offset_pos['diff_offset']\\\n ) - 28\n \n csv_handle.seek(self.offset_pos['diff_offset'])\n \n csv_as_string = csv_handle.readlines(diffexp_byte_load)\n reader = csv.reader(csv_as_string, skipinitialspace = True)\n\n for line_lst in reader:\n if line_lst[0].lower() == 'test_group':\n 
self.diff_input['test_group'] = '\\\"'+str(line_lst[1])+'\\\"'\n if line_lst[0].lower() == 'control_group':\n self.diff_input['control_group'] = '\\\"'+str(line_lst[1])+'\\\"'\n\n\n def parse_sample_sheet_adapters(self):\n \"\"\" parsing the adapter section of sample sheet \"\"\"\n\n with open(self.sample_sheet, 'r') as csv_handle:\n\n # changing to adapter offset position\n csv_handle.seek(self.offset_pos['adapters_offset'])\n\n # setting adapter attributes\n adapter_list = csv_handle.readline().split(',')\n\n # adapter sequences to trim\n self.adapter['adapter_3'] = adapter_list[1]\n self.adapter['adapter_5'] = adapter_list[2]\n\n\n def parse_sample_sheet_data(self):\n \"\"\" parsing cell data section from sample sheet \"\"\"\n\n with open(self.sample_sheet, 'r') as csv_handle:\n\n # changing to data offset position\n csv_handle.seek(self.offset_pos['data_offset'])\n\n # pandas dataframe for easy storage, and retrieval\n self.cell_data = pd.read_csv(csv_handle)\n\n\n def run_parsing_methods(self):\n \"\"\" runs all parsing methods to instantiate all attributes \"\"\"\n\n # grabbing offset\n self.locate_offsets()\n\n # parsing header\n self.parse_sample_sheet_header()\n\n # parsing zumi\n self.parse_sample_sheet_zumi()\n\n # parsing diff exp\n self.parse_sample_sheet_diffexp()\n\n # parsing adapters\n self.parse_sample_sheet_adapters()\n\n # parsing cell data\n self.parse_sample_sheet_data()\n\n\n def create_adapter_whitelist(self, barcode_path):\n \"\"\" create barcode_whitelist text file \"\"\"\n\n # need to direct toward specific path\n # will only exist in Docker\n full_bar_path = barcode_path + '/barcode_whitelist.txt'\n self.cell_data.to_csv(\n full_bar_path,\n sep='\\n',\n columns=['barcode_sequence'],\n header=False,\n index=False\n )\n \n \n def create_cell_data_csv(self, cell_data_path):\n \"\"\" create cell_data csv file \"\"\"\n \n # need to direct toward specific path\n # will only exist in Docker\n full_cell_data_path = cell_data_path + '/cell_data.csv'\n self.cell_data.to_csv(\n full_cell_data_path,\n header=True,\n index=False\n )\n \n \n def create_design_csv(self, design_path):\n \"\"\" create design csv file \"\"\"\n\n # need to direct toward specific path\n # will only exist in Docker\n full_design_path = design_path + '/design.csv'\n with open(full_design_path,'w+') as design_file:\n out = \"test,control\\n\" + \\\n self.diff_input['test_group'] + \",\" + \\\n self.diff_input['control_group'] + '\\n'\n design_file.write(out) \n\n\n def return_offsets(self):\n \"\"\" returns offset position info \"\"\"\n return self.offset_pos\n\n def return_header_info(self):\n \"\"\" returns header info \"\"\"\n return self.header_info\n\n def return_zumi_input(self):\n \"\"\" returns zumi input info \"\"\"\n return self.zumi_input\n\n def return_diff_input(self):\n \"\"\" returns diff exp info \"\"\"\n return self.diff_input\n\n def return_path_info(self):\n \"\"\" return path_info dict \"\"\"\n return self.path_info\n\n def return_adapters(self):\n \"\"\" return adapter dict sequences \"\"\"\n return self.adapter\n\n def return_cell_data(self):\n \"\"\" return cell barcode list \"\"\"\n return self.cell_data\n\n\n\ndef main():\n \"\"\" run main for testing \"\"\"\n pass\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.read_csv"
]
] |
Niels-vv/Safe-RL-With-DR
|
[
"9f299661bb4cea9f0cd3121ea4b273cfb1016f9f"
] |
[
"deepmdp/DeepMDP.py"
] |
[
"import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n# Class to compute transition cost for DeepMDP\nclass TransitionAux(nn.Module):\n def __init__(self, device):\n super(TransitionAux, self).__init__()\n self.c_hid = 1\n self.action_dim = 32 * 32 # TODO pysc2 specific\n\n zero_pad = torch.nn.ZeroPad2d((0, 1, 0, 1)) # maintain input dimensionality.\n conv = torch.nn.Conv2d(in_channels = self.c_hid, out_channels = self.c_hid * self.action_dim,kernel_size = 2, stride = 1).to(device)\n self.network = torch.nn.Sequential(zero_pad, conv, nn.ReLU())\n\n def compute_loss(self, embedding, embedding_next_observation, actions):\n \"\"\"\n Compute loss between embedding of next observation and the predicted embedding of the next observation.\n :param embedding: The embedded observation used as an input for the latent transition network\n :param embedding_next_observation: The ground truth embedded next obervation\n :param actions: The actions that caused the embedded next_observations.\n :return: The mean squared error between the predicted and the ground truth embedding of the next observation.\n \"\"\"\n preds = self.network(embedding)\n\n batch_size = actions.size(0)\n # Reshape tensor: B x act * channels ... --> B x channels x ... x act\n preds = preds.unsqueeze(len(preds.size())).reshape(batch_size, self.c_hid, *preds.size()[2:4], self.action_dim)\n\n loss_func = torch.nn.SmoothL1Loss()\n loss = 0\n for i, act in enumerate(actions):\n predicted__next_observation_embedding = preds[i, ..., int(act.item())].squeeze()\n ground_truth_embedding = embedding_next_observation[i, ...].squeeze()\n assert(ground_truth_embedding.size() == predicted__next_observation_embedding.size())\n loss += loss_func(predicted__next_observation_embedding, ground_truth_embedding)\n return loss\n\n\ndef compute_deepmdp_loss(policy_network, auxiliary_objective, s, s_1, actions, state_embeds, new_states, penalty, device):\n loss = 0\n \n with torch.no_grad():\n next_state_embeds, _ = policy_network(s_1, return_deepmdp = True)\n loss += auxiliary_objective.compute_loss(state_embeds, next_state_embeds, actions)\n \n #print(f'Loss after auxiliary: {loss}')\n\n with torch.no_grad():\n new_state_embeds, _ = policy_network(new_states, return_deepmdp = True)\n \n gradient_penalty = 0\n gradient_penalty += compute_gradient_penalty(policy_network.encoder, s, new_states, device)\n #print(f'Gradient penalty after encoder: {gradient_penalty}')\n gradient_penalty += compute_gradient_penalty(policy_network.mlp, state_embeds, new_state_embeds, device)\n #print(f'Gradient penalty after dqn mlp: {gradient_penalty}')\n gradient_penalty += compute_gradient_penalty(auxiliary_objective.network, state_embeds, new_state_embeds, device)\n #print(f'Gradient penalty after auxiliary: {gradient_penalty}')\n loss += penalty * gradient_penalty\n return loss\n\n# Helper function computing Wasserstein Generative Adversarial Network penalty\ndef compute_gradient_penalty(network, samples_a, samples_b, device):\n # https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/wgan_gp/wgan_gp.py\n\n \"\"\"Calculates the gradient penalty loss for WGAN GP\"\"\"\n # Random weight term for interpolation between real and fake samples\n batch_size = samples_a.size(0)\n alpha = torch.rand_like(samples_a)\n # Get random interpolation between real and fake samples\n interpolates = (alpha * samples_a + ((1 - alpha) * samples_b))\n interpolated_obs = torch.autograd.Variable(interpolates, requires_grad=True)\n\n d_interpolates = 
network(interpolated_obs)\n grad = torch.ones(d_interpolates.size(), requires_grad=False).to(device)\n\n # Get gradient w.r.t. interpolates\n gradients = torch.autograd.grad(\n outputs=d_interpolates,\n inputs=interpolated_obs,\n grad_outputs=grad,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n gradients = gradients.view(int(batch_size), -1)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n \n return gradient_penalty"
] |
[
[
"torch.nn.SmoothL1Loss",
"torch.rand_like",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.no_grad",
"torch.nn.ZeroPad2d",
"torch.autograd.grad",
"torch.autograd.Variable"
]
] |
ceroo1005/DATL
|
[
"ac7ceee4f6d0f9ce493743e80afdedd808806703"
] |
[
"clusters.py"
] |
[
"import torch\nimport torch.nn.functional as F\nfrom random import shuffle\nimport numpy as np\n\nclass CosineClusters():\n def __init__(self, num_clusters=100, Euclidean=False):\n self.clusters = [] # 储存各个集群\n self.item_cluster = {} # 储存每个样本所属集群\n self.Euclidean = Euclidean\n\n # 初始化集群\n for i in range(0, num_clusters):\n self.clusters.append(Cluster(self.Euclidean))\n\n def add_random_training_items(self, items):\n '''随机分配样本给集群'''\n\n cur_index = 0\n for index, item in enumerate(items):\n self.clusters[cur_index].add_to_cluster(item)\n textid = item[0]\n self.item_cluster[textid] = self.clusters[cur_index]\n \n cur_index += 1\n if cur_index >= len(self.clusters):\n cur_index = 0 \n\n\n def add_items_to_best_cluster(self, items):\n \"\"\"无监督聚类\"\"\"\n added = 0\n for item in items:\n new = self.add_item_to_best_cluster(item)\n if new:\n added += 1\n \n return added\n\n def add_item_to_best_cluster(self, item): \n best_cluster = None \n best_fit = float(\"-inf\") \n previous_cluster = None\n \n # 从当前集群中删除后再匹配\n textid = item[0]\n if textid in self.item_cluster:\n previous_cluster = self.item_cluster[textid]\n previous_cluster.remove_from_cluster(item)\n \n for cluster in self.clusters:\n fit = cluster.cosine_similary(item, Euclidean=self.Euclidean)\n if fit > best_fit:\n best_fit = fit\n best_cluster = cluster \n \n # 重新匹配后得添加到最佳的样本库中\n best_cluster.add_to_cluster(item)\n self.item_cluster[textid] = best_cluster\n \n if best_cluster == previous_cluster:\n return False\n else:\n return True\n \n \n def get_items_cluster(self, item): \n textid = item[0]\n \n if textid in self.item_cluster:\n return self.item_cluster[textid]\n else:\n return None \n \n \n def get_centroids(self, number_per_cluster=1): \n centroids = []\n for cluster in self.clusters:\n centroids.append(cluster.get_centroid(number_per_cluster))\n \n return centroids\n \n \n def get_outliers(self, number_per_cluster=1): \n outliers = []\n for cluster in self.clusters:\n outliers.append(cluster.get_outlier(number_per_cluster))\n \n return outliers\n \n \n def get_randoms(self, number_per_cluster=1): \n randoms = []\n for cluster in self.clusters:\n randoms.append(cluster.get_random_members(number_per_cluster))\n \n return randoms\n \n \n def shape(self): \n lengths = []\n for cluster in self.clusters:\n lengths.append(cluster.size())\n \n return str(lengths)\n\n\n\nclass Cluster():\n\n def __init__(self, Euclidean = False):\n self.members = {} # 该集群中样本ID\n self.feature_vector = None # 该集群整体特征\n self.distance = [] # 集群中的样本到该集群中心的距离\n self.Euclidean = Euclidean\n\n\n def add_to_cluster(self, item):\n dataid = item[0]\n data = item[1]\n\n self.members[dataid] = item \n try:\n if self.feature_vector == None:\n self.feature_vector = data\n except:\n self.feature_vector = self.feature_vector + data\n \n \n def remove_from_cluster(self, item):\n \"\"\"从集群中删除某一个元素\"\"\"\n dataid = item[0]\n data = item[1]\n \n exists = self.members.pop(dataid, False)\n if exists:\n self.feature_vector = self.feature_vector - data\n \n \n def cosine_similary(self, item, Euclidean=False):\n '''计算某样本距离集群中心的余弦距离'''\n data = item[1] \n center_vec = self.feature_vector / len(list(self.members.keys()))\n\n item_tensor = torch.FloatTensor(data)\n center_tensor = torch.FloatTensor(center_vec)\n \n if Euclidean:\n # print('欧式距离',end='\\r')\n similarity = - np.sqrt(np.sum(np.square(data - center_vec)))\n return similarity\n else:\n # print('余弦距离',end='\\r')\n similarity = F.cosine_similarity(item_tensor, center_tensor, 0)\n return similarity.item() # item() 
converts tensor value to float\n \n \n def size(self):\n return len(self.members.keys())\n \n \n def distance_sort(self):\n self.distance = []\n for textid in self.members.keys():\n item = self.members[textid]\n similarity = self.cosine_similary(item, Euclidean=self.Euclidean)\n self.distance.append([similarity, item[0], item[1]])\n self.distance.sort(reverse=True, key=lambda x: x[0])\n return self.distance\n\n def get_centroid(self, number=1):\n if len(self.members) == 0:\n return []\n return self.distance_sort()[:number]\n\n def get_outlier(self, number=1):\n if len(self.members) == 0:\n return []\n return self.distance_sort()[-number:]\n\n def get_random_members(self, number=1):\n if len(self.members) == 0:\n return [] \n _ = self.distance_sort()\n randoms = []\n for i in range(0, number):\n randoms.append(_[np.random.randint(len(self.members))])\n \n return randoms\n\n \n\nif __name__ == '__main__':\n\n from sklearn.datasets import make_blobs\n n_samples = 1500\n random_state = 170\n X, y = make_blobs(n_samples=n_samples, random_state=random_state)\n num_clusters = 4\n max_epochs = 10\n data = X\n\n\n NEWdata = [[str(index), item] for index, item in enumerate(data)]\n # shuffle(NEWdata)\n # print(NEWdata)\n # raise 'pass'\n # shuffle(NEWdata)\n cosine_clusters = CosineClusters(num_clusters, Euclidean=True)\n cosine_clusters.add_random_training_items(NEWdata)\n for index, cluster in enumerate(cosine_clusters.clusters):\n print(cluster.feature_vector)\n print(set(cosine_clusters.item_cluster.values()))\n\n\n for i in range(0, max_epochs):\n print(\"Epoch \"+str(i))\n added = cosine_clusters.add_items_to_best_cluster(NEWdata)\n if added == 0:\n break\n\n # centroids_per = list(set(cosine_clusters.item_cluster.values()))\n sample_y = [cosine_clusters.clusters.index(_) for _ in cosine_clusters.item_cluster.values()]\n # print(sample_y)\n\n centroids = cosine_clusters.get_centroids(2)\n outliers = cosine_clusters.get_outliers(2)\n randoms = cosine_clusters.get_randoms(2)\n\n centroids + outliers + randoms\n # print(set(cosine_clusters.item_cluster.values()))\n # print(cosine_clusters.clusters)\n\n for index, cluster in enumerate(cosine_clusters.clusters):\n sample_sort = cluster.distance_sort()\n # print('centroids:\\t',centroids[index])\n # print('outliers:\\t',outliers[index])\n # print('randoms:\\t',randoms[index])\n # assert sample_sort[0][1] == centroids[index][0]\n # assert sample_sort[-1][1] == outliers[index][0]\n\n D_id_color = [u'orchid', u'darkcyan', u'dodgerblue', u'turquoise', u'darkviolet']\n import matplotlib.pyplot as plt\n\n\n plt.figure(figsize=(18, 6))\n\n plt.subplot(131)\n plt.scatter(X[:, 0], X[:, 1])\n \n plt.subplot(132)\n for label in [*range(len(cosine_clusters.clusters))]:\n indices = [i for i, l in enumerate(sample_y) if l == label]\n current_tx = np.take(data[:, 0], indices)\n current_ty = np.take(data[:, 1], indices)\n color = D_id_color[label]\n print(current_tx.shape)\n plt.scatter(current_tx, current_ty, c=color, label=label)\n plt.legend(loc='best')\n\n plt.subplot(133)\n plt.scatter(X[:, 0], X[:, 1], alpha=0.2, color='gray')\n f2 = lambda x:[_[2] for _ in x]\n for label in [*range(len(cosine_clusters.clusters))]:\n color = D_id_color[label]\n plt.scatter(np.array(f2(centroids[label]))[:,0], np.array(f2(centroids[label]))[:,1], c=color, label=f'{label} centroids')\n plt.scatter(np.array(f2(outliers[label]))[:,0], np.array(f2(outliers[label]))[:,1], marker='*', c=color, label=f'{label} outliers')\n plt.scatter(np.array(f2(randoms[label]))[:,0], 
np.array(f2(randoms[label]))[:,1], marker='^', c=color, label=f'{label} randoms')\n \n # sample_sort = cluster.distance_sort()\n # print('centroids:\\t',centroids[index])\n # print('outliers:\\t',outliers[index])\n # print('randoms:\\t',randoms[index])\n\n\n # for index, cluster in enumerate(cosine_clusters.clusters):\n # for item in outliers[index]:\n # plt.scatter(item[-1][0], current_ty, c=color, label=label)\n \n plt.legend(loc='best')\n plt.show()"
] |
[
[
"numpy.square",
"matplotlib.pyplot.legend",
"numpy.take",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplot",
"torch.FloatTensor",
"torch.nn.functional.cosine_similarity",
"matplotlib.pyplot.show",
"sklearn.datasets.make_blobs",
"matplotlib.pyplot.figure"
]
] |
DELTA37/TVN
|
[
"2c69a4147a5beedf4246401dafbca5b54906fea4"
] |
[
"tvn/non_local/network.py"
] |
[
"from torch import nn\n# from .non_local_concatenation import NONLocalBlock2D\n# from .non_local_gaussian import NONLocalBlock2D\nfrom .non_local_embedded_gaussian import NONLocalBlock2D\n# from .non_local_dot_product import NONLocalBlock2D\n\n\nclass Network(nn.Module):\n def __init__(self):\n super(Network, self).__init__()\n\n self.conv_1 = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n\n self.nl_1 = NONLocalBlock2D(in_channels=32)\n self.conv_2 = nn.Sequential(\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n\n self.nl_2 = NONLocalBlock2D(in_channels=64)\n self.conv_3 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n\n self.fc = nn.Sequential(\n nn.Linear(in_features=128*3*3, out_features=256),\n nn.ReLU(),\n nn.Dropout(0.5),\n\n nn.Linear(in_features=256, out_features=10)\n )\n\n def forward(self, x):\n batch_size = x.size(0)\n\n feature_1 = self.conv_1(x)\n nl_feature_1 = self.nl_1(feature_1)\n\n feature_2 = self.conv_2(nl_feature_1)\n nl_feature_2 = self.nl_2(feature_2)\n\n output = self.conv_3(nl_feature_2).view(batch_size, -1)\n output = self.fc(output)\n\n return output\n\n def forward_with_nl_map(self, x):\n batch_size = x.size(0)\n\n feature_1 = self.conv_1(x)\n nl_feature_1, nl_map_1 = self.nl_1(feature_1, return_nl_map=True)\n\n feature_2 = self.conv_2(nl_feature_1)\n nl_feature_2, nl_map_2 = self.nl_2(feature_2, return_nl_map=True)\n\n output = self.conv_3(nl_feature_2).view(batch_size, -1)\n output = self.fc(output)\n\n return output, [nl_map_1, nl_map_2]\n\n\nif __name__ == '__main__':\n import torch\n\n img = torch.randn(3, 1, 28, 28)\n net = Network()\n out = net(img)\n print(out.size())\n"
] |
[
[
"torch.nn.Dropout",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] |
polifonia-project/OCR
|
[
"81db8757556e27765a8512bec4b5271c0a2cb301"
] |
[
"src/ocr_script.py"
] |
[
"import argparse\nimport ntpath\nimport os\nimport re\n\nimport cv2\nimport numpy as np\nimport pytesseract\nfrom PIL import Image\nfrom pdf2image import convert_from_path\nfrom dotenv import load_dotenv\n\nload_dotenv()\nImage.MAX_IMAGE_PIXELS = 933120000\nSUPPORTED_IMAGE_FORMAT = ['.png', '.jpg', '.jpeg', '.tiff', '.gif']\n\n\ndef file_info(file_path):\n \"\"\"Takes as input the file input path and the output path and analyses the file name and the file extension.\n It also set a destination folder for the conversion from pdf to image.\"\"\"\n if file_path == \"\" or file_path is None:\n raise NameError(\"Directories Not Set\")\n elif os.path.isdir(file_path):\n file_ext = None\n file_name_no_ext = ntpath.basename(file_path)\n else:\n file_name = ntpath.basename(file_path)\n file_name_no_ext = os.path.splitext(file_name)[0]\n file_ext = os.path.splitext(file_name)[1]\n\n return file_name_no_ext, file_ext\n\n\ndef convert_image(file_path, out_path, out_format=\"png\"):\n image = Image.open(file_path)\n out_path = f\"{out_path}/{file_path.split('/')[-1].split('.')[0]}.{out_format}\"\n image.save(out_path)\n\n\ndef pdf_to_img(file_path, final_path, out_format='png'):\n file_name_no_ext, file_ext = file_info(file_path)\n results_path = f'{final_path}/{file_name_no_ext}'\n print(file_path)\n # check if exists a folder with the same name of the input file. If not, create one.\n if not os.path.isdir(results_path):\n os.makedirs(results_path)\n else:\n pass\n pages = convert_from_path(file_path, 500)\n\n page_num = 0\n for page in pages:\n page_num += 1\n print(f'SAVING PAGE: {page_num}')\n page.save(f'{results_path}/{page_num}.{out_format}', out_format)\n print(results_path)\n return results_path\n\n\ndef image_processing(input_path, gray_scale, remove_noise, thresholding, dilate, erosion, edge_detection,\n skew_correction, see_image):\n print(input_path, gray_scale, remove_noise, thresholding, dilate, erosion, edge_detection,\n skew_correction, see_image)\n kernel = np.ones((5, 5), np.uint8)\n image = cv2.imread(input_path)\n\n if gray_scale:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n if remove_noise:\n image = cv2.medianBlur(image, 5)\n if thresholding:\n image = cv2.threshold(image, 125, 255, cv2.THRESH_BINARY)[1]\n # image = cv2.adaptiveThreshold(image,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,21,4)\n if dilate:\n image = cv2.dilate(image, kernel, iterations=1)\n if erosion:\n image = cv2.erode(image, kernel, iterations=1)\n if edge_detection:\n image = cv2.Canny(image, 100, 200)\n if skew_correction:\n coords = np.column_stack(np.where(image > 0))\n angle = cv2.minAreaRect(coords)[-1]\n if angle < -45:\n angle = -(90 + angle)\n else:\n angle = -angle\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, angle, 1.0)\n image = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n if see_image:\n cv2.imwrite(f'eval_files/{os.path.basename(input_path)[0]}_show{os.path.splitext(input_path)[1]}', image)\n print('SAVED PROCESSED IMAGE')\n return image\n\n\ndef ocr(processed_image, language_mode, psm, oem, multilang=\"\", language=\"\"):\n if type(psm) != int or not psm:\n raise NameError(\"PAGE_SEGMENTATION_MODE not set\")\n if language_mode == \"multi\" and multilang != \"\":\n custom_config = r'-l {} --oem {} --psm {}'.format(multilang, oem, psm)\n ocr_output = pytesseract.image_to_string(processed_image, config=custom_config)\n elif language_mode == \"mono\" and language != \"\":\n if not oem:\n raise 
NameError(\"OCR_ENGINE_MODE not set\")\n custom_config = r'--oem {} --psm {}'.format(oem, psm)\n # custom_config2 = r'--oem 3 --psm 1'\n ocr_output = pytesseract.image_to_string(processed_image, config=custom_config, lang=language)\n else:\n raise NameError(\"Language Setting Error\")\n return ocr_output\n\n\ndef ocrise_folder(folder_path, saved_file_path, output_format):\n ocr_all, ocr_all_pdf = '', ''\n for path, dirs, files in os.walk(folder_path):\n for file in sorted(files, key=lambda f: int(re.sub('\\D', '1', f))):\n filename, file_extension = os.path.splitext(file)\n if file_extension == '.pdf':\n # folder created inside the same folder as the input one.\n print(f'The file {filename} is a .pdf file. Converting to image in {output_format} format.')\n converted_image_path = pdf_to_img(f'{path}/{file}', folder_path, output_format)\n ocrise_pdf(converted_image_path, filename, saved_file_path)\n elif file_extension in SUPPORTED_IMAGE_FORMAT:\n text = ocrise_single(input_file=f'{path}/{file}',\n language_mode=args.language_mode,\n single_lang=args.single_language,\n multiple_langs=args.multiple_langs,\n psm=args.page_segmentation_mode,\n oem=args.ocr_engine_mode,\n gray_scale=args.gray_scale,\n remove_noise=args.remove_noise,\n thresholding=args.thresholding,\n dilate=args.dilate,\n erosion=args.erosion,\n edge_detection=args.edge_detection,\n skew_correction=args.skew_correction,\n see_image=args.see_image)\n if len([x for x in files if '-' in x]) > 0 and len(file.split('-')[:-1]) > 1:\n if f\"{'-'.join(file.split('-')[:-1])}.txt\" not in [f for f in os.listdir(saved_file_path)]:\n save_to_txt(f\"{saved_file_path}/{'-'.join(file.split('-')[:-1])}.txt\", text)\n elif f\"{'-'.join(file.split('-')[:-1])}.txt\" in [f for f in os.listdir(saved_file_path)]:\n with open(f\"{saved_file_path}/{'-'.join(file.split('-')[:-1])}.txt\", \"a\") as existing_file:\n existing_file.write(f\"\\n\\n\\n{text}\")\n else:\n ocr_all = ocr_all + text\n else:\n print(f'FILE FORMAT NOT SUPPORTED (yet!), SKIPPING {file}')\n if len(ocr_all) > 0:\n save_to_txt(f'{saved_file_path}/{path.split(\"/\")[-1]}.txt', ocr_all)\n print(f'SAVED FILE {saved_file_path}/{path.split(\"/\")[-1]}.txt')\n\n\ndef ocrise_pdf(converted_image_path, filename, output_folder):\n ocr_all_pdf = ''\n for path, dirs, files in os.walk(converted_image_path):\n for img in sorted(files, key=lambda f: int(re.sub('\\D', '1', f))):\n text = ocrise_single(input_file=f'{path}/{img}',\n language_mode=args.language_mode,\n single_lang=args.single_language,\n multiple_langs=args.multiple_langs,\n psm=args.page_segmentation_mode,\n oem=args.ocr_engine_mode,\n gray_scale=args.gray_scale,\n remove_noise=args.remove_noise,\n thresholding=args.thresholding,\n dilate=args.dilate,\n erosion=args.erosion,\n edge_detection=args.edge_detection,\n skew_correction=args.skew_correction,\n see_image=args.see_image)\n ocr_all_pdf = ocr_all_pdf + text\n save_to_txt(f'{output_folder}/{filename}.txt', ocr_all_pdf)\n\n\ndef ocrise_single(input_file, language_mode, single_lang, multiple_langs, psm, oem,\n gray_scale, remove_noise, thresholding, dilate, erosion, edge_detection, skew_correction, see_image):\n print(\"PROCESSING IMAGE: {}\".format(input_file))\n\n image = image_processing(input_file, gray_scale, remove_noise, thresholding, dilate, erosion, edge_detection,\n skew_correction, see_image)\n image_ocr = ocr(image, language_mode, psm, oem, multiple_langs, single_lang)\n\n return image_ocr\n\n\ndef save_to_txt(out_name: str, ocr_res: str):\n f = open(out_name, 'w')\n 
f.write(str(ocr_res.encode('utf-8')))\n f.close()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n # File parameters\n parser.add_argument('--input_path',\n type=str,\n default=os.getenv('INPUT_PATH')) # accepts pdf files, image files and image folders\n parser.add_argument('--saved_file_path',\n type=str,\n default=os.getenv('OUTPUT_PATH')) # only needed if the input format is pdf\n parser.add_argument('--converted_image_output_path',\n type=str,\n default=os.getenv('CONVERTED_IMAGE_PATH')) # only needed if the input format is pdf\n parser.add_argument('--output_format',\n type=str,\n default=os.getenv('OUTPUT_FORMAT')) # only needed if the input format is pdf\n\n # Language parameters\n parser.add_argument('--language_mode',\n type=str,\n default=os.getenv('LANGUAGE_MODE')) # \"multi\" if working with more tha n one language, \"mono\" otherwise\n parser.add_argument('--single_language',\n type=str,\n default=os.getenv('SINGLE_LANGUAGE')) # needed if working with --language_mode = \"single\"\n parser.add_argument('--multiple_langs',\n type=str,\n default=os.getenv('MULTIPLE_LANGUAGES')) # needed if working with --language_mode = \"multi\"\n\n # Preprocessing parameters\n parser.add_argument('--gray_scale', type=bool, default=os.getenv('GRAY_SCALE'))\n parser.add_argument('--remove_noise', type=bool, default=os.getenv('REMOVE_NOISE'))\n parser.add_argument('--thresholding', type=bool, default=os.getenv('THRESHOLDING'))\n parser.add_argument('--dilate', type=bool, default=os.getenv('DILATE'))\n parser.add_argument('--erosion', type=bool, default=os.getenv('EROSION'))\n parser.add_argument('--edge_detection', type=bool, default=os.getenv('EDGE_DETECTION'))\n parser.add_argument('--skew_correction', type=bool, default=os.getenv('SKEW_CORRECTION'))\n parser.add_argument('--see_image', type=bool, default=os.getenv('SEE_IMAGE'))\n\n # OCR parameters\n parser.add_argument('--page_segmentation_mode', type=int, default=os.getenv('PAGE_SEGMENTATION_MODE'))\n parser.add_argument('--ocr_engine_mode', type=int, default=os.getenv('OCR_ENGINE_MODE'))\n\n args = parser.parse_args()\n\n print(args.skew_correction, os.getenv('SKEW_CORRECTION'))\n\n file_name, extension = file_info(args.input_path)\n if extension == \".pdf\" and not os.path.isdir(args.input_path):\n print(\"The input file is a .pdf file. Converting to image in {} format.\".format(args.output_format))\n converted_images = pdf_to_img(args.input_path, args.converted_image_output_path, args.output_format)\n ocrise_pdf(converted_images, file_name, args.saved_file_path)\n elif os.path.isdir(args.input_path):\n print(\"The input corresponds to a folder. Processing files contained in it.\")\n ocrise_folder(args.input_path, args.saved_file_path, args.output_format)\n else:\n text = ocrise_single(input_file=args.input_path,\n language_mode=args.language_mode,\n single_lang=args.single_language,\n multiple_langs=args.multiple_langs,\n psm=args.page_segmentation_mode,\n oem=args.ocr_engine_mode,\n gray_scale=args.gray_scale,\n remove_noise=args.remove_noise,\n thresholding=args.thresholding,\n dilate=args.dilate,\n erosion=args.erosion,\n edge_detection=args.edge_detection,\n skew_correction=args.skew_correction,\n see_image=args.see_image)\n save_to_txt(f'{args.saved_file_path}/{file_name}.txt', text)\n print(f'SAVED FILE {args.saved_file_path}/{file_name}.txt')\n"
] |
[
[
"numpy.where",
"numpy.ones"
]
] |
svjack/Bu-Hts
|
[
"8621f4439bcd04800f887ac6b023ce49fc9a867f"
] |
[
"script/feature-construct.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\n\n\n# In[2]:\n\n\ntrain = pd.read_csv(\"../data/train.csv\", parse_dates=True, low_memory=False, index_col = \"Date\")\nstore = pd.read_csv(\"../data/store.csv\", low_memory=False)\nstore.fillna(0, inplace = True)\n\n\n# In[3]:\n\n\ntrain[\"Year\"] = train.index.year\ntrain[\"Month\"] = train.index.month\ntrain[\"Day\"] = train.index.day\ntrain[\"WeekOfYear\"] = train.index.weekofyear\ntrain[\"SalesPerCustomer\"] = train[\"Sales\"] / train[\"Customers\"]\n\n\n# In[4]:\n\n\ntrain = train[(train[\"Open\"] != 0) & (train[\"Sales\"] != 0)].copy()\n\n\n# In[5]:\n\n\ntrain_store = pd.merge(train, store, how = \"inner\", on = \"Store\")\n\n\n# In[6]:\n\n\ntrain_store_expand_date = pd.merge(train.reset_index(), store, how = \"inner\", on = \"Store\")\n\n\n# In[7]:\n\n\nschoolh_by_store = train_store_expand_date[[\"Store\", \"SchoolHoliday\", \"Date\"]].groupby(\"Store\")[[\"SchoolHoliday\", \"Date\"]].apply(dict)\n\n\n# In[8]:\n\n\nfrom functools import reduce\ninter_dates = sorted(list(reduce(lambda a, b: a.intersection(b) ,schoolh_by_store.map(lambda dict_: set(dict_[\"Date\"].tolist())).tolist())))\n\n\n# In[9]:\n\n\ndef retrieve_sd_list(dict_):\n SchoolHoliday = dict_[\"SchoolHoliday\"].tolist()\n Date = dict_[\"Date\"].tolist()\n assert len(SchoolHoliday) == len(Date)\n req = []\n for ele in inter_dates:\n idx = Date.index(ele)\n req.append(SchoolHoliday[idx])\n return req\nschoolh_inter_idxes = schoolh_by_store.map(\n retrieve_sd_list\n)\n\n\n# In[10]:\n\n\nschool_holiday_store_df = schoolh_inter_idxes.map(lambda x: \"\".join(map(str,x))).reset_index()\n\n\n# In[11]:\n\n\nfrom copy import deepcopy \nstore_cp = deepcopy(store)\nstore_cp_info = pd.merge(store_cp, school_holiday_store_df, on = \"Store\", how = \"inner\")\n\n\n# In[12]:\n\n\nfrom functools import reduce\nPromoInterval_cnt_series = pd.Series(reduce(lambda a, b: a + b ,store_cp_info[\"PromoInterval\"].map(lambda x: list(map(lambda y :\"PromoInterval_{}\".format(y),x.split(\",\"))) if type(x) == type(\"\") else []).values.tolist())).value_counts()\n\n\n# In[13]:\n\n\nPromoInterval_expand_columns = PromoInterval_cnt_series.sort_index().index.tolist()\n\n\n# In[14]:\n\n\nstore_cp_info_expand_PromoInterval = deepcopy(store_cp_info)\nfor col in PromoInterval_expand_columns:\n store_cp_info_expand_PromoInterval[col] = 0\n\n\n# In[15]:\n\n\nfor ridx, (idx, r) in enumerate(store_cp_info.iterrows()):\n x = r[\"PromoInterval\"]\n set_list = list(map(lambda y :\"PromoInterval_{}\".format(y),x.split(\",\"))) if type(x) == type(\"\") else []\n for ele in set_list:\n store_cp_info_expand_PromoInterval.iloc[ridx, store_cp_info_expand_PromoInterval.columns.get_loc(ele)] = 1\n\n\n# In[16]:\n\n\nstore_cp_info_expand_PromoInterval = store_cp_info_expand_PromoInterval.rename(columns = {\n 0: \"schoolholiday_str\"\n})\n\n\n# In[17]:\n\n\ndef simple_cate_encode(input_series):\n idx_value_dict = dict(enumerate(input_series.value_counts().index.tolist()))\n value_idx_dict = dict(map(lambda t2: (t2[1], t2[0]), idx_value_dict.items()))\n return pd.Series(list(map(lambda x: value_idx_dict[x], input_series.values.tolist())))\nset_new_columns_dict = dict(map(lambda colname: (colname, simple_cate_encode(store_cp_info_expand_PromoInterval[colname])) ,store_cp_info_expand_PromoInterval.dtypes.map(str)[store_cp_info_expand_PromoInterval.dtypes.map(str) == \"object\"].index.tolist()))\n\n\n# In[18]:\n\n\nfor colname, new_col in set_new_columns_dict.items():\n 
store_cp_info_expand_PromoInterval[\"{}_encode\".format(colname)] = new_col\n\n\n# In[19]:\n\n\ndef transform_columns(left, right):\n assert \"Store\" in left.columns.tolist() and \"Store\" in right.columns.tolist()\n right_encoded_colnames = list(filter(lambda colname: colname.endswith(\"_encode\") ,right.columns.tolist()))\n right_encoded_colnames.remove(\"schoolholiday_str_encode\")\n print(\"add num : {}\".format(len(right_encoded_colnames)))\n left_replace_colnames = list(map(lambda colname: colname.replace(\"_encode\", \"\"), right_encoded_colnames)) \n assert len(left_replace_colnames) == len(set(left_replace_colnames).intersection(set(left.columns.tolist())))\n left_before_merge = left.copy()\n for col in left_replace_colnames:\n del left_before_merge[col]\n right_before_merge = right.copy()\n merged = pd.merge(left = left_before_merge, right = right_before_merge, on = \"Store\", how = \"inner\")\n assert left.shape[0] == merged.shape[0]\n return merged\n \nright_cols = store_cp_info_expand_PromoInterval.columns.tolist()[store_cp_info_expand_PromoInterval.columns.tolist().index(\"PromoInterval_Apr\"):]\nright_cols = [\"Store\"] + right_cols\ntrain_store_encoded = transform_columns(left = train_store_expand_date, right = store_cp_info_expand_PromoInterval[right_cols])\n\n\n# In[20]:\n\n\nobj_cols = train_store_encoded.dtypes.map(str)[train_store_encoded.dtypes.map(str) == \"object\"].index.tolist()\nfor col in obj_cols:\n train_store_encoded[\"{}_encode\".format(col)] = simple_cate_encode(train_store_encoded[col])\n del train_store_encoded[col]\n\n\n# In[21]:\n\n\nif \"Open\" in train_store_encoded.columns.tolist():\n del train_store_encoded[\"Open\"]\n\n\n# In[22]:\n\n\nint_cols = train_store_encoded.dtypes.map(str)[train_store_encoded.dtypes.map(str) == \"int64\"].index.tolist()\nint_cols_stats = train_store_encoded[int_cols].apply(lambda s: len(s.value_counts()), axis = 0)\nbool_cols = int_cols_stats[int_cols_stats == 2]\nbool_stats = int_cols_stats.loc[bool_cols.index]\nothers = list(set(int_cols_stats.index.tolist()).difference(set(bool_cols.index.tolist())))\nother_stats = int_cols_stats.loc[others]\nencode_cols = list(filter(lambda x: x.endswith(\"_encode\"), other_stats.index.tolist()))\nencode_stats = int_cols_stats.loc[encode_cols]\nothers = list(set(others).difference(set(encode_stats.index.tolist())))\nother_stats = int_cols_stats.loc[others]\n\n\n# In[23]:\n\n\nint_cols_stats_list = [bool_stats, encode_stats, other_stats]\nfloat_cols = train_store_encoded.dtypes.map(str)[train_store_encoded.dtypes.map(str) == \"float64\"].index.tolist()\nfloat_stats = train_store_encoded[float_cols].apply(lambda x: len(x.value_counts()), axis = 0)\nds_stats = train_store_encoded[train_store_encoded.dtypes.map(str)[train_store_encoded.dtypes.map(str) == \"datetime64[ns]\"].index.tolist()].apply(lambda x: len(x.value_counts()), axis = 0)\nall_cols_stats_list = int_cols_stats_list + [float_stats, ds_stats]\n\n\n# In[24]:\n\n\nfrom functools import reduce\nassert reduce(lambda a, b: a + b ,map(len ,all_cols_stats_list)) == train_store_encoded.shape[1]\n\n\n# In[25]:\n\n\nfrom sklearn.preprocessing import OneHotEncoder\nenc = OneHotEncoder()\nenc_part = enc.fit_transform(train_store_encoded[encode_stats.index.tolist()])\n\n\n# In[26]:\n\n\nreq = []\nfor col_stats_idx in set(range(len(all_cols_stats_list))).difference(set([1])):\n req.append(train_store_encoded[all_cols_stats_list[col_stats_idx].index.tolist()])\nreq.append(enc_part)\n\n\n# In[27]:\n\n\n#list(map(lambda x: x.shape, 
req))\n\n\n# In[28]:\n\n\ntrain_store_encoded_onehot = pd.concat(map(lambda x: x if type(x) == type(pd.DataFrame()) else pd.DataFrame(x.toarray()) ,req), axis = 1)\n\n\n# In[29]:\n\n\ntrain_store_encoded_onehot.columns, train_store_encoded_onehot.shape\n\n\n# In[30]:\n\n\ntrain_store_encoded.to_csv(\"../data/train_store_encoded.csv\", index = False)\ntrain_store_expand_date.to_csv(\"../data/train_store_expand_date.csv\", index = False)\ntrain_store_encoded_onehot.to_csv(\"../data/train_store_encoded_onehot.csv\", index = False)\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"pandas.merge",
"pandas.read_csv",
"pandas.DataFrame",
"sklearn.preprocessing.OneHotEncoder"
]
] |
averkij/transformer-deploy
|
[
"2a0b527dc187e14c718d36dbac8a3e29e34cf78b",
"2a0b527dc187e14c718d36dbac8a3e29e34cf78b"
] |
[
"src/transformer_deploy/convert.py",
"src/transformer_deploy/benchmarks/utils.py"
] |
[
"#!/usr/bin/env python3\n\n# Copyright 2021, Lefebvre Sarrut Services\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport os\nfrom pathlib import Path\nfrom typing import Callable, Dict, List, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom numpy import ndarray\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer, PreTrainedModel, PreTrainedTokenizer\n\nfrom transformer_deploy.backends.ort_utils import (\n convert_to_onnx,\n cpu_quantization,\n create_model_for_provider,\n optimize_onnx,\n)\nfrom transformer_deploy.benchmarks.utils import (\n compare_outputs,\n generate_multiple_inputs,\n print_timings,\n setup_logging,\n track_infer_time,\n)\nfrom transformer_deploy.templates.triton import Configuration, ModelType\nfrom transformer_deploy.utils.args import parse_args\n\n\ndef check_accuracy(\n engine_name: str, pytorch_output: List[np.ndarray], engine_output: List[np.ndarray], tolerance: float\n) -> None:\n \"\"\"\n Compare engine predictions with a reference. Assert that the difference is under a threshold.\n :param engine_name: string used in error message, if any\n :param pytorch_output: reference output used for the comparaison\n :param engine_output: output from the engine\n :param tolerance: if difference in outputs is above threshold, an error will be raised\n \"\"\"\n discrepency = compare_outputs(pytorch_output=pytorch_output, engine_output=engine_output)\n assert discrepency < tolerance, (\n f\"{engine_name} discrepency is too high ({discrepency:.2f} > {tolerance}):\\n\"\n f\"Pythorch:\\n{pytorch_output}\\n\"\n f\"VS\\n\"\n f\"{engine_name}:\\n{engine_output}\\n\"\n f\"Diff:\\n\"\n f\"{np.asarray(pytorch_output) - np.asarray(engine_output)}\\n\"\n \"Tolerance can be increased with --atol parameter.\"\n )\n\n\ndef launch_inference(\n infer: Callable, inputs: List[Dict[str, Union[np.ndarray, torch.Tensor]]], nb_measures: int\n) -> Tuple[List[np.ndarray], List[float]]:\n \"\"\"\n Perform inference and measure latency\n :param infer: a lambda which will perform the inference\n :param inputs: tensor compatible with the lambda (Torch tensor for Pytorch, or numpy otherwise)\n :param nb_measures: number of measures to perform for the latency measure\n :return: a tuple of model output and inference latencies\n \"\"\"\n assert type(inputs) == list\n assert len(inputs) > 0\n outputs = list()\n for batch_input in inputs:\n output = infer(batch_input)\n outputs.append(output)\n time_buffer: List[float] = list()\n for _ in range(nb_measures):\n with track_infer_time(time_buffer):\n _ = infer(inputs[0])\n return outputs, time_buffer\n\n\ndef main(commands: argparse.Namespace):\n setup_logging(level=logging.INFO if commands.verbose else logging.WARNING)\n if commands.device == \"cpu\" and \"tensorrt\" in commands.backend:\n raise Exception(\"can't perform inference on CPU and use Nvidia TensorRT as backend\")\n if len(commands.seq_len) == len(set(commands.seq_len)) and \"tensorrt\" in commands.backend:\n 
logging.warning(\"having different sequence lengths may make TensorRT slower\")\n\n torch.manual_seed(commands.seed)\n np.random.seed(commands.seed)\n torch.set_num_threads(commands.nb_threads)\n if commands.device is None:\n commands.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n if isinstance(commands.auth_token, str) and commands.auth_token.lower() in [\"true\", \"t\"]:\n auth_token = True\n elif isinstance(commands.auth_token, str):\n auth_token = commands.auth_token\n else:\n auth_token = None\n\n Path(commands.output).mkdir(parents=True, exist_ok=True)\n onnx_model_path = os.path.join(commands.output, \"model-original.onnx\")\n onnx_optim_model_path = os.path.join(commands.output, \"model.onnx\")\n tensorrt_path = os.path.join(commands.output, \"model.plan\")\n if commands.device == \"cuda\":\n assert torch.cuda.is_available(), \"CUDA is not available. Please check your CUDA installation\"\n tokenizer_path = commands.tokenizer if commands.tokenizer else commands.model\n tokenizer: PreTrainedTokenizer = AutoTokenizer.from_pretrained(tokenizer_path, use_auth_token=auth_token)\n input_names: List[str] = tokenizer.model_input_names\n logging.info(f\"axis: {input_names}\")\n include_token_ids = \"token_type_ids\" in input_names\n model_pytorch: PreTrainedModel = AutoModelForSequenceClassification.from_pretrained(\n commands.model, use_auth_token=auth_token\n )\n model_pytorch.eval()\n if commands.device == \"cuda\":\n model_pytorch.cuda()\n\n tensor_shapes = list(zip(commands.batch_size, commands.seq_len))\n # take optimial size\n inputs_pytorch, inputs_onnx = generate_multiple_inputs(\n batch_size=tensor_shapes[1][0],\n seq_len=tensor_shapes[1][1],\n include_token_ids=include_token_ids,\n device=commands.device,\n nb_inputs_to_gen=commands.warmup,\n )\n\n # create onnx model and compare results\n opset = 12\n if commands.quantization and commands.device == \"cuda\":\n try:\n from pytorch_quantization.nn import TensorQuantizer\n except ImportError:\n raise ImportError(\n \"It seems that pytorch-quantization is not yet installed. 
\"\n \"It is required when you enable the quantization flag and use CUDA device.\"\n \"Please find installation instructions on \"\n \"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization or use:\\n\"\n \"pip3 install git+ssh://[email protected]/NVIDIA/TensorRT#egg=pytorch-quantization\\\\&\"\n \"subdirectory=tools/pytorch-quantization/\"\n )\n\n TensorQuantizer.use_fb_fake_quant = True\n opset = 13\n\n convert_to_onnx(\n model_pytorch=model_pytorch, output_path=onnx_model_path, inputs_pytorch=inputs_pytorch[0], opset=opset\n )\n if commands.quantization and commands.device == \"cuda\":\n TensorQuantizer.use_fb_fake_quant = False\n\n timings = {}\n\n def infer_classification_pytorch(model: PreTrainedModel) -> Callable[[Dict[str, torch.Tensor]], np.ndarray]:\n def infer(inputs: Dict[str, torch.Tensor]) -> np.ndarray:\n model_output = model(**inputs).logits.detach().cpu().numpy() # noqa: F821\n if commands.device == \"cuda\":\n torch.cuda.synchronize()\n return model_output\n\n return infer\n\n with torch.inference_mode():\n pytorch_output, time_buffer = launch_inference(\n infer=infer_classification_pytorch(model=model_pytorch),\n inputs=inputs_pytorch,\n nb_measures=commands.nb_measures,\n )\n timings[\"Pytorch (FP32)\"] = time_buffer\n if commands.device == \"cuda\":\n from torch.cuda.amp import autocast\n\n with autocast():\n engine_name = \"Pytorch (FP16)\"\n pytorch_fp16_output, time_buffer = launch_inference(\n infer=infer_classification_pytorch(model=model_pytorch),\n inputs=inputs_pytorch,\n nb_measures=commands.nb_measures,\n )\n check_accuracy(\n engine_name=engine_name,\n pytorch_output=pytorch_output,\n engine_output=pytorch_fp16_output,\n tolerance=commands.atol,\n )\n timings[engine_name] = time_buffer\n elif commands.device == \"cpu\":\n model_pytorch = torch.quantization.quantize_dynamic(model_pytorch, {torch.nn.Linear}, dtype=torch.qint8)\n engine_name = \"Pytorch (INT-8)\"\n pytorch_int8_output, time_buffer = launch_inference(\n infer=infer_classification_pytorch(model=model_pytorch),\n inputs=inputs_pytorch,\n nb_measures=commands.nb_measures,\n )\n check_accuracy(\n engine_name=engine_name,\n pytorch_output=pytorch_output,\n engine_output=pytorch_int8_output,\n tolerance=commands.atol,\n )\n timings[engine_name] = time_buffer\n del model_pytorch\n\n if \"tensorrt\" in commands.backend:\n try:\n import tensorrt as trt\n from tensorrt.tensorrt import ICudaEngine, Logger, Runtime\n\n from transformer_deploy.backends.trt_utils import build_engine, load_engine, save_engine\n except ImportError:\n raise ImportError(\n \"It seems that pycuda and TensorRT are not yet installed. 
\"\n \"They are required when you declare TensorRT backend.\"\n \"Please find installation instruction on \"\n \"https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html\"\n )\n\n trt_logger: Logger = trt.Logger(trt.Logger.INFO if commands.verbose else trt.Logger.WARNING)\n runtime: Runtime = trt.Runtime(trt_logger)\n engine: ICudaEngine = build_engine(\n runtime=runtime,\n onnx_file_path=onnx_model_path,\n logger=trt_logger,\n min_shape=tensor_shapes[0],\n optimal_shape=tensor_shapes[1],\n max_shape=tensor_shapes[2],\n workspace_size=commands.workspace_size * 1024 * 1024,\n fp16=not commands.quantization,\n int8=commands.quantization,\n )\n save_engine(engine=engine, engine_file_path=tensorrt_path)\n # important to check the engine has been correctly serialized\n tensorrt_model: Callable[[Dict[str, ndarray]], ndarray] = load_engine(\n runtime=runtime, engine_file_path=tensorrt_path\n )\n\n engine_name = \"TensorRT (FP16)\"\n tensorrt_output, time_buffer = launch_inference(\n infer=tensorrt_model, inputs=inputs_onnx, nb_measures=commands.nb_measures\n )\n check_accuracy(\n engine_name=engine_name,\n pytorch_output=pytorch_output,\n engine_output=tensorrt_output,\n tolerance=commands.atol,\n )\n timings[engine_name] = time_buffer\n del engine, tensorrt_model, runtime # delete all tensorrt objects\n conf = Configuration(\n model_name=commands.name,\n model_type=ModelType.TensorRT,\n batch_size=0,\n nb_output=pytorch_output[0].shape[1],\n nb_instance=commands.nb_instances,\n include_token_type=include_token_ids,\n workind_directory=commands.output,\n device=commands.device,\n )\n conf.create_folders(tokenizer=tokenizer, model_path=tensorrt_path)\n\n if \"onnx\" in commands.backend:\n # create optimized onnx model and compare results\n optimize_onnx(\n onnx_path=onnx_model_path,\n onnx_optim_model_path=onnx_optim_model_path,\n fp16=commands.device == \"cuda\",\n use_cuda=commands.device == \"cuda\",\n )\n if commands.device == \"cpu\" and commands.quantization:\n cpu_quantization(input_model_path=onnx_optim_model_path, output_model_path=onnx_optim_model_path)\n\n ort_provider = \"CUDAExecutionProvider\" if commands.device == \"cuda\" else \"CPUExecutionProvider\"\n for provider, model_path, benchmark_name in [\n (ort_provider, onnx_model_path, \"ONNX Runtime (FP32)\"),\n (ort_provider, onnx_optim_model_path, \"ONNX Runtime (optimized)\"),\n ]:\n ort_model = create_model_for_provider(\n path=model_path,\n provider_to_use=provider,\n nb_threads=commands.nb_threads,\n nb_instances=commands.nb_instances,\n )\n\n def infer_ort(inputs: Dict[str, np.ndarray]) -> np.ndarray:\n return ort_model.run(None, inputs)\n\n ort_output, time_buffer = launch_inference(\n infer=infer_ort, inputs=inputs_onnx, nb_measures=commands.nb_measures\n )\n check_accuracy(\n engine_name=benchmark_name,\n pytorch_output=pytorch_output,\n engine_output=ort_output,\n tolerance=commands.atol,\n )\n timings[benchmark_name] = time_buffer\n del ort_model\n\n conf = Configuration(\n model_name=commands.name,\n model_type=ModelType.ONNX,\n batch_size=0,\n nb_output=pytorch_output[0].shape[1],\n nb_instance=commands.nb_instances,\n include_token_type=include_token_ids,\n workind_directory=commands.output,\n device=commands.device,\n )\n conf.create_folders(tokenizer=tokenizer, model_path=onnx_optim_model_path)\n\n if commands.device == \"cuda\":\n from torch.cuda import get_device_name\n\n print(f\"Inference done on {get_device_name(0)}\")\n\n print(\"latencies:\")\n for name, time_buffer in timings.items():\n 
print_timings(name=name, timings=time_buffer)\n\n\ndef entrypoint():\n args = parse_args()\n main(commands=args)\n\n\nif __name__ == \"__main__\":\n entrypoint()\n",
"# Copyright 2021, Lefebvre Sarrut Services\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nfrom collections import OrderedDict\nfrom contextlib import contextmanager\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nimport torch\n\n\ndef print_timings(name: str, timings: List[float]) -> None:\n \"\"\"\n Format and print latencies\n :param name: engine name\n :param timings: latencies measured during the inference\n \"\"\"\n mean_time = 1e3 * np.mean(timings)\n std_time = 1e3 * np.std(timings)\n min_time = 1e3 * np.min(timings)\n max_time = 1e3 * np.max(timings)\n median, percent_95_time, percent_99_time = 1e3 * np.percentile(timings, [50, 95, 99])\n print(\n f\"[{name}] \"\n f\"mean={mean_time:.2f}ms, \"\n f\"sd={std_time:.2f}ms, \"\n f\"min={min_time:.2f}ms, \"\n f\"max={max_time:.2f}ms, \"\n f\"median={median:.2f}ms, \"\n f\"95p={percent_95_time:.2f}ms, \"\n f\"99p={percent_99_time:.2f}ms\"\n )\n\n\ndef setup_logging(level: int = logging.INFO) -> None:\n \"\"\"\n Set the generic Python logger\n :param level: logger level\n \"\"\"\n logging.basicConfig(format=\"%(asctime)s %(levelname)-8s %(message)s\", datefmt=\"%m/%d/%Y %H:%M:%S\", level=level)\n\n\n@contextmanager\ndef track_infer_time(buffer: List[int]) -> None:\n \"\"\"\n A context manager to perform latency measures\n :param buffer: a List where to save latencies for each input\n \"\"\"\n start = time.perf_counter()\n yield\n end = time.perf_counter()\n buffer.append(end - start)\n\n\ndef generate_input(\n seq_len: int, batch_size: int, include_token_ids: bool, device: str = \"cuda\"\n) -> Tuple[Dict[str, torch.Tensor], Dict[str, np.ndarray]]:\n \"\"\"\n Generate dummy inputs.\n :param seq_len: number of token per input.\n :param batch_size: first dimension of the tensor\n :param include_token_ids: should we add token_type_ids\n :param device: where to store tensors (Pytorch only). 
One of [cpu, cuda]\n :return: a tuple of tensors, Pytorch and numpy\n \"\"\"\n assert device in [\"cpu\", \"cuda\"]\n shape = (batch_size, seq_len)\n inputs_pytorch: OrderedDict[str, torch.Tensor] = OrderedDict()\n inputs_pytorch[\"input_ids\"] = torch.randint(high=100, size=shape, dtype=torch.long, device=device)\n if include_token_ids:\n inputs_pytorch[\"token_type_ids\"] = torch.ones(size=shape, dtype=torch.long, device=device)\n inputs_pytorch[\"attention_mask\"] = torch.ones(size=shape, dtype=torch.long, device=device)\n inputs_onnx: Dict[str, np.ndarray] = {\n k: np.ascontiguousarray(v.detach().cpu().numpy()) for k, v in inputs_pytorch.items()\n }\n return inputs_pytorch, inputs_onnx\n\n\ndef generate_multiple_inputs(\n seq_len: int, batch_size: int, include_token_ids: bool, nb_inputs_to_gen: int, device: str = \"cuda\"\n):\n all_inputs_pytorch = list()\n all_inputs_onnx = list()\n for _ in range(nb_inputs_to_gen):\n inputs_pytorch, inputs_onnx = generate_input(\n seq_len=seq_len, batch_size=batch_size, include_token_ids=include_token_ids, device=device\n )\n all_inputs_pytorch.append(inputs_pytorch)\n all_inputs_onnx.append(inputs_onnx)\n return all_inputs_pytorch, all_inputs_onnx\n\n\ndef compare_outputs(pytorch_output: List[np.ndarray], engine_output: List[np.ndarray]) -> float:\n return np.mean(np.abs(np.asarray(pytorch_output) - np.asarray(engine_output)))\n"
] |
[
[
"torch.cuda.synchronize",
"numpy.random.seed",
"numpy.asarray",
"torch.manual_seed",
"torch.inference_mode",
"torch.cuda.amp.autocast",
"torch.quantization.quantize_dynamic",
"torch.set_num_threads",
"torch.cuda.is_available",
"torch.cuda.get_device_name"
],
[
"torch.randint",
"torch.ones",
"numpy.min",
"numpy.asarray",
"numpy.percentile",
"numpy.max",
"numpy.std",
"numpy.mean"
]
] |
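The `apis` lists above summarise the torch and numpy calls made by the benchmark entry point and by `benchmarks/utils.py`. As a minimal, self-contained sketch of the latency-measurement pattern those files rely on (a `track_infer_time` context manager feeding `numpy.mean`/`numpy.percentile` reporting), the snippet below reproduces the idea with a placeholder `dummy_infer` function that is not part of the repository:

import time
from contextlib import contextmanager
from typing import List

import numpy as np


@contextmanager
def track_infer_time(buffer: List[float]):
    # Append one perf_counter delta per inference call, as in benchmarks/utils.py.
    start = time.perf_counter()
    yield
    buffer.append(time.perf_counter() - start)


def dummy_infer() -> None:
    # Stand-in for a model forward pass; purely illustrative.
    time.sleep(0.001)


time_buffer: List[float] = []
for _ in range(100):
    with track_infer_time(time_buffer):
        dummy_infer()

mean_ms = 1e3 * np.mean(time_buffer)
p50_ms, p95_ms, p99_ms = 1e3 * np.percentile(time_buffer, [50, 95, 99])
print(f"mean={mean_ms:.2f}ms, median={p50_ms:.2f}ms, 95p={p95_ms:.2f}ms, 99p={p99_ms:.2f}ms")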
iht/kschool-challenge-dl
|
[
"da46222b1c8bd02e5294b9f6a7570dedb7a0ea9a"
] |
[
"trainer/preprocessor.py"
] |
[
"\"\"\"A class to preprocess images.\"\"\"\n\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\n\n\nclass MyImagePreprocessor:\n \"\"\"Preprocess images for training or inference.\"\"\"\n\n def __init__(self, img_size, batch_size):\n \"\"\"Create a MyImagePreprocessor.\n\n Args:\n img_size: The size of the resulting images.\n batch_size: The size of the batch (for training purposes)\n\n \"\"\"\n self._img_size = img_size\n self._batch_size = batch_size\n self._reduction_factor = 255.0\n\n self._img_datagen = ImageDataGenerator(rescale=1 / self._reduction_factor)\n\n def generator(self, dirname, ):\n \"\"\"Create and return a generator for the preprocessed images.\n\n Args:\n dirname: The name of the directory with the images.\n \"\"\"\n self._img_generator = self._img_datagen.flow_from_directory(\n dirname,\n target_size=(self._img_size, self._img_size),\n batch_size=self._batch_size,\n class_mode='binary')\n\n return self._img_generator\n\n def preprocess_params(self):\n \"\"\"Get the ImageDataGenerator.\"\"\"\n return (self._img_size, self._reduction_factor)\n"
] |
[
[
"tensorflow.keras.preprocessing.image.ImageDataGenerator"
]
] |
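The entry above wraps Keras' `ImageDataGenerator` in a small `MyImagePreprocessor` class. A minimal usage sketch of the same pattern is shown below; the directory path, image size and batch size are hypothetical values chosen for illustration, and the directory is expected to contain one sub-folder per class so that `flow_from_directory` can assign binary labels:

from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Rescale pixels to [0, 1], as MyImagePreprocessor does with its reduction factor of 255.
img_datagen = ImageDataGenerator(rescale=1 / 255.0)
train_generator = img_datagen.flow_from_directory(
    "data/train",            # hypothetical directory with one sub-folder per class
    target_size=(150, 150),  # hypothetical img_size
    batch_size=32,           # hypothetical batch_size
    class_mode="binary",
)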
alexmagsam/keras-rpn
|
[
"394ed2506003f3bed3c5247d47213163f47931c0"
] |
[
"lib/model.py"
] |
[
"import os\nimport keras.layers as KL\nimport keras.models as KM\nimport keras.optimizers as KO\nimport tensorflow as tf\nfrom keras.callbacks import CSVLogger, ModelCheckpoint\n\nfrom lib import losses as ls\n\n\nclass RPN:\n\n def __init__(self, config, mode='train'):\n assert mode in ['train', 'inference']\n self.config = config\n\n # Build the model\n self.model = self.build_entire_model(mode)\n print(self.model.summary())\n\n # Compile in training mode\n if mode == 'train':\n self.compile()\n\n @staticmethod\n def build_backbone(input_tensor, architecture, stage5=False, train_bn=None):\n \"\"\"Build a ResNet model.\n\n Arguments\n ----------\n input_tensor: Keras Input layer\n Tensor for image input\n architecture: str, \"resnet50\" or \"resnet101\"\n Architecture to use\n stage5: bool\n If False, stage5 of the network is not created\n train_bn: bool.\n Train or freeze Batch Normalization layers\n\n Returns\n -------\n list\n Backbone layers of ResNet 50 or 101\n\n \"\"\"\n\n # Code adopted from:\n # https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\n def identity_block(tensor, kernel_size, filters, stage, block, use_bias=True):\n \"\"\"The identity_block is the block that has no convolution layer at shortcut\n\n Arguments\n --------\n tensor: Keras Layer\n The tensor to connect to this block.\n kernel_size: int\n The kernel size of the convolutional layer\n filters: list\n List of integers indicating how many filters to use for each convolution layer\n stage: int\n Current stage label for generating layer names\n block: str\n Current block label for generating layer names\n use_bias: bool\n To use or not use a bias in conv layers.\n\n Returns\n -------\n y: Keras Layer\n Output of the Resnet identity block\n \"\"\"\n\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n y = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', use_bias=use_bias)(tensor)\n y = KL.BatchNormalization(name=bn_name_base + '2a')(y, training=train_bn)\n y = KL.Activation('relu')(y)\n\n y = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b',\n use_bias=use_bias)(y)\n y = KL.BatchNormalization(name=bn_name_base + '2b')(y, training=train_bn)\n y = KL.Activation('relu')(y)\n\n y = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(y)\n y = KL.BatchNormalization(name=bn_name_base + '2c')(y, training=train_bn)\n\n y = KL.Add()([y, tensor])\n y = KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)\n return y\n\n def conv_block(tensor, kernel_size, filters, stage, block, strides=(2, 2), use_bias=True):\n\n \"\"\"conv_block is the block that has a conv layer at shortcut\n\n Arguments\n ---------\n tensor: Keras Layer\n The tensor to connect to this block.\n kernel_size: int\n The kernel size of the convolutional layer\n filters: list\n List of integers indicating how many filters to use for each convolution layer\n stage: int\n Current stage label for generating layer names\n block: str\n Current block label for generating layer names\n strides: tuple\n A tuple of integers indicating the strides to make during convolution.\n use_bias: bool\n To use or not use a bias in conv layers.\n\n Returns\n -------\n y: Keras Layer\n Output layer of Resnet conv block\n\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + 
str(stage) + block + '_branch'\n\n y = KL.Conv2D(nb_filter1, (1, 1), strides=strides, name=conv_name_base + '2a', use_bias=use_bias)(\n tensor)\n y = KL.BatchNormalization(name=bn_name_base + '2a')(y, training=train_bn)\n y = KL.Activation('relu')(y)\n\n y = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b',\n use_bias=use_bias)(y)\n y = KL.BatchNormalization(name=bn_name_base + '2b')(y, training=train_bn)\n y = KL.Activation('relu')(y)\n\n y = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', use_bias=use_bias)(y)\n y = KL.BatchNormalization(name=bn_name_base + '2c')(y, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides, name=conv_name_base + '1', use_bias=use_bias)(\n tensor)\n shortcut = KL.BatchNormalization(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n y = KL.Add()([y, shortcut])\n y = KL.Activation('relu', name='res' + str(stage) + block + '_out')(y)\n return y\n\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_tensor)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = KL.BatchNormalization(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i))\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n def build_feature_maps(self, input_tensor):\n\n \"\"\"Build the feature maps for the feature pyramid.\n\n Arguments\n ---------\n input_tensor: Keras Input layer [height, width, channels]\n\n Returns\n -------\n list\n Pyramid layers\n\n \"\"\"\n\n # Don't create the head (stage 5), so we pick the 4th item in the list.\n _, C2, C3, C4, C5 = self.build_backbone(input_tensor, self.config.BACKBONE, stage5=True,\n train_bn=self.config.TRAIN_BN)\n\n # Top-down Layers\n P5 = KL.Conv2D(self.config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(self.config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(self.config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(self.config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])\n\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(self.config.TOP_DOWN_PYRAMID_SIZE, (3, 3), 
padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(self.config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(self.config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(self.config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n\n # P6 is used for the 5th anchor scale in RPN. Generated by sub-sampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n return [P2, P3, P4, P5, P6]\n\n @staticmethod\n def build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n\n Arguments\n ---------\n anchor_stride: int\n Controls the density of anchors. Typically 1 (anchors for every pixel in the feature map), or 2.\n anchors_per_location: int\n Number of anchors per pixel in the feature map. Equivalent to length of anchor ratios.\n depth: int,\n Depth of the backbone feature map. Same as TOP_DOWN_PYRAMID_SIZE\n\n Returns\n -------\n Keras Model\n\n The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2]\n Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2]\n Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))]\n Deltas to be applied to anchors.\n\n \"\"\"\n\n input_feature_map = KL.Input(shape=[None, None, depth], name=\"input_rpn_feature_map\")\n\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu', strides=anchor_stride,\n name='rpn_conv_shared')(input_feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid', activation='linear',\n name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. 
[batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\", activation='linear', name='rpn_bbox_pred')(\n shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n outputs = [rpn_class_logits, rpn_probs, rpn_bbox]\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n def build_entire_model(self, mode='train'):\n\n assert mode in ['train', 'inference']\n\n # Input image\n input_tensor = KL.Input(shape=[self.config.IMAGE_SHAPE[0], self.config.IMAGE_SHAPE[1],\n self.config.NUM_CHANNELS], name=\"input_image\")\n\n # RPN feature maps\n rpn_feature_maps = self.build_feature_maps(input_tensor)\n\n # RPN Network\n rpn = self.build_rpn_model(self.config.ANCHOR_STRIDE, len(self.config.ANCHOR_RATIOS),\n self.config.TOP_DOWN_PYRAMID_SIZE)\n\n # Restructures [[a1, b1, c1], [a2, b2, c2]] -> [[a1, a2], [b1, b2], [c1, c2]]\n layer_outputs = []\n for layer in rpn_feature_maps:\n layer_outputs.append(rpn([layer]))\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n rpn_outputs = list(zip(*layer_outputs))\n rpn_outputs = [KL.Concatenate(axis=1, name=n)(list(o)) for o, n in zip(rpn_outputs, output_names)]\n\n # Outputs of RPN\n rpn_class_logits, rpn_class, rpn_bbox = rpn_outputs\n\n # Loss functions\n # GT inputs to RPN\n input_rpn_match = KL.Input(shape=[None, 1], name=\"input_rpn_match\", dtype=tf.int32)\n input_rpn_bbox = KL.Input(shape=[None, 4], name=\"input_rpn_bbox\", dtype=tf.float32)\n rpn_class_loss = KL.Lambda(lambda x: ls.rpn_match_loss(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: ls.rpn_bbox_loss(self.config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_match, input_rpn_bbox, rpn_bbox])\n\n # Inputs and outputs of the model\n if mode == 'train':\n inputs = [input_tensor, input_rpn_match, input_rpn_bbox]\n outputs = [rpn_class_logits, rpn_class, rpn_bbox, rpn_class_loss, rpn_bbox_loss]\n elif mode == 'inference':\n inputs = [input_tensor]\n outputs = [rpn_class, rpn_bbox]\n\n # Set the model attribute\n return KM.Model(inputs, outputs, name='rpn')\n\n def compile(self):\n\n # Create the optimizer\n optimizer = KO.SGD(lr=self.config.LEARNING_RATE, momentum=self.config.LEARNING_MOMENTUM,\n clipnorm=self.config.GRADIENT_CLIP_NORM)\n\n # Add Losses\n self.model._losses = []\n self.model._per_input_losses = {}\n loss_names = [\"rpn_class_loss\", \"rpn_bbox_loss\"]\n for name in loss_names:\n layer = self.model.get_layer(name)\n if layer.output in self.model.losses:\n continue\n loss = (tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.model.add_loss(loss)\n\n self.model.compile(optimizer=optimizer, loss=[None] * len(self.model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.model.metrics_names:\n continue\n layer = self.model.get_layer(name)\n self.model.metrics_names.append(name)\n loss = (tf.reduce_mean(layer.output, keepdims=True) * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.model.metrics_tensors.append(loss)\n\n def train(self, dataset):\n\n \"\"\"Train the region proposal network using training and validation data\n\n Arguments\n ---------\n dataset: dict\n Dictionary with 'train' and 'validation' keys that hold custom instances of a DataSequence in data_utils.py\n that is dataset dependent.\n\n \"\"\"\n\n # Create the training directories\n 
self.config.create_training_directory()\n\n # Create a callback for saving weights\n filename = \"rpn_weights.{epoch:02d}.hdf5\"\n callbacks = [ModelCheckpoint(os.path.join(self.config.CNN_WEIGHTS_PATH, filename), save_weights_only=True)]\n\n # Create a callback for logging training information\n callbacks.append(CSVLogger(os.path.join(self.config.LOGS, self.config.NAME,\n self.config.TIME_STAMP, 'training.csv')))\n\n # Train the model\n self.model.fit_generator(dataset[\"train\"], len(dataset[\"train\"]), epochs=self.config.EPOCHS, callbacks=callbacks,\n validation_data=dataset[\"validation\"], validation_steps=len(dataset[\"validation\"]))\n\n"
] |
[
[
"tensorflow.shape",
"tensorflow.reduce_mean"
]
] |
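The `tensorflow.shape` and `tensorflow.reduce_mean` calls listed above come from `build_rpn_model` (reshaping per-location anchor scores while keeping the batch dimension symbolic) and from `compile` (reducing the custom loss-layer outputs). A minimal sketch of those two patterns on dummy tensors, not taken from the repository, is:

import tensorflow as tf

# Pretend RPN class scores: [batch, height, width, anchors_per_location * 2].
scores = tf.zeros([4, 8, 8, 3 * 2])

# Flatten to [batch, anchors, 2] while leaving the batch dimension dynamic,
# mirroring the reshape build_rpn_model performs inside its KL.Lambda layer.
rpn_class_logits = tf.reshape(scores, [tf.shape(scores)[0], -1, 2])
print(rpn_class_logits.shape)  # (4, 192, 2)

# keepdims mean, the same reduction pattern compile() applies to each loss layer output.
mean_logit = tf.reduce_mean(rpn_class_logits, keepdims=True)
print(mean_logit.shape)  # (1, 1, 1)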
finnhacks42/pandas
|
[
"64de074403c6e9a574e61aa0500d32ae48d2d4c6"
] |
[
"pandas/io/parsers.py"
] |
[
"\"\"\"\nModule contains tools for processing files into DataFrames or other objects\n\"\"\"\nfrom __future__ import print_function\nfrom collections import defaultdict\nimport re\nimport csv\nimport sys\nimport warnings\nimport datetime\nfrom textwrap import fill\n\nimport numpy as np\n\nfrom pandas import compat\nfrom pandas.compat import (range, lrange, PY3, StringIO, lzip,\n zip, string_types, map, u)\nfrom pandas.core.dtypes.common import (\n is_integer, ensure_object,\n is_list_like, is_integer_dtype,\n is_float, is_dtype_equal,\n is_object_dtype, is_string_dtype,\n is_scalar, is_categorical_dtype)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\nfrom pandas.core.dtypes.missing import isna\nfrom pandas.core.dtypes.cast import astype_nansafe\nfrom pandas.core.index import (Index, MultiIndex, RangeIndex,\n ensure_index_from_sequences)\nfrom pandas.core.series import Series\nfrom pandas.core.frame import DataFrame\nfrom pandas.core.arrays import Categorical\nfrom pandas.core import algorithms\nimport pandas.core.common as com\nfrom pandas.io.date_converters import generic_parser\nfrom pandas.errors import ParserWarning, ParserError, EmptyDataError\nfrom pandas.io.common import (get_filepath_or_buffer, is_file_like,\n _validate_header_arg, _get_handle,\n UnicodeReader, UTF8Recoder, _NA_VALUES,\n BaseIterator, _infer_compression)\nfrom pandas.core.tools import datetimes as tools\n\nfrom pandas.util._decorators import Appender\n\nimport pandas._libs.lib as lib\nimport pandas._libs.parsers as parsers\nimport pandas._libs.ops as libops\nfrom pandas._libs.tslibs import parsing\n\n# BOM character (byte order mark)\n# This exists at the beginning of a file to indicate endianness\n# of a file (stream). Unfortunately, this marker screws up parsing,\n# so we need to remove it if we see it.\n_BOM = u('\\ufeff')\n\n_parser_params = r\"\"\"Also supports optionally iterating or breaking of the file\ninto chunks.\n\nAdditional help can be found in the `online docs for IO Tools\n<http://pandas.pydata.org/pandas-docs/stable/io.html>`_.\n\nParameters\n----------\nfilepath_or_buffer : str, path object, or file-like object\n Any valid string path is acceptable. The string could be a URL. Valid\n URL schemes include http, ftp, s3, and file. For file URLs, a host is\n expected. A local file could be: file://localhost/path/to/table.csv.\n\n If you want to pass in a path object, pandas accepts either\n ``pathlib.Path`` or ``py._path.local.LocalPath``.\n\n By file-like object, we refer to objects with a ``read()`` method, such as\n a file handler (e.g. via builtin ``open`` function) or ``StringIO``.\n%s\ndelim_whitespace : boolean, default False\n Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be\n used as the sep. Equivalent to setting ``sep='\\s+'``. If this option\n is set to True, nothing should be passed in for the ``delimiter``\n parameter.\n\n .. versionadded:: 0.18.1 support for the Python parser.\n\nheader : int or list of ints, default 'infer'\n Row number(s) to use as the column names, and the start of the\n data. Default behavior is to infer the column names: if no names\n are passed the behavior is identical to ``header=0`` and column\n names are inferred from the first line of the file, if column\n names are passed explicitly then the behavior is identical to\n ``header=None``. Explicitly pass ``header=0`` to be able to\n replace existing names. The header can be a list of integers that\n specify row locations for a multi-index on the columns\n e.g. [0,1,3]. 
Intervening rows that are not specified will be\n skipped (e.g. 2 in this example is skipped). Note that this\n parameter ignores commented lines and empty lines if\n ``skip_blank_lines=True``, so header=0 denotes the first line of\n data rather than the first line of the file.\nnames : array-like, default None\n List of column names to use. If file contains no header row, then you\n should explicitly pass header=None. Duplicates in this list will cause\n a ``UserWarning`` to be issued.\nindex_col : int or sequence or False, default None\n Column to use as the row labels of the DataFrame. If a sequence is given, a\n MultiIndex is used. If you have a malformed file with delimiters at the end\n of each line, you might consider index_col=False to force pandas to _not_\n use the first column as the index (row names)\nusecols : list-like or callable, default None\n Return a subset of the columns. If list-like, all elements must either\n be positional (i.e. integer indices into the document columns) or strings\n that correspond to column names provided either by the user in `names` or\n inferred from the document header row(s). For example, a valid list-like\n `usecols` parameter would be [0, 1, 2] or ['foo', 'bar', 'baz']. Element\n order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``.\n To instantiate a DataFrame from ``data`` with element order preserved use\n ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` for columns\n in ``['foo', 'bar']`` order or\n ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]``\n for ``['bar', 'foo']`` order.\n\n If callable, the callable function will be evaluated against the column\n names, returning names where the callable function evaluates to True. An\n example of a valid callable argument would be ``lambda x: x.upper() in\n ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster\n parsing time and lower memory usage.\nsqueeze : boolean, default False\n If the parsed data only contains one column then return a Series\nprefix : str, default None\n Prefix to add to column numbers when no header, e.g. 'X' for X0, X1, ...\nmangle_dupe_cols : boolean, default True\n Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than\n 'X'...'X'. Passing in False will cause data to be overwritten if there\n are duplicate names in the columns.\ndtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}\n Use `str` or `object` together with suitable `na_values` settings\n to preserve and not interpret dtype.\n If converters are specified, they will be applied INSTEAD\n of dtype conversion.\n%s\nconverters : dict, default None\n Dict of functions for converting values in certain columns. 
Keys can either\n be integers or column labels\ntrue_values : list, default None\n Values to consider as True\nfalse_values : list, default None\n Values to consider as False\nskipinitialspace : boolean, default False\n Skip spaces after delimiter.\nskiprows : list-like or integer or callable, default None\n Line numbers to skip (0-indexed) or number of lines to skip (int)\n at the start of the file.\n\n If callable, the callable function will be evaluated against the row\n indices, returning True if the row should be skipped and False otherwise.\n An example of a valid callable argument would be ``lambda x: x in [0, 2]``.\nskipfooter : int, default 0\n Number of lines at bottom of file to skip (Unsupported with engine='c')\nnrows : int, default None\n Number of rows of file to read. Useful for reading pieces of large files\nna_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN. If dict passed, specific\n per-column NA values. By default the following values are interpreted as\n NaN: '\"\"\" + fill(\"', '\".join(sorted(_NA_VALUES)),\n 70, subsequent_indent=\" \") + \"\"\"'.\nkeep_default_na : bool, default True\n Whether or not to include the default NaN values when parsing the data.\n Depending on whether `na_values` is passed in, the behavior is as follows:\n\n * If `keep_default_na` is True, and `na_values` are specified, `na_values`\n is appended to the default NaN values used for parsing.\n * If `keep_default_na` is True, and `na_values` are not specified, only\n the default NaN values are used for parsing.\n * If `keep_default_na` is False, and `na_values` are specified, only\n the NaN values specified `na_values` are used for parsing.\n * If `keep_default_na` is False, and `na_values` are not specified, no\n strings will be parsed as NaN.\n\n Note that if `na_filter` is passed in as False, the `keep_default_na` and\n `na_values` parameters will be ignored.\nna_filter : boolean, default True\n Detect missing value markers (empty strings and the value of na_values). In\n data without any NAs, passing na_filter=False can improve the performance\n of reading a large file\nverbose : boolean, default False\n Indicate number of NA values placed in non-numeric columns\nskip_blank_lines : boolean, default True\n If True, skip over blank lines rather than interpreting as NaN values\nparse_dates : boolean or list of ints or names or list of lists or dict, \\\ndefault False\n\n * boolean. If True -> try parsing the index.\n * list of ints or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call result\n 'foo'\n\n If a column or index contains an unparseable date, the entire column or\n index will be returned unaltered as an object data type. For non-standard\n datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``\n\n Note: A fast-path exists for iso8601-formatted dates.\ninfer_datetime_format : boolean, default False\n If True and `parse_dates` is enabled, pandas will attempt to infer the\n format of the datetime strings in the columns, and if it can be inferred,\n switch to a faster method of parsing them. 
In some cases this can increase\n the parsing speed by 5-10x.\nkeep_date_col : boolean, default False\n If True and `parse_dates` specifies combining multiple columns then\n keep the original columns.\ndate_parser : function, default None\n Function to use for converting a sequence of string columns to an array of\n datetime instances. The default uses ``dateutil.parser.parser`` to do the\n conversion. Pandas will try to call `date_parser` in three different ways,\n advancing to the next if an exception occurs: 1) Pass one or more arrays\n (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n string values from the columns defined by `parse_dates` into a single array\n and pass that; and 3) call `date_parser` once for each row using one or\n more strings (corresponding to the columns defined by `parse_dates`) as\n arguments.\ndayfirst : boolean, default False\n DD/MM format dates, international and European format\niterator : boolean, default False\n Return TextFileReader object for iteration or getting chunks with\n ``get_chunk()``.\nchunksize : int, default None\n Return TextFileReader object for iteration.\n See the `IO Tools docs\n <http://pandas.pydata.org/pandas-docs/stable/io.html#io-chunking>`_\n for more information on ``iterator`` and ``chunksize``.\ncompression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'\n For on-the-fly decompression of on-disk data. If 'infer' and\n `filepath_or_buffer` is path-like, then detect compression from the\n following extensions: '.gz', '.bz2', '.zip', or '.xz' (otherwise no\n decompression). If using 'zip', the ZIP file must contain only one data\n file to be read in. Set to None for no decompression.\n\n .. versionadded:: 0.18.1 support for 'zip' and 'xz' compression.\n\nthousands : str, default None\n Thousands separator\ndecimal : str, default '.'\n Character to recognize as decimal point (e.g. use ',' for European data).\nfloat_precision : string, default None\n Specifies which converter the C engine should use for floating-point\n values. The options are `None` for the ordinary converter,\n `high` for the high-precision converter, and `round_trip` for the\n round-trip converter.\nlineterminator : str (length 1), default None\n Character to break file into lines. Only valid with C parser.\nquotechar : str (length 1), optional\n The character used to denote the start and end of a quoted item. Quoted\n items can include the delimiter and it will be ignored.\nquoting : int or csv.QUOTE_* instance, default 0\n Control field quoting behavior per ``csv.QUOTE_*`` constants. Use one of\n QUOTE_MINIMAL (0), QUOTE_ALL (1), QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).\ndoublequote : boolean, default ``True``\n When quotechar is specified and quoting is not ``QUOTE_NONE``, indicate\n whether or not to interpret two consecutive quotechar elements INSIDE a\n field as a single ``quotechar`` element.\nescapechar : str (length 1), default None\n One-character string used to escape delimiter when quoting is QUOTE_NONE.\ncomment : str, default None\n Indicates remainder of line should not be parsed. If found at the beginning\n of a line, the line will be ignored altogether. This parameter must be a\n single character. Like empty lines (as long as ``skip_blank_lines=True``),\n fully commented lines are ignored by the parameter `header` but not by\n `skiprows`. 
For example, if ``comment='#'``, parsing\n ``#empty\\\\na,b,c\\\\n1,2,3`` with ``header=0`` will result in 'a,b,c' being\n treated as the header.\nencoding : str, default None\n Encoding to use for UTF when reading/writing (ex. 'utf-8'). `List of Python\n standard encodings\n <https://docs.python.org/3/library/codecs.html#standard-encodings>`_\ndialect : str or csv.Dialect instance, default None\n If provided, this parameter will override values (default or not) for the\n following parameters: `delimiter`, `doublequote`, `escapechar`,\n `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to\n override values, a ParserWarning will be issued. See csv.Dialect\n documentation for more details.\ntupleize_cols : boolean, default False\n .. deprecated:: 0.21.0\n This argument will be removed and will always convert to MultiIndex\n\n Leave a list of tuples on columns as is (default is to convert to\n a MultiIndex on the columns)\nerror_bad_lines : boolean, default True\n Lines with too many fields (e.g. a csv line with too many commas) will by\n default cause an exception to be raised, and no DataFrame will be returned.\n If False, then these \"bad lines\" will dropped from the DataFrame that is\n returned.\nwarn_bad_lines : boolean, default True\n If error_bad_lines is False, and warn_bad_lines is True, a warning for each\n \"bad line\" will be output.\nlow_memory : boolean, default True\n Internally process the file in chunks, resulting in lower memory use\n while parsing, but possibly mixed type inference. To ensure no mixed\n types either set False, or specify the type with the `dtype` parameter.\n Note that the entire file is read into a single DataFrame regardless,\n use the `chunksize` or `iterator` parameter to return the data in chunks.\n (Only valid with C parser)\nmemory_map : boolean, default False\n If a filepath is provided for `filepath_or_buffer`, map the file object\n directly onto memory and access the data directly from there. Using this\n option can improve performance because there is no longer any I/O overhead.\n\nReturns\n-------\nresult : DataFrame or TextParser\n\"\"\"\n\n# engine is not used in read_fwf() so is factored out of the shared docstring\n_engine_doc = \"\"\"engine : {'c', 'python'}, optional\n Parser engine to use. The C engine is faster while the python engine is\n currently more feature-complete.\"\"\"\n\n_sep_doc = r\"\"\"sep : str, default {default}\n Delimiter to use. If sep is None, the C engine cannot automatically detect\n the separator, but the Python parsing engine can, meaning the latter will\n be used and automatically detect the separator by Python's builtin sniffer\n tool, ``csv.Sniffer``. In addition, separators longer than 1 character and\n different from ``'\\s+'`` will be interpreted as regular expressions and\n will also force the use of the Python parsing engine. Note that regex\n delimiters are prone to ignoring quoted data. Regex example: ``'\\r\\t'``\ndelimiter : str, default ``None``\n Alternative argument name for sep.\"\"\"\n\n_read_csv_doc = \"\"\"\nRead CSV (comma-separated) file into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_sep_doc.format(default=\"','\"), _engine_doc))\n\n_read_table_doc = \"\"\"\n\n.. deprecated:: 0.24.0\n Use :func:`pandas.read_csv` instead, passing ``sep='\\t'`` if necessary.\n\nRead general delimited file into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_sep_doc.format(default=\"\\\\t (tab-stop)\"),\n _engine_doc))\n\n_fwf_widths = \"\"\"\\\ncolspecs : list of pairs (int, int) or 'infer'. 
optional\n A list of pairs (tuples) giving the extents of the fixed-width\n fields of each line as half-open intervals (i.e., [from, to[ ).\n String value 'infer' can be used to instruct the parser to try\n detecting the column specifications from the first 100 rows of\n the data which are not being skipped via skiprows (default='infer').\nwidths : list of ints. optional\n A list of field widths which can be used instead of 'colspecs' if\n the intervals are contiguous.\ndelimiter : str, default ``'\\t' + ' '``\n Characters to consider as filler characters in the fixed-width file.\n Can be used to specify the filler character of the fields\n if it is not spaces (e.g., '~').\n\"\"\"\n\n_read_fwf_doc = \"\"\"\nRead a table of fixed-width formatted lines into DataFrame\n\n%s\n\"\"\" % (_parser_params % (_fwf_widths, ''))\n\n\ndef _validate_integer(name, val, min_val=0):\n \"\"\"\n Checks whether the 'name' parameter for parsing is either\n an integer OR float that can SAFELY be cast to an integer\n without losing accuracy. Raises a ValueError if that is\n not the case.\n\n Parameters\n ----------\n name : string\n Parameter name (used for error reporting)\n val : int or float\n The value to check\n min_val : int\n Minimum allowed value (val < min_val will result in a ValueError)\n \"\"\"\n msg = \"'{name:s}' must be an integer >={min_val:d}\".format(name=name,\n min_val=min_val)\n\n if val is not None:\n if is_float(val):\n if int(val) != val:\n raise ValueError(msg)\n val = int(val)\n elif not (is_integer(val) and val >= min_val):\n raise ValueError(msg)\n\n return val\n\n\ndef _validate_names(names):\n \"\"\"\n Check if the `names` parameter contains duplicates.\n\n If duplicates are found, we issue a warning before returning.\n\n Parameters\n ----------\n names : array-like or None\n An array containing a list of the names used for the output DataFrame.\n\n Returns\n -------\n names : array-like or None\n The original `names` parameter.\n \"\"\"\n\n if names is not None:\n if len(names) != len(set(names)):\n msg = (\"Duplicate names specified. 
This \"\n \"will raise an error in the future.\")\n warnings.warn(msg, UserWarning, stacklevel=3)\n\n return names\n\n\ndef _read(filepath_or_buffer, kwds):\n \"\"\"Generic reader of line files.\"\"\"\n encoding = kwds.get('encoding', None)\n if encoding is not None:\n encoding = re.sub('_', '-', encoding).lower()\n kwds['encoding'] = encoding\n\n compression = kwds.get('compression')\n compression = _infer_compression(filepath_or_buffer, compression)\n filepath_or_buffer, _, compression, should_close = get_filepath_or_buffer(\n filepath_or_buffer, encoding, compression)\n kwds['compression'] = compression\n\n if kwds.get('date_parser', None) is not None:\n if isinstance(kwds['parse_dates'], bool):\n kwds['parse_dates'] = True\n\n # Extract some of the arguments (pass chunksize on).\n iterator = kwds.get('iterator', False)\n chunksize = _validate_integer('chunksize', kwds.get('chunksize', None), 1)\n nrows = kwds.get('nrows', None)\n\n # Check for duplicates in names.\n _validate_names(kwds.get(\"names\", None))\n\n # Create the parser.\n parser = TextFileReader(filepath_or_buffer, **kwds)\n\n if chunksize or iterator:\n return parser\n\n try:\n data = parser.read(nrows)\n finally:\n parser.close()\n\n if should_close:\n try:\n filepath_or_buffer.close()\n except: # noqa: flake8\n pass\n\n return data\n\n\n_parser_defaults = {\n 'delimiter': None,\n\n 'doublequote': True,\n 'escapechar': None,\n 'quotechar': '\"',\n 'quoting': csv.QUOTE_MINIMAL,\n 'skipinitialspace': False,\n 'lineterminator': None,\n\n 'header': 'infer',\n 'index_col': None,\n 'names': None,\n 'prefix': None,\n 'skiprows': None,\n 'na_values': None,\n 'true_values': None,\n 'false_values': None,\n 'converters': None,\n 'dtype': None,\n 'skipfooter': 0,\n\n 'keep_default_na': True,\n 'thousands': None,\n 'comment': None,\n 'decimal': b'.',\n\n # 'engine': 'c',\n 'parse_dates': False,\n 'keep_date_col': False,\n 'dayfirst': False,\n 'date_parser': None,\n\n 'usecols': None,\n\n 'nrows': None,\n # 'iterator': False,\n 'chunksize': None,\n 'verbose': False,\n 'encoding': None,\n 'squeeze': False,\n 'compression': None,\n 'mangle_dupe_cols': True,\n 'tupleize_cols': False,\n 'infer_datetime_format': False,\n 'skip_blank_lines': True\n}\n\n\n_c_parser_defaults = {\n 'delim_whitespace': False,\n 'na_filter': True,\n 'low_memory': True,\n 'memory_map': False,\n 'error_bad_lines': True,\n 'warn_bad_lines': True,\n 'tupleize_cols': False,\n 'float_precision': None\n}\n\n_fwf_defaults = {\n 'colspecs': 'infer',\n 'widths': None,\n}\n\n_c_unsupported = {'skipfooter'}\n_python_unsupported = {\n 'low_memory',\n 'float_precision',\n}\n\n_deprecated_defaults = {\n 'tupleize_cols': None\n}\n_deprecated_args = {\n 'tupleize_cols',\n}\n\n\ndef _make_parser_function(name, default_sep=','):\n\n # prepare read_table deprecation\n if name == \"read_table\":\n sep = False\n else:\n sep = default_sep\n\n def parser_f(filepath_or_buffer,\n sep=sep,\n delimiter=None,\n\n # Column and Index Locations and Names\n header='infer',\n names=None,\n index_col=None,\n usecols=None,\n squeeze=False,\n prefix=None,\n mangle_dupe_cols=True,\n\n # General Parsing Configuration\n dtype=None,\n engine=None,\n converters=None,\n true_values=None,\n false_values=None,\n skipinitialspace=False,\n skiprows=None,\n nrows=None,\n\n # NA and Missing Data Handling\n na_values=None,\n keep_default_na=True,\n na_filter=True,\n verbose=False,\n skip_blank_lines=True,\n\n # Datetime Handling\n parse_dates=False,\n infer_datetime_format=False,\n keep_date_col=False,\n 
date_parser=None,\n dayfirst=False,\n\n # Iteration\n iterator=False,\n chunksize=None,\n\n # Quoting, Compression, and File Format\n compression='infer',\n thousands=None,\n decimal=b'.',\n lineterminator=None,\n quotechar='\"',\n quoting=csv.QUOTE_MINIMAL,\n escapechar=None,\n comment=None,\n encoding=None,\n dialect=None,\n tupleize_cols=None,\n\n # Error Handling\n error_bad_lines=True,\n warn_bad_lines=True,\n\n skipfooter=0,\n\n # Internal\n doublequote=True,\n delim_whitespace=False,\n low_memory=_c_parser_defaults['low_memory'],\n memory_map=False,\n float_precision=None):\n\n # deprecate read_table GH21948\n if name == \"read_table\":\n if sep is False and delimiter is None:\n warnings.warn(\"read_table is deprecated, use read_csv \"\n \"instead, passing sep='\\\\t'.\",\n FutureWarning, stacklevel=2)\n else:\n warnings.warn(\"read_table is deprecated, use read_csv \"\n \"instead.\",\n FutureWarning, stacklevel=2)\n if sep is False:\n sep = default_sep\n\n # Alias sep -> delimiter.\n if delimiter is None:\n delimiter = sep\n\n if delim_whitespace and delimiter != default_sep:\n raise ValueError(\"Specified a delimiter with both sep and\"\n \" delim_whitespace=True; you can only\"\n \" specify one.\")\n\n if engine is not None:\n engine_specified = True\n else:\n engine = 'c'\n engine_specified = False\n\n kwds = dict(delimiter=delimiter,\n engine=engine,\n dialect=dialect,\n compression=compression,\n engine_specified=engine_specified,\n\n doublequote=doublequote,\n escapechar=escapechar,\n quotechar=quotechar,\n quoting=quoting,\n skipinitialspace=skipinitialspace,\n lineterminator=lineterminator,\n\n header=header,\n index_col=index_col,\n names=names,\n prefix=prefix,\n skiprows=skiprows,\n na_values=na_values,\n true_values=true_values,\n false_values=false_values,\n keep_default_na=keep_default_na,\n thousands=thousands,\n comment=comment,\n decimal=decimal,\n\n parse_dates=parse_dates,\n keep_date_col=keep_date_col,\n dayfirst=dayfirst,\n date_parser=date_parser,\n\n nrows=nrows,\n iterator=iterator,\n chunksize=chunksize,\n skipfooter=skipfooter,\n converters=converters,\n dtype=dtype,\n usecols=usecols,\n verbose=verbose,\n encoding=encoding,\n squeeze=squeeze,\n memory_map=memory_map,\n float_precision=float_precision,\n\n na_filter=na_filter,\n delim_whitespace=delim_whitespace,\n warn_bad_lines=warn_bad_lines,\n error_bad_lines=error_bad_lines,\n low_memory=low_memory,\n mangle_dupe_cols=mangle_dupe_cols,\n tupleize_cols=tupleize_cols,\n infer_datetime_format=infer_datetime_format,\n skip_blank_lines=skip_blank_lines)\n\n return _read(filepath_or_buffer, kwds)\n\n parser_f.__name__ = name\n\n return parser_f\n\n\nread_csv = _make_parser_function('read_csv', default_sep=',')\nread_csv = Appender(_read_csv_doc)(read_csv)\n\nread_table = _make_parser_function('read_table', default_sep='\\t')\nread_table = Appender(_read_table_doc)(read_table)\n\n\n@Appender(_read_fwf_doc)\ndef read_fwf(filepath_or_buffer, colspecs='infer', widths=None, **kwds):\n # Check input arguments.\n if colspecs is None and widths is None:\n raise ValueError(\"Must specify either colspecs or widths\")\n elif colspecs not in (None, 'infer') and widths is not None:\n raise ValueError(\"You must specify only one of 'widths' and \"\n \"'colspecs'\")\n\n # Compute 'colspecs' from 'widths', if specified.\n if widths is not None:\n colspecs, col = [], 0\n for w in widths:\n colspecs.append((col, col + w))\n col += w\n\n kwds['colspecs'] = colspecs\n kwds['engine'] = 'python-fwf'\n return 
_read(filepath_or_buffer, kwds)\n\n\nclass TextFileReader(BaseIterator):\n \"\"\"\n\n Passed dialect overrides any of the related parser options\n\n \"\"\"\n\n def __init__(self, f, engine=None, **kwds):\n\n self.f = f\n\n if engine is not None:\n engine_specified = True\n else:\n engine = 'python'\n engine_specified = False\n\n self._engine_specified = kwds.get('engine_specified', engine_specified)\n\n if kwds.get('dialect') is not None:\n dialect = kwds['dialect']\n if dialect in csv.list_dialects():\n dialect = csv.get_dialect(dialect)\n\n # Any valid dialect should have these attributes.\n # If any are missing, we will raise automatically.\n for param in ('delimiter', 'doublequote', 'escapechar',\n 'skipinitialspace', 'quotechar', 'quoting'):\n try:\n dialect_val = getattr(dialect, param)\n except AttributeError:\n raise ValueError(\"Invalid dialect '{dialect}' provided\"\n .format(dialect=kwds['dialect']))\n provided = kwds.get(param, _parser_defaults[param])\n\n # Messages for conflicting values between the dialect instance\n # and the actual parameters provided.\n conflict_msgs = []\n\n if dialect_val != provided:\n conflict_msgs.append((\n \"Conflicting values for '{param}': '{val}' was \"\n \"provided, but the dialect specifies '{diaval}'. \"\n \"Using the dialect-specified value.\".format(\n param=param, val=provided, diaval=dialect_val)))\n\n if conflict_msgs:\n warnings.warn('\\n\\n'.join(conflict_msgs), ParserWarning,\n stacklevel=2)\n kwds[param] = dialect_val\n\n if kwds.get('header', 'infer') == 'infer':\n kwds['header'] = 0 if kwds.get('names') is None else None\n\n self.orig_options = kwds\n\n # miscellanea\n self.engine = engine\n self._engine = None\n self._currow = 0\n\n options = self._get_options_with_defaults(engine)\n\n self.chunksize = options.pop('chunksize', None)\n self.nrows = options.pop('nrows', None)\n self.squeeze = options.pop('squeeze', False)\n\n # might mutate self.engine\n self.engine = self._check_file_or_buffer(f, engine)\n self.options, self.engine = self._clean_options(options, engine)\n\n if 'has_index_names' in kwds:\n self.options['has_index_names'] = kwds['has_index_names']\n\n self._make_engine(self.engine)\n\n def close(self):\n self._engine.close()\n\n def _get_options_with_defaults(self, engine):\n kwds = self.orig_options\n\n options = {}\n\n for argname, default in compat.iteritems(_parser_defaults):\n value = kwds.get(argname, default)\n\n # see gh-12935\n if argname == 'mangle_dupe_cols' and not value:\n raise ValueError('Setting mangle_dupe_cols=False is '\n 'not supported yet')\n else:\n options[argname] = value\n\n for argname, default in compat.iteritems(_c_parser_defaults):\n if argname in kwds:\n value = kwds[argname]\n\n if engine != 'c' and value != default:\n if ('python' in engine and\n argname not in _python_unsupported):\n pass\n elif value == _deprecated_defaults.get(argname, default):\n pass\n else:\n raise ValueError(\n 'The %r option is not supported with the'\n ' %r engine' % (argname, engine))\n else:\n value = _deprecated_defaults.get(argname, default)\n options[argname] = value\n\n if engine == 'python-fwf':\n for argname, default in compat.iteritems(_fwf_defaults):\n options[argname] = kwds.get(argname, default)\n\n return options\n\n def _check_file_or_buffer(self, f, engine):\n # see gh-16530\n if is_file_like(f):\n next_attr = \"__next__\" if PY3 else \"next\"\n\n # The C engine doesn't need the file-like to have the \"next\" or\n # \"__next__\" attribute. 
However, the Python engine explicitly calls\n # \"next(...)\" when iterating through such an object, meaning it\n # needs to have that attribute (\"next\" for Python 2.x, \"__next__\"\n # for Python 3.x)\n if engine != \"c\" and not hasattr(f, next_attr):\n msg = (\"The 'python' engine cannot iterate \"\n \"through this file buffer.\")\n raise ValueError(msg)\n\n return engine\n\n def _clean_options(self, options, engine):\n result = options.copy()\n\n engine_specified = self._engine_specified\n fallback_reason = None\n\n sep = options['delimiter']\n delim_whitespace = options['delim_whitespace']\n\n # C engine not supported yet\n if engine == 'c':\n if options['skipfooter'] > 0:\n fallback_reason = \"the 'c' engine does not support\"\\\n \" skipfooter\"\n engine = 'python'\n\n encoding = sys.getfilesystemencoding() or 'utf-8'\n if sep is None and not delim_whitespace:\n if engine == 'c':\n fallback_reason = \"the 'c' engine does not support\"\\\n \" sep=None with delim_whitespace=False\"\n engine = 'python'\n elif sep is not None and len(sep) > 1:\n if engine == 'c' and sep == r'\\s+':\n result['delim_whitespace'] = True\n del result['delimiter']\n elif engine not in ('python', 'python-fwf'):\n # wait until regex engine integrated\n fallback_reason = \"the 'c' engine does not support\"\\\n \" regex separators (separators > 1 char and\"\\\n r\" different from '\\s+' are\"\\\n \" interpreted as regex)\"\n engine = 'python'\n elif delim_whitespace:\n if 'python' in engine:\n result['delimiter'] = r'\\s+'\n elif sep is not None:\n encodeable = True\n try:\n if len(sep.encode(encoding)) > 1:\n encodeable = False\n except UnicodeDecodeError:\n encodeable = False\n if not encodeable and engine not in ('python', 'python-fwf'):\n fallback_reason = \"the separator encoded in {encoding}\" \\\n \" is > 1 char long, and the 'c' engine\" \\\n \" does not support such separators\".format(\n encoding=encoding)\n engine = 'python'\n\n quotechar = options['quotechar']\n if (quotechar is not None and\n isinstance(quotechar, (str, compat.text_type, bytes))):\n if (len(quotechar) == 1 and ord(quotechar) > 127 and\n engine not in ('python', 'python-fwf')):\n fallback_reason = (\"ord(quotechar) > 127, meaning the \"\n \"quotechar is larger than one byte, \"\n \"and the 'c' engine does not support \"\n \"such quotechars\")\n engine = 'python'\n\n if fallback_reason and engine_specified:\n raise ValueError(fallback_reason)\n\n if engine == 'c':\n for arg in _c_unsupported:\n del result[arg]\n\n if 'python' in engine:\n for arg in _python_unsupported:\n if fallback_reason and result[arg] != _c_parser_defaults[arg]:\n msg = (\"Falling back to the 'python' engine because\"\n \" {reason}, but this causes {option!r} to be\"\n \" ignored as it is not supported by the 'python'\"\n \" engine.\").format(reason=fallback_reason,\n option=arg)\n raise ValueError(msg)\n del result[arg]\n\n if fallback_reason:\n warnings.warn((\"Falling back to the 'python' engine because\"\n \" {0}; you can avoid this warning by specifying\"\n \" engine='python'.\").format(fallback_reason),\n ParserWarning, stacklevel=5)\n\n index_col = options['index_col']\n names = options['names']\n converters = options['converters']\n na_values = options['na_values']\n skiprows = options['skiprows']\n\n _validate_header_arg(options['header'])\n\n depr_warning = ''\n\n for arg in _deprecated_args:\n parser_default = _c_parser_defaults[arg]\n depr_default = _deprecated_defaults[arg]\n\n msg = (\"The '{arg}' argument has been deprecated \"\n \"and will be 
removed in a future version.\"\n .format(arg=arg))\n\n if arg == 'tupleize_cols':\n msg += (' Column tuples will then '\n 'always be converted to MultiIndex.')\n\n if result.get(arg, depr_default) != depr_default:\n # raise Exception(result.get(arg, depr_default), depr_default)\n depr_warning += msg + '\\n\\n'\n else:\n result[arg] = parser_default\n\n if depr_warning != '':\n warnings.warn(depr_warning, FutureWarning, stacklevel=2)\n\n if index_col is True:\n raise ValueError(\"The value of index_col couldn't be 'True'\")\n if _is_index_col(index_col):\n if not isinstance(index_col, (list, tuple, np.ndarray)):\n index_col = [index_col]\n result['index_col'] = index_col\n\n names = list(names) if names is not None else names\n\n # type conversion-related\n if converters is not None:\n if not isinstance(converters, dict):\n raise TypeError('Type converters must be a dict or'\n ' subclass, input was '\n 'a {0!r}'.format(type(converters).__name__))\n else:\n converters = {}\n\n # Converting values to NA\n keep_default_na = options['keep_default_na']\n na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)\n\n # handle skiprows; this is internally handled by the\n # c-engine, so only need for python parsers\n if engine != 'c':\n if is_integer(skiprows):\n skiprows = lrange(skiprows)\n if skiprows is None:\n skiprows = set()\n elif not callable(skiprows):\n skiprows = set(skiprows)\n\n # put stuff back\n result['names'] = names\n result['converters'] = converters\n result['na_values'] = na_values\n result['na_fvalues'] = na_fvalues\n result['skiprows'] = skiprows\n\n return result, engine\n\n def __next__(self):\n try:\n return self.get_chunk()\n except StopIteration:\n self.close()\n raise\n\n def _make_engine(self, engine='c'):\n if engine == 'c':\n self._engine = CParserWrapper(self.f, **self.options)\n else:\n if engine == 'python':\n klass = PythonParser\n elif engine == 'python-fwf':\n klass = FixedWidthFieldParser\n else:\n raise ValueError('Unknown engine: {engine} (valid options are'\n ' \"c\", \"python\", or' ' \"python-fwf\")'.format(\n engine=engine))\n self._engine = klass(self.f, **self.options)\n\n def _failover_to_python(self):\n raise com.AbstractMethodError(self)\n\n def read(self, nrows=None):\n nrows = _validate_integer('nrows', nrows)\n\n if nrows is not None:\n if self.options.get('skipfooter'):\n raise ValueError('skipfooter not supported for iteration')\n\n ret = self._engine.read(nrows)\n\n # May alter columns / col_dict\n index, columns, col_dict = self._create_index(ret)\n\n if index is None:\n if col_dict:\n # Any column is actually fine:\n new_rows = len(compat.next(compat.itervalues(col_dict)))\n index = RangeIndex(self._currow, self._currow + new_rows)\n else:\n new_rows = 0\n else:\n new_rows = len(index)\n\n df = DataFrame(col_dict, columns=columns, index=index)\n\n self._currow += new_rows\n\n if self.squeeze and len(df.columns) == 1:\n return df[df.columns[0]].copy()\n return df\n\n def _create_index(self, ret):\n index, columns, col_dict = ret\n return index, columns, col_dict\n\n def get_chunk(self, size=None):\n if size is None:\n size = self.chunksize\n if self.nrows is not None:\n if self._currow >= self.nrows:\n raise StopIteration\n size = min(size, self.nrows - self._currow)\n return self.read(nrows=size)\n\n\ndef _is_index_col(col):\n return col is not None and col is not False\n\n\ndef _is_potential_multi_index(columns):\n \"\"\"\n Check whether or not the `columns` parameter\n could be converted into a MultiIndex.\n\n Parameters\n 
----------\n columns : array-like\n Object which may or may not be convertible into a MultiIndex\n\n Returns\n -------\n boolean : Whether or not columns could become a MultiIndex\n \"\"\"\n return (len(columns) and not isinstance(columns, MultiIndex) and\n all(isinstance(c, tuple) for c in columns))\n\n\ndef _evaluate_usecols(usecols, names):\n \"\"\"\n Check whether or not the 'usecols' parameter\n is a callable. If so, enumerates the 'names'\n parameter and returns a set of indices for\n each entry in 'names' that evaluates to True.\n If not a callable, returns 'usecols'.\n \"\"\"\n if callable(usecols):\n return {i for i, name in enumerate(names) if usecols(name)}\n return usecols\n\n\ndef _validate_usecols_names(usecols, names):\n \"\"\"\n Validates that all usecols are present in a given\n list of names. If not, raise a ValueError that\n shows what usecols are missing.\n\n Parameters\n ----------\n usecols : iterable of usecols\n The columns to validate are present in names.\n names : iterable of names\n The column names to check against.\n\n Returns\n -------\n usecols : iterable of usecols\n The `usecols` parameter if the validation succeeds.\n\n Raises\n ------\n ValueError : Columns were missing. Error message will list them.\n \"\"\"\n missing = [c for c in usecols if c not in names]\n if len(missing) > 0:\n raise ValueError(\n \"Usecols do not match columns, \"\n \"columns expected but not found: {missing}\".format(missing=missing)\n )\n\n return usecols\n\n\ndef _validate_skipfooter_arg(skipfooter):\n \"\"\"\n Validate the 'skipfooter' parameter.\n\n Checks whether 'skipfooter' is a non-negative integer.\n Raises a ValueError if that is not the case.\n\n Parameters\n ----------\n skipfooter : non-negative integer\n The number of rows to skip at the end of the file.\n\n Returns\n -------\n validated_skipfooter : non-negative integer\n The original input if the validation succeeds.\n\n Raises\n ------\n ValueError : 'skipfooter' was not a non-negative integer.\n \"\"\"\n\n if not is_integer(skipfooter):\n raise ValueError(\"skipfooter must be an integer\")\n\n if skipfooter < 0:\n raise ValueError(\"skipfooter cannot be negative\")\n\n return skipfooter\n\n\ndef _validate_usecols_arg(usecols):\n \"\"\"\n Validate the 'usecols' parameter.\n\n Checks whether or not the 'usecols' parameter contains all integers\n (column selection by index), strings (column by name) or is a callable.\n Raises a ValueError if that is not the case.\n\n Parameters\n ----------\n usecols : list-like, callable, or None\n List of columns to use when parsing or a callable that can be used\n to filter a list of table columns.\n\n Returns\n -------\n usecols_tuple : tuple\n A tuple of (verified_usecols, usecols_dtype).\n\n 'verified_usecols' is either a set if an array-like is passed in or\n 'usecols' if a callable or None is passed in.\n\n 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like\n is passed in or None if a callable or None is passed in.\n \"\"\"\n msg = (\"'usecols' must either be list-like of all strings, all unicode, \"\n \"all integers or a callable.\")\n if usecols is not None:\n if callable(usecols):\n return usecols, None\n # GH20529, ensure is iterable container but not string.\n elif not is_list_like(usecols):\n raise ValueError(msg)\n else:\n usecols_dtype = lib.infer_dtype(usecols)\n if usecols_dtype not in ('empty', 'integer',\n 'string', 'unicode'):\n raise ValueError(msg)\n return set(usecols), usecols_dtype\n return usecols, None\n\n\ndef 
_validate_parse_dates_arg(parse_dates):\n \"\"\"\n Check whether or not the 'parse_dates' parameter\n is a non-boolean scalar. Raises a ValueError if\n that is the case.\n \"\"\"\n msg = (\"Only booleans, lists, and \"\n \"dictionaries are accepted \"\n \"for the 'parse_dates' parameter\")\n\n if parse_dates is not None:\n if is_scalar(parse_dates):\n if not lib.is_bool(parse_dates):\n raise TypeError(msg)\n\n elif not isinstance(parse_dates, (list, dict)):\n raise TypeError(msg)\n\n return parse_dates\n\n\nclass ParserBase(object):\n\n def __init__(self, kwds):\n self.names = kwds.get('names')\n self.orig_names = None\n self.prefix = kwds.pop('prefix', None)\n\n self.index_col = kwds.get('index_col', None)\n self.index_names = None\n self.col_names = None\n\n self.parse_dates = _validate_parse_dates_arg(\n kwds.pop('parse_dates', False))\n self.date_parser = kwds.pop('date_parser', None)\n self.dayfirst = kwds.pop('dayfirst', False)\n self.keep_date_col = kwds.pop('keep_date_col', False)\n\n self.na_values = kwds.get('na_values')\n self.na_fvalues = kwds.get('na_fvalues')\n self.na_filter = kwds.get('na_filter', False)\n self.keep_default_na = kwds.get('keep_default_na', True)\n\n self.true_values = kwds.get('true_values')\n self.false_values = kwds.get('false_values')\n self.tupleize_cols = kwds.get('tupleize_cols', False)\n self.mangle_dupe_cols = kwds.get('mangle_dupe_cols', True)\n self.infer_datetime_format = kwds.pop('infer_datetime_format', False)\n\n self._date_conv = _make_date_converter(\n date_parser=self.date_parser,\n dayfirst=self.dayfirst,\n infer_datetime_format=self.infer_datetime_format\n )\n\n # validate header options for mi\n self.header = kwds.get('header')\n if isinstance(self.header, (list, tuple, np.ndarray)):\n if not all(map(is_integer, self.header)):\n raise ValueError(\"header must be integer or list of integers\")\n if kwds.get('usecols'):\n raise ValueError(\"cannot specify usecols when \"\n \"specifying a multi-index header\")\n if kwds.get('names'):\n raise ValueError(\"cannot specify names when \"\n \"specifying a multi-index header\")\n\n # validate index_col that only contains integers\n if self.index_col is not None:\n is_sequence = isinstance(self.index_col, (list, tuple,\n np.ndarray))\n if not (is_sequence and\n all(map(is_integer, self.index_col)) or\n is_integer(self.index_col)):\n raise ValueError(\"index_col must only contain row numbers \"\n \"when specifying a multi-index header\")\n\n # GH 16338\n elif self.header is not None and not is_integer(self.header):\n raise ValueError(\"header must be integer or list of integers\")\n\n self._name_processed = False\n\n self._first_chunk = True\n\n # GH 13932\n # keep references to file handles opened by the parser itself\n self.handles = []\n\n def close(self):\n for f in self.handles:\n f.close()\n\n @property\n def _has_complex_date_col(self):\n return (isinstance(self.parse_dates, dict) or\n (isinstance(self.parse_dates, list) and\n len(self.parse_dates) > 0 and\n isinstance(self.parse_dates[0], list)))\n\n def _should_parse_dates(self, i):\n if isinstance(self.parse_dates, bool):\n return self.parse_dates\n else:\n if self.index_names is not None:\n name = self.index_names[i]\n else:\n name = None\n j = self.index_col[i]\n\n if is_scalar(self.parse_dates):\n return ((j == self.parse_dates) or\n (name is not None and name == self.parse_dates))\n else:\n return ((j in self.parse_dates) or\n (name is not None and name in self.parse_dates))\n\n def _extract_multi_indexer_columns(self, header, 
index_names, col_names,\n passed_names=False):\n \"\"\" extract and return the names, index_names, col_names\n header is a list-of-lists returned from the parsers \"\"\"\n if len(header) < 2:\n return header[0], index_names, col_names, passed_names\n\n # the names are the tuples of the header that are not the index cols\n # 0 is the name of the index, assuming index_col is a list of column\n # numbers\n ic = self.index_col\n if ic is None:\n ic = []\n\n if not isinstance(ic, (list, tuple, np.ndarray)):\n ic = [ic]\n sic = set(ic)\n\n # clean the index_names\n index_names = header.pop(-1)\n index_names, names, index_col = _clean_index_names(index_names,\n self.index_col)\n\n # extract the columns\n field_count = len(header[0])\n\n def extract(r):\n return tuple(r[i] for i in range(field_count) if i not in sic)\n\n columns = lzip(*[extract(r) for r in header])\n names = ic + columns\n\n def tostr(x):\n return str(x) if not isinstance(x, compat.string_types) else x\n\n # if we find 'Unnamed' all of a single level, then our header was too\n # long\n for n in range(len(columns[0])):\n if all('Unnamed' in tostr(c[n]) for c in columns):\n raise ParserError(\n \"Passed header=[%s] are too many rows for this \"\n \"multi_index of columns\"\n % ','.join(str(x) for x in self.header)\n )\n\n # clean the column names (if we have an index_col)\n if len(ic):\n col_names = [r[0] if len(r[0]) and 'Unnamed' not in r[0] else None\n for r in header]\n else:\n col_names = [None] * len(header)\n\n passed_names = True\n\n return names, index_names, col_names, passed_names\n\n def _maybe_dedup_names(self, names):\n # see gh-7160 and gh-9424: this helps to provide\n # immediate alleviation of the duplicate names\n # issue and appears to be satisfactory to users,\n # but ultimately, not needing to butcher the names\n # would be nice!\n if self.mangle_dupe_cols:\n names = list(names) # so we can index\n counts = defaultdict(int)\n is_potential_mi = _is_potential_multi_index(names)\n\n for i, col in enumerate(names):\n cur_count = counts[col]\n\n while cur_count > 0:\n counts[col] = cur_count + 1\n\n if is_potential_mi:\n col = col[:-1] + ('%s.%d' % (col[-1], cur_count),)\n else:\n col = '%s.%d' % (col, cur_count)\n cur_count = counts[col]\n\n names[i] = col\n counts[col] = cur_count + 1\n\n return names\n\n def _maybe_make_multi_index_columns(self, columns, col_names=None):\n # possibly create a column mi here\n if _is_potential_multi_index(columns):\n columns = MultiIndex.from_tuples(columns, names=col_names)\n return columns\n\n def _make_index(self, data, alldata, columns, indexnamerow=False):\n if not _is_index_col(self.index_col) or not self.index_col:\n index = None\n\n elif not self._has_complex_date_col:\n index = self._get_simple_index(alldata, columns)\n index = self._agg_index(index)\n elif self._has_complex_date_col:\n if not self._name_processed:\n (self.index_names, _,\n self.index_col) = _clean_index_names(list(columns),\n self.index_col)\n self._name_processed = True\n index = self._get_complex_date_index(data, columns)\n index = self._agg_index(index, try_parse_dates=False)\n\n # add names for the index\n if indexnamerow:\n coffset = len(indexnamerow) - len(columns)\n index = index.set_names(indexnamerow[:coffset])\n\n # maybe create a mi on the columns\n columns = self._maybe_make_multi_index_columns(columns, self.col_names)\n\n return index, columns\n\n _implicit_index = False\n\n def _get_simple_index(self, data, columns):\n def ix(col):\n if not isinstance(col, compat.string_types):\n return 
col\n raise ValueError('Index %s invalid' % col)\n\n to_remove = []\n index = []\n for idx in self.index_col:\n i = ix(idx)\n to_remove.append(i)\n index.append(data[i])\n\n # remove index items from content and columns, don't pop in\n # loop\n for i in reversed(sorted(to_remove)):\n data.pop(i)\n if not self._implicit_index:\n columns.pop(i)\n\n return index\n\n def _get_complex_date_index(self, data, col_names):\n def _get_name(icol):\n if isinstance(icol, compat.string_types):\n return icol\n\n if col_names is None:\n raise ValueError(('Must supply column order to use %s as '\n 'index') % str(icol))\n\n for i, c in enumerate(col_names):\n if i == icol:\n return c\n\n to_remove = []\n index = []\n for idx in self.index_col:\n name = _get_name(idx)\n to_remove.append(name)\n index.append(data[name])\n\n # remove index items from content and columns, don't pop in\n # loop\n for c in reversed(sorted(to_remove)):\n data.pop(c)\n col_names.remove(c)\n\n return index\n\n def _agg_index(self, index, try_parse_dates=True):\n arrays = []\n\n for i, arr in enumerate(index):\n\n if try_parse_dates and self._should_parse_dates(i):\n arr = self._date_conv(arr)\n\n if self.na_filter:\n col_na_values = self.na_values\n col_na_fvalues = self.na_fvalues\n else:\n col_na_values = set()\n col_na_fvalues = set()\n\n if isinstance(self.na_values, dict):\n col_name = self.index_names[i]\n if col_name is not None:\n col_na_values, col_na_fvalues = _get_na_values(\n col_name, self.na_values, self.na_fvalues,\n self.keep_default_na)\n\n arr, _ = self._infer_types(arr, col_na_values | col_na_fvalues)\n arrays.append(arr)\n\n names = self.index_names\n index = ensure_index_from_sequences(arrays, names)\n\n return index\n\n def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,\n converters=None, dtypes=None):\n result = {}\n for c, values in compat.iteritems(dct):\n conv_f = None if converters is None else converters.get(c, None)\n if isinstance(dtypes, dict):\n cast_type = dtypes.get(c, None)\n else:\n # single dtype or None\n cast_type = dtypes\n\n if self.na_filter:\n col_na_values, col_na_fvalues = _get_na_values(\n c, na_values, na_fvalues, self.keep_default_na)\n else:\n col_na_values, col_na_fvalues = set(), set()\n\n if conv_f is not None:\n # conv_f applied to data before inference\n if cast_type is not None:\n warnings.warn((\"Both a converter and dtype were specified \"\n \"for column {0} - only the converter will \"\n \"be used\").format(c), ParserWarning,\n stacklevel=7)\n\n try:\n values = lib.map_infer(values, conv_f)\n except ValueError:\n mask = algorithms.isin(\n values, list(na_values)).view(np.uint8)\n values = lib.map_infer_mask(values, conv_f, mask)\n\n cvals, na_count = self._infer_types(\n values, set(col_na_values) | col_na_fvalues,\n try_num_bool=False)\n else:\n # skip inference if specified dtype is object\n try_num_bool = not (cast_type and is_string_dtype(cast_type))\n\n # general type inference and conversion\n cvals, na_count = self._infer_types(\n values, set(col_na_values) | col_na_fvalues,\n try_num_bool)\n\n # type specified in dtype param\n if cast_type and not is_dtype_equal(cvals, cast_type):\n cvals = self._cast_types(cvals, cast_type, c)\n\n result[c] = cvals\n if verbose and na_count:\n print('Filled %d NA values in column %s' % (na_count, str(c)))\n return result\n\n def _infer_types(self, values, na_values, try_num_bool=True):\n \"\"\"\n Infer types of values, possibly casting\n\n Parameters\n ----------\n values : ndarray\n na_values : set\n 
try_num_bool : bool, default try\n try to cast values to numeric (first preference) or boolean\n\n Returns:\n --------\n converted : ndarray\n na_count : int\n \"\"\"\n na_count = 0\n if issubclass(values.dtype.type, (np.number, np.bool_)):\n mask = algorithms.isin(values, list(na_values))\n na_count = mask.sum()\n if na_count > 0:\n if is_integer_dtype(values):\n values = values.astype(np.float64)\n np.putmask(values, mask, np.nan)\n return values, na_count\n\n if try_num_bool:\n try:\n result = lib.maybe_convert_numeric(np.asarray(values),\n na_values, False)\n na_count = isna(result).sum()\n except Exception:\n result = values\n if values.dtype == np.object_:\n na_count = parsers.sanitize_objects(np.asarray(result),\n na_values, False)\n else:\n result = values\n if values.dtype == np.object_:\n na_count = parsers.sanitize_objects(np.asarray(values),\n na_values, False)\n\n if result.dtype == np.object_ and try_num_bool:\n result = libops.maybe_convert_bool(np.asarray(values),\n true_values=self.true_values,\n false_values=self.false_values)\n\n return result, na_count\n\n def _cast_types(self, values, cast_type, column):\n \"\"\"\n Cast values to specified type\n\n Parameters\n ----------\n values : ndarray\n cast_type : string or np.dtype\n dtype to cast values to\n column : string\n column name - used only for error reporting\n\n Returns\n -------\n converted : ndarray\n \"\"\"\n\n if is_categorical_dtype(cast_type):\n known_cats = (isinstance(cast_type, CategoricalDtype) and\n cast_type.categories is not None)\n\n if not is_object_dtype(values) and not known_cats:\n # XXX this is for consistency with\n # c-parser which parses all categories\n # as strings\n values = astype_nansafe(values, str)\n\n cats = Index(values).unique().dropna()\n values = Categorical._from_inferred_categories(\n cats, cats.get_indexer(values), cast_type\n )\n\n else:\n try:\n values = astype_nansafe(values, cast_type, copy=True)\n except ValueError:\n raise ValueError(\"Unable to convert column %s to \"\n \"type %s\" % (column, cast_type))\n return values\n\n def _do_date_conversions(self, names, data):\n # returns data, columns\n\n if self.parse_dates is not None:\n data, names = _process_date_conversion(\n data, self._date_conv, self.parse_dates, self.index_col,\n self.index_names, names, keep_date_col=self.keep_date_col)\n\n return names, data\n\n\nclass CParserWrapper(ParserBase):\n \"\"\"\n\n \"\"\"\n\n def __init__(self, src, **kwds):\n self.kwds = kwds\n kwds = kwds.copy()\n\n ParserBase.__init__(self, kwds)\n\n if (kwds.get('compression') is None\n and 'utf-16' in (kwds.get('encoding') or '')):\n # if source is utf-16 plain text, convert source to utf-8\n if isinstance(src, compat.string_types):\n src = open(src, 'rb')\n self.handles.append(src)\n src = UTF8Recoder(src, kwds['encoding'])\n kwds['encoding'] = 'utf-8'\n\n # #2442\n kwds['allow_leading_cols'] = self.index_col is not False\n\n # GH20529, validate usecol arg before TextReader\n self.usecols, self.usecols_dtype = _validate_usecols_arg(\n kwds['usecols'])\n kwds['usecols'] = self.usecols\n\n self._reader = parsers.TextReader(src, **kwds)\n\n passed_names = self.names is None\n\n if self._reader.header is None:\n self.names = None\n else:\n if len(self._reader.header) > 1:\n # we have a multi index in the columns\n self.names, self.index_names, self.col_names, passed_names = (\n self._extract_multi_indexer_columns(\n self._reader.header, self.index_names, self.col_names,\n passed_names\n )\n )\n else:\n self.names = 
list(self._reader.header[0])\n\n if self.names is None:\n if self.prefix:\n self.names = ['%s%d' % (self.prefix, i)\n for i in range(self._reader.table_width)]\n else:\n self.names = lrange(self._reader.table_width)\n\n # gh-9755\n #\n # need to set orig_names here first\n # so that proper indexing can be done\n # with _set_noconvert_columns\n #\n # once names has been filtered, we will\n # then set orig_names again to names\n self.orig_names = self.names[:]\n\n if self.usecols:\n usecols = _evaluate_usecols(self.usecols, self.orig_names)\n\n # GH 14671\n if (self.usecols_dtype == 'string' and\n not set(usecols).issubset(self.orig_names)):\n _validate_usecols_names(usecols, self.orig_names)\n\n if len(self.names) > len(usecols):\n self.names = [n for i, n in enumerate(self.names)\n if (i in usecols or n in usecols)]\n\n if len(self.names) < len(usecols):\n _validate_usecols_names(usecols, self.names)\n\n self._set_noconvert_columns()\n\n self.orig_names = self.names\n\n if not self._has_complex_date_col:\n if (self._reader.leading_cols == 0 and\n _is_index_col(self.index_col)):\n\n self._name_processed = True\n (index_names, self.names,\n self.index_col) = _clean_index_names(self.names,\n self.index_col)\n\n if self.index_names is None:\n self.index_names = index_names\n\n if self._reader.header is None and not passed_names:\n self.index_names = [None] * len(self.index_names)\n\n self._implicit_index = self._reader.leading_cols > 0\n\n def close(self):\n for f in self.handles:\n f.close()\n\n # close additional handles opened by C parser (for compression)\n try:\n self._reader.close()\n except:\n pass\n\n def _set_noconvert_columns(self):\n \"\"\"\n Set the columns that should not undergo dtype conversions.\n\n Currently, any column that is involved with date parsing will not\n undergo such conversions.\n \"\"\"\n names = self.orig_names\n if self.usecols_dtype == 'integer':\n # A set of integers will be converted to a list in\n # the correct order every single time.\n usecols = list(self.usecols)\n usecols.sort()\n elif (callable(self.usecols) or\n self.usecols_dtype not in ('empty', None)):\n # The names attribute should have the correct columns\n # in the proper order for indexing with parse_dates.\n usecols = self.names[:]\n else:\n # Usecols is empty.\n usecols = None\n\n def _set(x):\n if usecols is not None and is_integer(x):\n x = usecols[x]\n\n if not is_integer(x):\n x = names.index(x)\n\n self._reader.set_noconvert(x)\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif self.parse_dates:\n if isinstance(self.index_col, list):\n for k in self.index_col:\n _set(k)\n elif self.index_col is not None:\n _set(self.index_col)\n\n def set_error_bad_lines(self, status):\n self._reader.set_error_bad_lines(int(status))\n\n def read(self, nrows=None):\n try:\n data = self._reader.read(nrows)\n except StopIteration:\n if self._first_chunk:\n self._first_chunk = False\n names = self._maybe_dedup_names(self.orig_names)\n index, columns, col_dict = _get_empty_meta(\n names, self.index_col, self.index_names,\n dtype=self.kwds.get('dtype'))\n columns = self._maybe_make_multi_index_columns(\n columns, self.col_names)\n\n if self.usecols is not None:\n columns = self._filter_usecols(columns)\n\n col_dict = dict(filter(lambda item: item[0] 
in columns,\n col_dict.items()))\n\n return index, columns, col_dict\n\n else:\n raise\n\n # Done with first read, next time raise StopIteration\n self._first_chunk = False\n\n names = self.names\n\n if self._reader.leading_cols:\n if self._has_complex_date_col:\n raise NotImplementedError('file structure not yet supported')\n\n # implicit index, no index names\n arrays = []\n\n for i in range(self._reader.leading_cols):\n if self.index_col is None:\n values = data.pop(i)\n else:\n values = data.pop(self.index_col[i])\n\n values = self._maybe_parse_dates(values, i,\n try_parse_dates=True)\n arrays.append(values)\n\n index = ensure_index_from_sequences(arrays)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n names = self._maybe_dedup_names(names)\n\n # rename dict keys\n data = sorted(data.items())\n data = {k: v for k, (i, v) in zip(names, data)}\n\n names, data = self._do_date_conversions(names, data)\n\n else:\n # rename dict keys\n data = sorted(data.items())\n\n # ugh, mutation\n names = list(self.orig_names)\n names = self._maybe_dedup_names(names)\n\n if self.usecols is not None:\n names = self._filter_usecols(names)\n\n # columns as list\n alldata = [x[1] for x in data]\n\n data = {k: v for k, (i, v) in zip(names, data)}\n\n names, data = self._do_date_conversions(names, data)\n index, names = self._make_index(data, alldata, names)\n\n # maybe create a mi on the columns\n names = self._maybe_make_multi_index_columns(names, self.col_names)\n\n return index, names, data\n\n def _filter_usecols(self, names):\n # hackish\n usecols = _evaluate_usecols(self.usecols, names)\n if usecols is not None and len(names) != len(usecols):\n names = [name for i, name in enumerate(names)\n if i in usecols or name in usecols]\n return names\n\n def _get_index_names(self):\n names = list(self._reader.header[0])\n idx_names = None\n\n if self._reader.leading_cols == 0 and self.index_col is not None:\n (idx_names, names,\n self.index_col) = _clean_index_names(names, self.index_col)\n\n return names, idx_names\n\n def _maybe_parse_dates(self, values, index, try_parse_dates=True):\n if try_parse_dates and self._should_parse_dates(index):\n values = self._date_conv(values)\n return values\n\n\ndef TextParser(*args, **kwds):\n \"\"\"\n Converts lists of lists/tuples into DataFrames with proper type inference\n and optional (e.g. string to datetime) conversion. Also enables iterating\n lazily over chunks of large files\n\n Parameters\n ----------\n data : file-like object or list\n delimiter : separator character to use\n dialect : str or csv.Dialect instance, default None\n Ignored if delimiter is longer than 1 character\n names : sequence, default\n header : int, default 0\n Row to use to parse column labels. Defaults to the first row. 
Prior\n rows will be discarded\n index_col : int or list, default None\n Column or columns to use as the (possibly hierarchical) index\n has_index_names: boolean, default False\n True if the cols defined in index_col have an index name and are\n not in the header\n na_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN.\n keep_default_na : bool, default True\n thousands : str, default None\n Thousands separator\n comment : str, default None\n Comment out remainder of line\n parse_dates : boolean, default False\n keep_date_col : boolean, default False\n date_parser : function, default None\n skiprows : list of integers\n Row numbers to skip\n skipfooter : int\n Number of line at bottom of file to skip\n converters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n encoding : string, default None\n Encoding to use for UTF when reading/writing (ex. 'utf-8')\n squeeze : boolean, default False\n returns Series if only one column\n infer_datetime_format: boolean, default False\n If True and `parse_dates` is True for a column, try to infer the\n datetime format based on the first datetime string. If the format\n can be inferred, there often will be a large parsing speed-up.\n float_precision : string, default None\n Specifies which converter the C engine should use for floating-point\n values. The options are None for the ordinary converter,\n 'high' for the high-precision converter, and 'round_trip' for the\n round-trip converter.\n \"\"\"\n kwds['engine'] = 'python'\n return TextFileReader(*args, **kwds)\n\n\ndef count_empty_vals(vals):\n return sum(1 for v in vals if v == '' or v is None)\n\n\nclass PythonParser(ParserBase):\n\n def __init__(self, f, **kwds):\n \"\"\"\n Workhorse function for processing nested list into DataFrame\n\n Should be replaced by np.genfromtxt eventually?\n \"\"\"\n ParserBase.__init__(self, kwds)\n\n self.data = None\n self.buf = []\n self.pos = 0\n self.line_pos = 0\n\n self.encoding = kwds['encoding']\n self.compression = kwds['compression']\n self.memory_map = kwds['memory_map']\n self.skiprows = kwds['skiprows']\n\n if callable(self.skiprows):\n self.skipfunc = self.skiprows\n else:\n self.skipfunc = lambda x: x in self.skiprows\n\n self.skipfooter = _validate_skipfooter_arg(kwds['skipfooter'])\n self.delimiter = kwds['delimiter']\n\n self.quotechar = kwds['quotechar']\n if isinstance(self.quotechar, compat.text_type):\n self.quotechar = str(self.quotechar)\n\n self.escapechar = kwds['escapechar']\n self.doublequote = kwds['doublequote']\n self.skipinitialspace = kwds['skipinitialspace']\n self.lineterminator = kwds['lineterminator']\n self.quoting = kwds['quoting']\n self.usecols, _ = _validate_usecols_arg(kwds['usecols'])\n self.skip_blank_lines = kwds['skip_blank_lines']\n\n self.warn_bad_lines = kwds['warn_bad_lines']\n self.error_bad_lines = kwds['error_bad_lines']\n\n self.names_passed = kwds['names'] or None\n\n self.has_index_names = False\n if 'has_index_names' in kwds:\n self.has_index_names = kwds['has_index_names']\n\n self.verbose = kwds['verbose']\n self.converters = kwds['converters']\n self.dtype = kwds['dtype']\n\n self.thousands = kwds['thousands']\n self.decimal = kwds['decimal']\n\n self.comment = kwds['comment']\n self._comment_lines = []\n\n mode = 'r' if PY3 else 'rb'\n f, handles = 
_get_handle(f, mode, encoding=self.encoding,\n compression=self.compression,\n memory_map=self.memory_map)\n self.handles.extend(handles)\n\n # Set self.data to something that can read lines.\n if hasattr(f, 'readline'):\n self._make_reader(f)\n else:\n self.data = f\n\n # Get columns in two steps: infer from data, then\n # infer column indices from self.usecols if it is specified.\n self._col_indices = None\n self.columns, self.num_original_columns = self._infer_columns()\n\n # Now self.columns has the set of columns that we will process.\n # The original set is stored in self.original_columns.\n if len(self.columns) > 1:\n # we are processing a multi index column\n self.columns, self.index_names, self.col_names, _ = (\n self._extract_multi_indexer_columns(\n self.columns, self.index_names, self.col_names\n )\n )\n # Update list of original names to include all indices.\n self.num_original_columns = len(self.columns)\n else:\n self.columns = self.columns[0]\n\n # get popped off for index\n self.orig_names = list(self.columns)\n\n # needs to be cleaned/refactored\n # multiple date column thing turning into a real spaghetti factory\n\n if not self._has_complex_date_col:\n (index_names, self.orig_names, self.columns) = (\n self._get_index_name(self.columns))\n self._name_processed = True\n if self.index_names is None:\n self.index_names = index_names\n\n if self.parse_dates:\n self._no_thousands_columns = self._set_no_thousands_columns()\n else:\n self._no_thousands_columns = None\n\n if len(self.decimal) != 1:\n raise ValueError('Only length-1 decimal markers supported')\n\n if self.thousands is None:\n self.nonnum = re.compile('[^-^0-9^%s]+' % self.decimal)\n else:\n self.nonnum = re.compile('[^-^0-9^%s^%s]+' % (self.thousands,\n self.decimal))\n\n def _set_no_thousands_columns(self):\n # Create a set of column ids that are not to be stripped of thousands\n # operators.\n noconvert_columns = set()\n\n def _set(x):\n if is_integer(x):\n noconvert_columns.add(x)\n else:\n noconvert_columns.add(self.columns.index(x))\n\n if isinstance(self.parse_dates, list):\n for val in self.parse_dates:\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif isinstance(self.parse_dates, dict):\n for val in self.parse_dates.values():\n if isinstance(val, list):\n for k in val:\n _set(k)\n else:\n _set(val)\n\n elif self.parse_dates:\n if isinstance(self.index_col, list):\n for k in self.index_col:\n _set(k)\n elif self.index_col is not None:\n _set(self.index_col)\n\n return noconvert_columns\n\n def _make_reader(self, f):\n sep = self.delimiter\n\n if sep is None or len(sep) == 1:\n if self.lineterminator:\n raise ValueError('Custom line terminators not supported in '\n 'python parser (yet)')\n\n class MyDialect(csv.Dialect):\n delimiter = self.delimiter\n quotechar = self.quotechar\n escapechar = self.escapechar\n doublequote = self.doublequote\n skipinitialspace = self.skipinitialspace\n quoting = self.quoting\n lineterminator = '\\n'\n\n dia = MyDialect\n\n sniff_sep = True\n\n if sep is not None:\n sniff_sep = False\n dia.delimiter = sep\n # attempt to sniff the delimiter\n if sniff_sep:\n line = f.readline()\n while self.skipfunc(self.pos):\n self.pos += 1\n line = f.readline()\n\n line = self._check_comments([line])[0]\n\n self.pos += 1\n self.line_pos += 1\n sniffed = csv.Sniffer().sniff(line)\n dia.delimiter = sniffed.delimiter\n if self.encoding is not None:\n self.buf.extend(list(\n UnicodeReader(StringIO(line),\n dialect=dia,\n encoding=self.encoding)))\n else:\n 
self.buf.extend(list(csv.reader(StringIO(line),\n dialect=dia)))\n\n if self.encoding is not None:\n reader = UnicodeReader(f, dialect=dia,\n encoding=self.encoding,\n strict=True)\n else:\n reader = csv.reader(f, dialect=dia,\n strict=True)\n\n else:\n def _read():\n line = f.readline()\n\n if compat.PY2 and self.encoding:\n line = line.decode(self.encoding)\n\n pat = re.compile(sep)\n yield pat.split(line.strip())\n for line in f:\n yield pat.split(line.strip())\n reader = _read()\n\n self.data = reader\n\n def read(self, rows=None):\n try:\n content = self._get_lines(rows)\n except StopIteration:\n if self._first_chunk:\n content = []\n else:\n raise\n\n # done with first read, next time raise StopIteration\n self._first_chunk = False\n\n columns = list(self.orig_names)\n if not len(content): # pragma: no cover\n # DataFrame with the right metadata, even though it's length 0\n names = self._maybe_dedup_names(self.orig_names)\n index, columns, col_dict = _get_empty_meta(\n names, self.index_col, self.index_names, self.dtype)\n columns = self._maybe_make_multi_index_columns(\n columns, self.col_names)\n return index, columns, col_dict\n\n # handle new style for names in index\n count_empty_content_vals = count_empty_vals(content[0])\n indexnamerow = None\n if self.has_index_names and count_empty_content_vals == len(columns):\n indexnamerow = content[0]\n content = content[1:]\n\n alldata = self._rows_to_cols(content)\n data = self._exclude_implicit_index(alldata)\n\n columns = self._maybe_dedup_names(self.columns)\n columns, data = self._do_date_conversions(columns, data)\n\n data = self._convert_data(data)\n index, columns = self._make_index(data, alldata, columns, indexnamerow)\n\n return index, columns, data\n\n def _exclude_implicit_index(self, alldata):\n names = self._maybe_dedup_names(self.orig_names)\n\n if self._implicit_index:\n excl_indices = self.index_col\n\n data = {}\n offset = 0\n for i, col in enumerate(names):\n while i + offset in excl_indices:\n offset += 1\n data[col] = alldata[i + offset]\n else:\n data = {k: v for k, v in zip(names, alldata)}\n\n return data\n\n # legacy\n def get_chunk(self, size=None):\n if size is None:\n size = self.chunksize\n return self.read(rows=size)\n\n def _convert_data(self, data):\n # apply converters\n def _clean_mapping(mapping):\n \"converts col numbers to names\"\n clean = {}\n for col, v in compat.iteritems(mapping):\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n clean[col] = v\n return clean\n\n clean_conv = _clean_mapping(self.converters)\n if not isinstance(self.dtype, dict):\n # handles single dtype applied to all columns\n clean_dtypes = self.dtype\n else:\n clean_dtypes = _clean_mapping(self.dtype)\n\n # Apply NA values.\n clean_na_values = {}\n clean_na_fvalues = {}\n\n if isinstance(self.na_values, dict):\n for col in self.na_values:\n na_value = self.na_values[col]\n na_fvalue = self.na_fvalues[col]\n\n if isinstance(col, int) and col not in self.orig_names:\n col = self.orig_names[col]\n\n clean_na_values[col] = na_value\n clean_na_fvalues[col] = na_fvalue\n else:\n clean_na_values = self.na_values\n clean_na_fvalues = self.na_fvalues\n\n return self._convert_to_ndarrays(data, clean_na_values,\n clean_na_fvalues, self.verbose,\n clean_conv, clean_dtypes)\n\n def _infer_columns(self):\n names = self.names\n num_original_columns = 0\n clear_buffer = True\n if self.header is not None:\n header = self.header\n\n if isinstance(header, (list, tuple, np.ndarray)):\n have_mi_columns = 
len(header) > 1\n # we have a mi columns, so read an extra line\n if have_mi_columns:\n header = list(header) + [header[-1] + 1]\n else:\n have_mi_columns = False\n header = [header]\n\n columns = []\n for level, hr in enumerate(header):\n try:\n line = self._buffered_line()\n\n while self.line_pos <= hr:\n line = self._next_line()\n\n except StopIteration:\n if self.line_pos < hr:\n raise ValueError(\n 'Passed header=%s but only %d lines in file'\n % (hr, self.line_pos + 1))\n\n # We have an empty file, so check\n # if columns are provided. That will\n # serve as the 'line' for parsing\n if have_mi_columns and hr > 0:\n if clear_buffer:\n self._clear_buffer()\n columns.append([None] * len(columns[-1]))\n return columns, num_original_columns\n\n if not self.names:\n raise EmptyDataError(\n \"No columns to parse from file\")\n\n line = self.names[:]\n\n unnamed_count = 0\n this_columns = []\n for i, c in enumerate(line):\n if c == '':\n if have_mi_columns:\n this_columns.append('Unnamed: %d_level_%d'\n % (i, level))\n else:\n this_columns.append('Unnamed: %d' % i)\n unnamed_count += 1\n else:\n this_columns.append(c)\n\n if not have_mi_columns and self.mangle_dupe_cols:\n counts = defaultdict(int)\n\n for i, col in enumerate(this_columns):\n cur_count = counts[col]\n\n while cur_count > 0:\n counts[col] = cur_count + 1\n col = \"%s.%d\" % (col, cur_count)\n cur_count = counts[col]\n\n this_columns[i] = col\n counts[col] = cur_count + 1\n elif have_mi_columns:\n\n # if we have grabbed an extra line, but its not in our\n # format so save in the buffer, and create an blank extra\n # line for the rest of the parsing code\n if hr == header[-1]:\n lc = len(this_columns)\n ic = (len(self.index_col)\n if self.index_col is not None else 0)\n if lc != unnamed_count and lc - ic > unnamed_count:\n clear_buffer = False\n this_columns = [None] * lc\n self.buf = [self.buf[-1]]\n\n columns.append(this_columns)\n if len(columns) == 1:\n num_original_columns = len(this_columns)\n\n if clear_buffer:\n self._clear_buffer()\n\n if names is not None:\n if ((self.usecols is not None and\n len(names) != len(self.usecols)) or\n (self.usecols is None and\n len(names) != len(columns[0]))):\n raise ValueError('Number of passed names did not match '\n 'number of header fields in the file')\n if len(columns) > 1:\n raise TypeError('Cannot pass names with multi-index '\n 'columns')\n\n if self.usecols is not None:\n # Set _use_cols. 
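# --- Illustrative usage sketch (not part of the original module) ---
# The usecols handling in this parser (_evaluate_usecols/_handle_usecols)
# accepts column labels, positional indices, or a callable applied to each
# header name; all three spellings select the same columns here. The sample
# data is made up.
import io
import pandas as pd

csv_data = "a,b,c\n1,2,3\n4,5,6\n"
by_name = pd.read_csv(io.StringIO(csv_data), usecols=['a', 'c'])
by_index = pd.read_csv(io.StringIO(csv_data), usecols=[0, 2])
by_callable = pd.read_csv(io.StringIO(csv_data), usecols=lambda name: name != 'b')
assert list(by_name.columns) == list(by_index.columns) == list(by_callable.columns)
# --- end sketch ---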
We don't store columns because they are\n # overwritten.\n self._handle_usecols(columns, names)\n else:\n self._col_indices = None\n num_original_columns = len(names)\n columns = [names]\n else:\n columns = self._handle_usecols(columns, columns[0])\n else:\n try:\n line = self._buffered_line()\n\n except StopIteration:\n if not names:\n raise EmptyDataError(\n \"No columns to parse from file\")\n\n line = names[:]\n\n ncols = len(line)\n num_original_columns = ncols\n\n if not names:\n if self.prefix:\n columns = [['%s%d' % (self.prefix, i)\n for i in range(ncols)]]\n else:\n columns = [lrange(ncols)]\n columns = self._handle_usecols(columns, columns[0])\n else:\n if self.usecols is None or len(names) >= num_original_columns:\n columns = self._handle_usecols([names], names)\n num_original_columns = len(names)\n else:\n if (not callable(self.usecols) and\n len(names) != len(self.usecols)):\n raise ValueError(\n 'Number of passed names did not match number of '\n 'header fields in the file'\n )\n # Ignore output but set used columns.\n self._handle_usecols([names], names)\n columns = [names]\n num_original_columns = ncols\n\n return columns, num_original_columns\n\n def _handle_usecols(self, columns, usecols_key):\n \"\"\"\n Sets self._col_indices\n\n usecols_key is used if there are string usecols.\n \"\"\"\n if self.usecols is not None:\n if callable(self.usecols):\n col_indices = _evaluate_usecols(self.usecols, usecols_key)\n elif any(isinstance(u, string_types) for u in self.usecols):\n if len(columns) > 1:\n raise ValueError(\"If using multiple headers, usecols must \"\n \"be integers.\")\n col_indices = []\n\n for col in self.usecols:\n if isinstance(col, string_types):\n try:\n col_indices.append(usecols_key.index(col))\n except ValueError:\n _validate_usecols_names(self.usecols, usecols_key)\n else:\n col_indices.append(col)\n else:\n col_indices = self.usecols\n\n columns = [[n for i, n in enumerate(column) if i in col_indices]\n for column in columns]\n self._col_indices = col_indices\n return columns\n\n def _buffered_line(self):\n \"\"\"\n Return a line from buffer, filling buffer if required.\n \"\"\"\n if len(self.buf) > 0:\n return self.buf[0]\n else:\n return self._next_line()\n\n def _check_for_bom(self, first_row):\n \"\"\"\n Checks whether the file begins with the BOM character.\n If it does, remove it. In addition, if there is quoting\n in the field subsequent to the BOM, remove it as well\n because it technically takes place at the beginning of\n the name, not the middle of it.\n \"\"\"\n # first_row will be a list, so we need to check\n # that that list is not empty before proceeding.\n if not first_row:\n return first_row\n\n # The first element of this row is the one that could have the\n # BOM that we want to remove. 
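# --- Illustrative usage sketch (not part of the original module) ---
# _check_for_bom strips a leading byte-order mark from the first header field
# when the python engine is used, so a BOM-prefixed payload still yields the
# expected column name. Single-column sample data, made up for illustration.
import io
import pandas as pd

payload = u'\ufeffcol1\n1\n2\n'
df = pd.read_csv(io.StringIO(payload), engine='python')
assert list(df.columns) == ['col1']
# --- end sketch ---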
Check that the first element is a\n # string before proceeding.\n if not isinstance(first_row[0], compat.string_types):\n return first_row\n\n # Check that the string is not empty, as that would\n # obviously not have a BOM at the start of it.\n if not first_row[0]:\n return first_row\n\n # Since the string is non-empty, check that it does\n # in fact begin with a BOM.\n first_elt = first_row[0][0]\n\n # This is to avoid warnings we get in Python 2.x if\n # we find ourselves comparing with non-Unicode\n if compat.PY2 and not isinstance(first_elt, unicode): # noqa\n try:\n first_elt = u(first_elt)\n except UnicodeDecodeError:\n return first_row\n\n if first_elt != _BOM:\n return first_row\n\n first_row = first_row[0]\n\n if len(first_row) > 1 and first_row[1] == self.quotechar:\n start = 2\n quote = first_row[1]\n end = first_row[2:].index(quote) + 2\n\n # Extract the data between the quotation marks\n new_row = first_row[start:end]\n\n # Extract any remaining data after the second\n # quotation mark.\n if len(first_row) > end + 1:\n new_row += first_row[end + 1:]\n return [new_row]\n elif len(first_row) > 1:\n return [first_row[1:]]\n else:\n # First row is just the BOM, so we\n # return an empty string.\n return [\"\"]\n\n def _is_line_empty(self, line):\n \"\"\"\n Check if a line is empty or not.\n\n Parameters\n ----------\n line : str, array-like\n The line of data to check.\n\n Returns\n -------\n boolean : Whether or not the line is empty.\n \"\"\"\n return not line or all(not x for x in line)\n\n def _next_line(self):\n if isinstance(self.data, list):\n while self.skipfunc(self.pos):\n self.pos += 1\n\n while True:\n try:\n line = self._check_comments([self.data[self.pos]])[0]\n self.pos += 1\n # either uncommented or blank to begin with\n if (not self.skip_blank_lines and\n (self._is_line_empty(\n self.data[self.pos - 1]) or line)):\n break\n elif self.skip_blank_lines:\n ret = self._remove_empty_lines([line])\n if ret:\n line = ret[0]\n break\n except IndexError:\n raise StopIteration\n else:\n while self.skipfunc(self.pos):\n self.pos += 1\n next(self.data)\n\n while True:\n orig_line = self._next_iter_line(row_num=self.pos + 1)\n self.pos += 1\n\n if orig_line is not None:\n line = self._check_comments([orig_line])[0]\n\n if self.skip_blank_lines:\n ret = self._remove_empty_lines([line])\n\n if ret:\n line = ret[0]\n break\n elif self._is_line_empty(orig_line) or line:\n break\n\n # This was the first line of the file,\n # which could contain the BOM at the\n # beginning of it.\n if self.pos == 1:\n line = self._check_for_bom(line)\n\n self.line_pos += 1\n self.buf.append(line)\n return line\n\n def _alert_malformed(self, msg, row_num):\n \"\"\"\n Alert a user about a malformed row.\n\n If `self.error_bad_lines` is True, the alert will be `ParserError`.\n If `self.warn_bad_lines` is True, the alert will be printed out.\n\n Parameters\n ----------\n msg : The error message to display.\n row_num : The row number where the parsing error occurred.\n Because this row number is displayed, we 1-index,\n even though we 0-index internally.\n \"\"\"\n\n if self.error_bad_lines:\n raise ParserError(msg)\n elif self.warn_bad_lines:\n base = 'Skipping line {row_num}: '.format(row_num=row_num)\n sys.stderr.write(base + msg + '\\n')\n\n def _next_iter_line(self, row_num):\n \"\"\"\n Wrapper around iterating through `self.data` (CSV source).\n\n When a CSV error is raised, we check for specific\n error messages that allow us to customize the\n error message displayed to the user.\n\n 
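# --- Illustrative usage sketch (not part of the original module) ---
# _alert_malformed (above) is what turns a bad row into either a ParserError
# (error_bad_lines=True) or a "Skipping line ..." message on stderr
# (warn_bad_lines=True). Keyword names are the ones defined in this module;
# newer pandas replaces them with on_bad_lines. The sample data is made up.
import io
import pandas as pd

bad = "a,b\n1,2\n3,4,5\n6,7\n"          # third line has one field too many
df = pd.read_csv(io.StringIO(bad), error_bad_lines=False, warn_bad_lines=True)
assert len(df) == 2                      # the malformed row was skipped
# --- end sketch ---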
Parameters\n ----------\n row_num : The row number of the line being parsed.\n \"\"\"\n\n try:\n return next(self.data)\n except csv.Error as e:\n if self.warn_bad_lines or self.error_bad_lines:\n msg = str(e)\n\n if 'NULL byte' in msg:\n msg = ('NULL byte detected. This byte '\n 'cannot be processed in Python\\'s '\n 'native csv library at the moment, '\n 'so please pass in engine=\\'c\\' instead')\n elif 'newline inside string' in msg:\n msg = ('EOF inside string starting with '\n 'line ' + str(row_num))\n\n if self.skipfooter > 0:\n reason = ('Error could possibly be due to '\n 'parsing errors in the skipped footer rows '\n '(the skipfooter keyword is only applied '\n 'after Python\\'s csv library has parsed '\n 'all rows).')\n msg += '. ' + reason\n\n self._alert_malformed(msg, row_num)\n return None\n\n def _check_comments(self, lines):\n if self.comment is None:\n return lines\n ret = []\n for l in lines:\n rl = []\n for x in l:\n if (not isinstance(x, compat.string_types) or\n self.comment not in x):\n rl.append(x)\n else:\n x = x[:x.find(self.comment)]\n if len(x) > 0:\n rl.append(x)\n break\n ret.append(rl)\n return ret\n\n def _remove_empty_lines(self, lines):\n \"\"\"\n Iterate through the lines and remove any that are\n either empty or contain only one whitespace value\n\n Parameters\n ----------\n lines : array-like\n The array of lines that we are to filter.\n\n Returns\n -------\n filtered_lines : array-like\n The same array of lines with the \"empty\" ones removed.\n \"\"\"\n\n ret = []\n for l in lines:\n # Remove empty lines and lines with only one whitespace value\n if (len(l) > 1 or len(l) == 1 and\n (not isinstance(l[0], compat.string_types) or\n l[0].strip())):\n ret.append(l)\n return ret\n\n def _check_thousands(self, lines):\n if self.thousands is None:\n return lines\n\n return self._search_replace_num_columns(lines=lines,\n search=self.thousands,\n replace='')\n\n def _search_replace_num_columns(self, lines, search, replace):\n ret = []\n for l in lines:\n rl = []\n for i, x in enumerate(l):\n if (not isinstance(x, compat.string_types) or\n search not in x or\n (self._no_thousands_columns and\n i in self._no_thousands_columns) or\n self.nonnum.search(x.strip())):\n rl.append(x)\n else:\n rl.append(x.replace(search, replace))\n ret.append(rl)\n return ret\n\n def _check_decimal(self, lines):\n if self.decimal == _parser_defaults['decimal']:\n return lines\n\n return self._search_replace_num_columns(lines=lines,\n search=self.decimal,\n replace='.')\n\n def _clear_buffer(self):\n self.buf = []\n\n _implicit_index = False\n\n def _get_index_name(self, columns):\n \"\"\"\n Try several cases to get lines:\n\n 0) There are headers on row 0 and row 1 and their\n total summed lengths equals the length of the next line.\n Treat row 0 as columns and row 1 as indices\n 1) Look for implicit index: there are more columns\n on row 1 than row 0. 
If this is true, assume that row\n 1 lists index columns and row 0 lists normal columns.\n 2) Get index from the columns if it was listed.\n \"\"\"\n orig_names = list(columns)\n columns = list(columns)\n\n try:\n line = self._next_line()\n except StopIteration:\n line = None\n\n try:\n next_line = self._next_line()\n except StopIteration:\n next_line = None\n\n # implicitly index_col=0 b/c 1 fewer column names\n implicit_first_cols = 0\n if line is not None:\n # leave it 0, #2442\n # Case 1\n if self.index_col is not False:\n implicit_first_cols = len(line) - self.num_original_columns\n\n # Case 0\n if next_line is not None:\n if len(next_line) == len(line) + self.num_original_columns:\n # column and index names on diff rows\n self.index_col = lrange(len(line))\n self.buf = self.buf[1:]\n\n for c in reversed(line):\n columns.insert(0, c)\n\n # Update list of original names to include all indices.\n orig_names = list(columns)\n self.num_original_columns = len(columns)\n return line, orig_names, columns\n\n if implicit_first_cols > 0:\n # Case 1\n self._implicit_index = True\n if self.index_col is None:\n self.index_col = lrange(implicit_first_cols)\n\n index_name = None\n\n else:\n # Case 2\n (index_name, columns_,\n self.index_col) = _clean_index_names(columns, self.index_col)\n\n return index_name, orig_names, columns\n\n def _rows_to_cols(self, content):\n col_len = self.num_original_columns\n\n if self._implicit_index:\n col_len += len(self.index_col)\n\n max_len = max(len(row) for row in content)\n\n # Check that there are no rows with too many\n # elements in their row (rows with too few\n # elements are padded with NaN).\n if (max_len > col_len and\n self.index_col is not False and\n self.usecols is None):\n\n footers = self.skipfooter if self.skipfooter else 0\n bad_lines = []\n\n iter_content = enumerate(content)\n content_len = len(content)\n content = []\n\n for (i, l) in iter_content:\n actual_len = len(l)\n\n if actual_len > col_len:\n if self.error_bad_lines or self.warn_bad_lines:\n row_num = self.pos - (content_len - i + footers)\n bad_lines.append((row_num, actual_len))\n\n if self.error_bad_lines:\n break\n else:\n content.append(l)\n\n for row_num, actual_len in bad_lines:\n msg = ('Expected %d fields in line %d, saw %d' %\n (col_len, row_num + 1, actual_len))\n if (self.delimiter and\n len(self.delimiter) > 1 and\n self.quoting != csv.QUOTE_NONE):\n # see gh-13374\n reason = ('Error could possibly be due to quotes being '\n 'ignored when a multi-char delimiter is used.')\n msg += '. 
' + reason\n\n self._alert_malformed(msg, row_num + 1)\n\n # see gh-13320\n zipped_content = list(lib.to_object_array(\n content, min_width=col_len).T)\n\n if self.usecols:\n if self._implicit_index:\n zipped_content = [\n a for i, a in enumerate(zipped_content)\n if (i < len(self.index_col) or\n i - len(self.index_col) in self._col_indices)]\n else:\n zipped_content = [a for i, a in enumerate(zipped_content)\n if i in self._col_indices]\n return zipped_content\n\n def _get_lines(self, rows=None):\n lines = self.buf\n new_rows = None\n\n # already fetched some number\n if rows is not None:\n # we already have the lines in the buffer\n if len(self.buf) >= rows:\n new_rows, self.buf = self.buf[:rows], self.buf[rows:]\n\n # need some lines\n else:\n rows -= len(self.buf)\n\n if new_rows is None:\n if isinstance(self.data, list):\n if self.pos > len(self.data):\n raise StopIteration\n if rows is None:\n new_rows = self.data[self.pos:]\n new_pos = len(self.data)\n else:\n new_rows = self.data[self.pos:self.pos + rows]\n new_pos = self.pos + rows\n\n # Check for stop rows. n.b.: self.skiprows is a set.\n if self.skiprows:\n new_rows = [row for i, row in enumerate(new_rows)\n if not self.skipfunc(i + self.pos)]\n\n lines.extend(new_rows)\n self.pos = new_pos\n\n else:\n new_rows = []\n try:\n if rows is not None:\n for _ in range(rows):\n new_rows.append(next(self.data))\n lines.extend(new_rows)\n else:\n rows = 0\n\n while True:\n new_row = self._next_iter_line(\n row_num=self.pos + rows + 1)\n rows += 1\n\n if new_row is not None:\n new_rows.append(new_row)\n\n except StopIteration:\n if self.skiprows:\n new_rows = [row for i, row in enumerate(new_rows)\n if not self.skipfunc(i + self.pos)]\n lines.extend(new_rows)\n if len(lines) == 0:\n raise\n self.pos += len(new_rows)\n\n self.buf = []\n else:\n lines = new_rows\n\n if self.skipfooter:\n lines = lines[:-self.skipfooter]\n\n lines = self._check_comments(lines)\n if self.skip_blank_lines:\n lines = self._remove_empty_lines(lines)\n lines = self._check_thousands(lines)\n return self._check_decimal(lines)\n\n\ndef _make_date_converter(date_parser=None, dayfirst=False,\n infer_datetime_format=False):\n def converter(*date_cols):\n if date_parser is None:\n strs = _concat_date_cols(date_cols)\n\n try:\n return tools.to_datetime(\n ensure_object(strs),\n utc=None,\n box=True,\n dayfirst=dayfirst,\n errors='ignore',\n infer_datetime_format=infer_datetime_format\n )\n except:\n return tools.to_datetime(\n parsing.try_parse_dates(strs, dayfirst=dayfirst))\n else:\n try:\n result = tools.to_datetime(\n date_parser(*date_cols), errors='ignore')\n if isinstance(result, datetime.datetime):\n raise Exception('scalar parser')\n return result\n except Exception:\n try:\n return tools.to_datetime(\n parsing.try_parse_dates(_concat_date_cols(date_cols),\n parser=date_parser,\n dayfirst=dayfirst),\n errors='ignore')\n except Exception:\n return generic_parser(date_parser, *date_cols)\n\n return converter\n\n\ndef _process_date_conversion(data_dict, converter, parse_spec,\n index_col, index_names, columns,\n keep_date_col=False):\n def _isindex(colspec):\n return ((isinstance(index_col, list) and\n colspec in index_col) or\n (isinstance(index_names, list) and\n colspec in index_names))\n\n new_cols = []\n new_data = {}\n\n orig_names = columns\n columns = list(columns)\n\n date_cols = set()\n\n if parse_spec is None or isinstance(parse_spec, bool):\n return data_dict, columns\n\n if isinstance(parse_spec, list):\n # list of column lists\n for colspec in 
parse_spec:\n if is_scalar(colspec):\n if isinstance(colspec, int) and colspec not in data_dict:\n colspec = orig_names[colspec]\n if _isindex(colspec):\n continue\n data_dict[colspec] = converter(data_dict[colspec])\n else:\n new_name, col, old_names = _try_convert_dates(\n converter, colspec, data_dict, orig_names)\n if new_name in data_dict:\n raise ValueError('New date column already in dict %s' %\n new_name)\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n elif isinstance(parse_spec, dict):\n # dict of new name to column list\n for new_name, colspec in compat.iteritems(parse_spec):\n if new_name in data_dict:\n raise ValueError('Date column %s already in dict' %\n new_name)\n\n _, col, old_names = _try_convert_dates(converter, colspec,\n data_dict, orig_names)\n\n new_data[new_name] = col\n new_cols.append(new_name)\n date_cols.update(old_names)\n\n data_dict.update(new_data)\n new_cols.extend(columns)\n\n if not keep_date_col:\n for c in list(date_cols):\n data_dict.pop(c)\n new_cols.remove(c)\n\n return data_dict, new_cols\n\n\ndef _try_convert_dates(parser, colspec, data_dict, columns):\n colset = set(columns)\n colnames = []\n\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int) and c not in columns:\n colnames.append(columns[c])\n else:\n colnames.append(c)\n\n new_name = '_'.join(str(x) for x in colnames)\n to_parse = [data_dict[c] for c in colnames if c in data_dict]\n\n new_col = parser(*to_parse)\n return new_name, new_col, colnames\n\n\ndef _clean_na_values(na_values, keep_default_na=True):\n\n if na_values is None:\n if keep_default_na:\n na_values = _NA_VALUES\n else:\n na_values = set()\n na_fvalues = set()\n elif isinstance(na_values, dict):\n old_na_values = na_values.copy()\n na_values = {} # Prevent aliasing.\n\n # Convert the values in the na_values dictionary\n # into array-likes for further use. 
This is also\n # where we append the default NaN values, provided\n # that `keep_default_na=True`.\n for k, v in compat.iteritems(old_na_values):\n if not is_list_like(v):\n v = [v]\n\n if keep_default_na:\n v = set(v) | _NA_VALUES\n\n na_values[k] = v\n na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()}\n else:\n if not is_list_like(na_values):\n na_values = [na_values]\n na_values = _stringify_na_values(na_values)\n if keep_default_na:\n na_values = na_values | _NA_VALUES\n\n na_fvalues = _floatify_na_values(na_values)\n\n return na_values, na_fvalues\n\n\ndef _clean_index_names(columns, index_col):\n if not _is_index_col(index_col):\n return None, columns, index_col\n\n columns = list(columns)\n\n cp_cols = list(columns)\n index_names = []\n\n # don't mutate\n index_col = list(index_col)\n\n for i, c in enumerate(index_col):\n if isinstance(c, compat.string_types):\n index_names.append(c)\n for j, name in enumerate(cp_cols):\n if name == c:\n index_col[i] = j\n columns.remove(name)\n break\n else:\n name = cp_cols[c]\n columns.remove(name)\n index_names.append(name)\n\n # hack\n if isinstance(index_names[0], compat.string_types)\\\n and 'Unnamed' in index_names[0]:\n index_names[0] = None\n\n return index_names, columns, index_col\n\n\ndef _get_empty_meta(columns, index_col, index_names, dtype=None):\n columns = list(columns)\n\n # Convert `dtype` to a defaultdict of some kind.\n # This will enable us to write `dtype[col_name]`\n # without worrying about KeyError issues later on.\n if not isinstance(dtype, dict):\n # if dtype == None, default will be np.object.\n default_dtype = dtype or np.object\n dtype = defaultdict(lambda: default_dtype)\n else:\n # Save a copy of the dictionary.\n _dtype = dtype.copy()\n dtype = defaultdict(lambda: np.object)\n\n # Convert column indexes to column names.\n for k, v in compat.iteritems(_dtype):\n col = columns[k] if is_integer(k) else k\n dtype[col] = v\n\n # Even though we have no data, the \"index\" of the empty DataFrame\n # could for example still be an empty MultiIndex. Thus, we need to\n # check whether we have any index columns specified, via either:\n #\n # 1) index_col (column indices)\n # 2) index_names (column names)\n #\n # Both must be non-null to ensure a successful construction. 
Otherwise,\n # we have to create a generic emtpy Index.\n if (index_col is None or index_col is False) or index_names is None:\n index = Index([])\n else:\n data = [Series([], dtype=dtype[name]) for name in index_names]\n index = ensure_index_from_sequences(data, names=index_names)\n index_col.sort()\n\n for i, n in enumerate(index_col):\n columns.pop(n - i)\n\n col_dict = {col_name: Series([], dtype=dtype[col_name])\n for col_name in columns}\n\n return index, columns, col_dict\n\n\ndef _floatify_na_values(na_values):\n # create float versions of the na_values\n result = set()\n for v in na_values:\n try:\n v = float(v)\n if not np.isnan(v):\n result.add(v)\n except:\n pass\n return result\n\n\ndef _stringify_na_values(na_values):\n \"\"\" return a stringified and numeric for these values \"\"\"\n result = []\n for x in na_values:\n result.append(str(x))\n result.append(x)\n try:\n v = float(x)\n\n # we are like 999 here\n if v == int(v):\n v = int(v)\n result.append(\"%s.0\" % v)\n result.append(str(v))\n\n result.append(v)\n except:\n pass\n try:\n result.append(int(x))\n except:\n pass\n return set(result)\n\n\ndef _get_na_values(col, na_values, na_fvalues, keep_default_na):\n \"\"\"\n Get the NaN values for a given column.\n\n Parameters\n ----------\n col : str\n The name of the column.\n na_values : array-like, dict\n The object listing the NaN values as strings.\n na_fvalues : array-like, dict\n The object listing the NaN values as floats.\n keep_default_na : bool\n If `na_values` is a dict, and the column is not mapped in the\n dictionary, whether to return the default NaN values or the empty set.\n\n Returns\n -------\n nan_tuple : A length-two tuple composed of\n\n 1) na_values : the string NaN values for that column.\n 2) na_fvalues : the float NaN values for that column.\n \"\"\"\n\n if isinstance(na_values, dict):\n if col in na_values:\n return na_values[col], na_fvalues[col]\n else:\n if keep_default_na:\n return _NA_VALUES, set()\n\n return set(), set()\n else:\n return na_values, na_fvalues\n\n\ndef _get_col_names(colspec, columns):\n colset = set(columns)\n colnames = []\n for c in colspec:\n if c in colset:\n colnames.append(c)\n elif isinstance(c, int):\n colnames.append(columns[c])\n return colnames\n\n\ndef _concat_date_cols(date_cols):\n if len(date_cols) == 1:\n if compat.PY3:\n return np.array([compat.text_type(x) for x in date_cols[0]],\n dtype=object)\n else:\n return np.array([\n str(x) if not isinstance(x, compat.string_types) else x\n for x in date_cols[0]\n ], dtype=object)\n\n rs = np.array([' '.join(compat.text_type(y) for y in x)\n for x in zip(*date_cols)], dtype=object)\n return rs\n\n\nclass FixedWidthReader(BaseIterator):\n \"\"\"\n A reader of fixed-width lines.\n \"\"\"\n\n def __init__(self, f, colspecs, delimiter, comment, skiprows=None):\n self.f = f\n self.buffer = None\n self.delimiter = '\\r\\n' + delimiter if delimiter else '\\n\\r\\t '\n self.comment = comment\n if colspecs == 'infer':\n self.colspecs = self.detect_colspecs(skiprows=skiprows)\n else:\n self.colspecs = colspecs\n\n if not isinstance(self.colspecs, (tuple, list)):\n raise TypeError(\"column specifications must be a list or tuple, \"\n \"input was a %r\" % type(colspecs).__name__)\n\n for colspec in self.colspecs:\n if not (isinstance(colspec, (tuple, list)) and\n len(colspec) == 2 and\n isinstance(colspec[0], (int, np.integer, type(None))) and\n isinstance(colspec[1], (int, np.integer, type(None)))):\n raise TypeError('Each column specification must be '\n '2 element tuple or 
list of integers')\n\n def get_rows(self, n, skiprows=None):\n \"\"\"\n Read rows from self.f, skipping as specified.\n\n We distinguish buffer_rows (the first <= n lines)\n from the rows returned to detect_colspecs because\n it's simpler to leave the other locations with\n skiprows logic alone than to modify them to deal\n with the fact we skipped some rows here as well.\n\n Parameters\n ----------\n n : int\n Number of rows to read from self.f, not counting\n rows that are skipped.\n skiprows: set, optional\n Indices of rows to skip.\n\n Returns\n -------\n detect_rows : list of str\n A list containing the rows to read.\n\n \"\"\"\n if skiprows is None:\n skiprows = set()\n buffer_rows = []\n detect_rows = []\n for i, row in enumerate(self.f):\n if i not in skiprows:\n detect_rows.append(row)\n buffer_rows.append(row)\n if len(detect_rows) >= n:\n break\n self.buffer = iter(buffer_rows)\n return detect_rows\n\n def detect_colspecs(self, n=100, skiprows=None):\n # Regex escape the delimiters\n delimiters = ''.join(r'\\%s' % x for x in self.delimiter)\n pattern = re.compile('([^%s]+)' % delimiters)\n rows = self.get_rows(n, skiprows)\n if not rows:\n raise EmptyDataError(\"No rows from which to infer column width\")\n max_len = max(map(len, rows))\n mask = np.zeros(max_len + 1, dtype=int)\n if self.comment is not None:\n rows = [row.partition(self.comment)[0] for row in rows]\n for row in rows:\n for m in pattern.finditer(row):\n mask[m.start():m.end()] = 1\n shifted = np.roll(mask, 1)\n shifted[0] = 0\n edges = np.where((mask ^ shifted) == 1)[0]\n edge_pairs = list(zip(edges[::2], edges[1::2]))\n return edge_pairs\n\n def __next__(self):\n if self.buffer is not None:\n try:\n line = next(self.buffer)\n except StopIteration:\n self.buffer = None\n line = next(self.f)\n else:\n line = next(self.f)\n # Note: 'colspecs' is a sequence of half-open intervals.\n return [line[fromm:to].strip(self.delimiter)\n for (fromm, to) in self.colspecs]\n\n\nclass FixedWidthFieldParser(PythonParser):\n \"\"\"\n Specialization that Converts fixed-width fields into DataFrames.\n See PythonParser for details.\n \"\"\"\n\n def __init__(self, f, **kwds):\n # Support iterators, convert to a list.\n self.colspecs = kwds.pop('colspecs')\n PythonParser.__init__(self, f, **kwds)\n\n def _make_reader(self, f):\n self.data = FixedWidthReader(f, self.colspecs, self.delimiter,\n self.comment, self.skiprows)\n"
] |
[
[
"pandas.core.index.ensure_index_from_sequences",
"pandas.errors.EmptyDataError",
"numpy.asarray",
"pandas.core.common.AbstractMethodError",
"pandas.core.dtypes.common.is_dtype_equal",
"pandas.errors.ParserError",
"pandas.compat.range",
"pandas.io.common._infer_compression",
"pandas.compat.map",
"pandas.compat.iteritems",
"pandas.io.common.is_file_like",
"pandas.core.dtypes.common.ensure_object",
"pandas.core.frame.DataFrame",
"numpy.where",
"numpy.roll",
"pandas.core.series.Series",
"pandas.compat.text_type",
"pandas._libs.lib.map_infer_mask",
"pandas.compat.StringIO",
"pandas.core.index.MultiIndex.from_tuples",
"pandas._libs.lib.map_infer",
"pandas.core.dtypes.common.is_float",
"pandas.core.dtypes.common.is_string_dtype",
"pandas.core.index.RangeIndex",
"numpy.zeros",
"pandas.core.dtypes.common.is_categorical_dtype",
"pandas.core.dtypes.common.is_list_like",
"pandas.core.dtypes.common.is_integer_dtype",
"pandas.util._decorators.Appender",
"pandas.compat.u",
"numpy.putmask",
"pandas.io.common.UnicodeReader",
"numpy.isnan",
"pandas._libs.tslibs.parsing.try_parse_dates",
"pandas.io.date_converters.generic_parser",
"pandas._libs.lib.to_object_array",
"pandas.compat.itervalues",
"pandas.io.common._validate_header_arg",
"pandas.core.dtypes.cast.astype_nansafe",
"pandas.io.common._get_handle",
"pandas.io.common.UTF8Recoder",
"pandas.core.dtypes.common.is_scalar",
"pandas.core.dtypes.common.is_integer",
"pandas._libs.parsers.TextReader",
"pandas.core.dtypes.common.is_object_dtype",
"pandas.io.common.get_filepath_or_buffer",
"pandas.compat.zip",
"pandas._libs.lib.is_bool",
"pandas.core.dtypes.missing.isna",
"pandas._libs.lib.infer_dtype",
"pandas.compat.lrange",
"pandas.core.index.Index"
]
] |
UT-Austin-RPL/BUDS
|
[
"6b5ae1864b50bb6212fae4fdfba4ffc8e74f2e85",
"6b5ae1864b50bb6212fae4fdfba4ffc8e74f2e85"
] |
[
"models/model_utils.py",
"multitask/generate_subgoal_embedding_singletask.py"
] |
[
"import torch\nimport torch.nn as nn\n\nfrom collections import namedtuple\n\nUSE_GPU = torch.cuda.is_available()\n\nModality_input = namedtuple(\"Modality_input\", [\"agentview\", \"eye_in_hand\", \"force\", \"proprio\", \"frontview\"])\nModality_output = namedtuple(\"Modality_output\", [\"agentview_recon\", \"eye_in_hand_recon\", \"contact\", 'proprio', 'frontview_recon'])\n\ndef safe_cuda(x):\n if USE_GPU:\n return x.cuda()\n return x\n\ndef product_of_experts(m_vect, v_vect):\n T_vect = 1.0 / v_vect\n\n mu = (m_vect * T_vect).sum(2) * (1 / T_vect.sum(2))\n var = 1 / T_vect.sum(2)\n\n return mu, var\n\n\nclass CausalConv1d(nn.Conv1d):\n \"\"\"\n Causal Convolution in 1d.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n dilation=1,\n bias=True):\n self._padding = (kernel_size - 1) * dilation\n\n super().__init__(in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=self._padding,\n dilation=dilation,\n bias=bias)\n\n def forward(self, x):\n y = super().forward(x)\n if self.padding != 0:\n return y[:, :, :-self._padding]\n return y\n\ndef conv2d_leakyrelu(in_channels, out_channels, kernel_size, stride=1, dilation=1, alpha=0.1, bias=True):\n\n dilated_kernel_size = (kernel_size - 1) * (dilation - 1) + kernel_size\n same_padding = (dilated_kernel_size - 1) // 2\n\n return nn.Sequential(\n nn.Conv2d(in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=same_padding,\n dilation=dilation,\n bias=bias),\n nn.LeakyReLU(alpha, inplace=True)\n )\n\ndef crop_like(inp, target):\n if inp.size()[2:] == target.size()[2:]:\n return inp\n else:\n return inp[:, :, :target.size(2), :target.size(3)]\n\ndef deconv(in_channels, out_channels):\n return nn.Sequential(\n nn.ConvTranspose2d(\n in_channels, out_channels, kernel_size=4, stride=2, padding=1, bias=False,\n ),\n nn.LeakyReLU(0.1, inplace=True))\n \n\nclass ForceEncoder(nn.Module):\n def __init__(self, force_dim, z_dim, alpha=0.1):\n super().__init__()\n self.force_dim = force_dim\n self.z_dim = z_dim\n\n self.layers = torch.nn.Sequential(CausalConv1d(self.force_dim, 32, kernel_size=2, stride=2),\n nn.LeakyReLU(alpha, inplace=True),\n # CausalConv1d(16, 32, kernel_size=2, stride=2),\n # nn.LeakyReLU(alpha, inplace=True),\n CausalConv1d(32, 64, kernel_size=2, stride=2),\n nn.LeakyReLU(alpha, inplace=True),\n CausalConv1d(64, 128, kernel_size=2, stride=2),\n nn.LeakyReLU(alpha, inplace=True),\n CausalConv1d(128, 2 * self.z_dim, kernel_size=2, stride=2),\n nn.LeakyReLU(alpha, inplace=True),\n )\n \n def forward(self, x):\n return torch.split(self.layers(x), self.z_dim, dim=1)\n\nclass ContactDecoder(nn.Module):\n def __init__(self, z_dim, alpha=0.1):\n super().__init__()\n self.z_dim = z_dim\n\n self.layers = torch.nn.Sequential(nn.Linear(z_dim, 256),\n nn.LeakyReLU(alpha, inplace=True),\n # nn.Linear(128, 128),\n # nn.LeakyReLU(alpha, inplace=True), \n nn.Linear(256, 1),\n torch.nn.Sigmoid(),\n )\n\n def forward(self, x):\n return self.layers(x)\n\nclass EEDeltaEncoder(nn.Module):\n def __init__(self, z_dim, ee_dim, alpha=0.1):\n super().__init__()\n self.z_dim = z_dim\n self.ee_dim = ee_dim\n self.layers = torch.nn.Sequential(nn.Linear(ee_dim, 256),\n nn.LeakyReLU(alpha, inplace=True),\n nn.Linear(256, 256),\n nn.LeakyReLU(alpha, inplace=True), \n nn.Linear(256, 2 * z_dim),\n )\n \n def forward(self, x):\n return torch.split(self.layers(x).unsqueeze(-1), self.z_dim, dim=1)\n \nclass EEDeltaDecoder(nn.Module):\n def __init__(self, z_dim, ee_dim, 
alpha=0.1):\n super().__init__()\n self.z_dim = z_dim\n self.ee_dim = ee_dim\n self.layers = torch.nn.Sequential(nn.Linear(z_dim, 128),\n nn.LeakyReLU(alpha, inplace=True),\n nn.Linear(128, 128),\n nn.LeakyReLU(alpha, inplace=True), \n nn.Linear(128, ee_dim),\n )\n \n def forward(self, x):\n return self.layers(x)\n \n# Image\nclass ImageEncoder(nn.Module):\n def __init__(self, z_dim):\n super().__init__()\n\n self.z_dim = z_dim\n\n self.conv1 = conv2d_leakyrelu(in_channels=3, out_channels=16, kernel_size=7, stride=2)\n self.conv2 = conv2d_leakyrelu(in_channels=16, out_channels=32, kernel_size=5, stride=2)\n self.conv3 = conv2d_leakyrelu(in_channels=32, out_channels=64, kernel_size=5, stride=2)\n self.conv4 = conv2d_leakyrelu(in_channels=64, out_channels=64, kernel_size=3, stride=2)\n self.conv5 = conv2d_leakyrelu(in_channels=64, out_channels=128, kernel_size=3, stride=2)\n self.conv6 = conv2d_leakyrelu(in_channels=128, out_channels=self.z_dim, kernel_size=3, stride=2)\n\n self.embedding_layer = nn.Linear(4 * self.z_dim, 2 * self.z_dim)\n\n \n # (Yifeng) Might need to try kaiming initialization\n \n def forward(self, x):\n out_conv1 = self.conv1(x)\n out_conv2 = self.conv2(out_conv1)\n out_conv3 = self.conv3(out_conv2)\n out_conv4 = self.conv4(out_conv3)\n out_conv5 = self.conv5(out_conv4)\n out_conv6 = self.conv6(out_conv5)\n\n out_convs = (out_conv1,\n out_conv2,\n out_conv3,\n out_conv4,\n out_conv5,\n out_conv6\n )\n\n img_out = self.embedding_layer(torch.flatten(out_conv6, start_dim=1)).unsqueeze(2)\n return img_out, out_convs\n\nclass ImageDecoder(nn.Module):\n def __init__(self, z_dim, use_skip_connection=True):\n super().__init__()\n\n self.z_dim = z_dim\n self.embedding_conv = conv2d_leakyrelu(self.z_dim, 64, kernel_size=1, stride=1)\n\n self.use_skip_connection = use_skip_connection\n\n if self.use_skip_connection:\n self.deconv6 = deconv(64, 64)\n self.deconv5 = deconv(64, 32)\n self.deconv4 = deconv(96, 32)\n self.deconv3 = deconv(96, 32)\n self.deconv2 = deconv(64, 32)\n self.deconv1 = deconv(48, 3)\n else:\n self.deconv6 = deconv(64, 64)\n self.deconv5 = deconv(64, 32)\n self.deconv4 = deconv(32, 32)\n self.deconv3 = deconv(32, 32)\n self.deconv2 = deconv(32, 32)\n self.deconv1 = deconv(32, 3)\n\n def forward(self, x, out_convs):\n out_conv1, out_conv2, out_conv3, out_conv4, out_conv5, out_conv6 = out_convs\n\n out_deconv_embedding = self.embedding_conv(x)\n\n if self.use_skip_connection:\n out_deconv6 = self.deconv6(out_deconv_embedding)\n out_deconv5 = self.deconv5(out_deconv6)\n out_deconv4 = self.deconv4(torch.cat((out_deconv5, out_conv4), 1))\n out_deconv3 = self.deconv3(torch.cat((out_deconv4, out_conv3), 1))\n out_deconv2 = self.deconv2(torch.cat((out_deconv3, out_conv2), 1))\n out_deconv1 = self.deconv1(torch.cat((out_deconv2, out_conv1), 1))\n\n else:\n out_deconv6 = self.deconv6(out_deconv_embedding)\n out_deconv5 = self.deconv5(out_deconv6)\n out_deconv4 = self.deconv4(out_deconv5)\n out_deconv3 = self.deconv3(out_deconv4)\n out_deconv2 = self.deconv2(out_deconv3)\n out_deconv1 = self.deconv1(out_deconv2)\n\n\n # print(out_deconv6.shape,\n # out_deconv5.shape,\n # out_deconv4.shape,\n # out_deconv3.shape,\n # out_deconv2.shape)\n # print(out_conv6.shape,\n # out_conv5.shape,\n # out_conv4.shape,\n # out_conv3.shape,\n # out_conv2.shape)\n\n return torch.sigmoid(out_deconv1)\n \n\nclass ImageVAE(torch.nn.Module):\n def __init__(self, z_dim=128, use_skip_connection=True):\n super().__init__()\n\n self.z_dim = z_dim\n self.encoder = ImageEncoder(z_dim=z_dim)\n 
self.decoder = ImageDecoder(z_dim=z_dim, use_skip_connection=use_skip_connection)\n\n def forward(self, x):\n h, convs = self.encoder(x)\n mu, logvar = torch.split(h, self.z_dim, dim=1)\n\n z = self.sampling(mu, logvar)\n out = self.decoder(z.view(z.size(0), self.z_dim, 1, 1).expand(-1, -1, 2, 2), convs)\n return out, mu, logvar\n\n def sampling(self, mu, logvar):\n std = torch.exp(0.5 * logvar)\n eps = torch.randn_like(mu)\n return eps * std + mu\n\nclass SensorFusion(torch.nn.Module):\n def __init__(self, z_dim=128, use_skip_connection=True, modalities=['frontview', 'eye_in_hand', 'force', 'proprio'], use_gmm=False, proprio_dim=5):\n super().__init__()\n\n self.use_gmm = use_gmm\n\n self.z_dim = z_dim\n\n self.modalities = modalities\n\n if \"frontview\" in modalities:\n self.encoder_frontview = ImageEncoder(z_dim=z_dim)\n self.decoder_frontview = ImageDecoder(z_dim=z_dim, use_skip_connection=use_skip_connection)\n\n self.encoder_agentview = ImageEncoder(z_dim=z_dim)\n self.decoder_agentview = ImageDecoder(z_dim=z_dim, use_skip_connection=use_skip_connection)\n \n self.encoder_eye_in_hand = ImageEncoder(z_dim=z_dim)\n self.decoder_eye_in_hand = ImageDecoder(z_dim=z_dim, use_skip_connection=use_skip_connection)\n\n self.encoder_force = ForceEncoder(force_dim=6, z_dim=z_dim)\n self.decoder_contact = ContactDecoder(z_dim=z_dim)\n\n self.encoder_proprio = EEDeltaEncoder(z_dim=z_dim, ee_dim=proprio_dim)\n self.decoder_proprio = EEDeltaDecoder(z_dim=z_dim, ee_dim=proprio_dim)\n\n self.z_prior_m = torch.nn.Parameter(\n torch.zeros(1, self.z_dim), requires_grad=False\n )\n self.z_prior_v = torch.nn.Parameter(\n torch.ones(1, self.z_dim), requires_grad=False\n )\n\n def forward(self, x, encoder_only=False):\n\n batch_dim = x[0].size(0)\n \n z_prior_m = (self.z_prior_m.expand(batch_dim, *self.z_prior_m.shape).reshape(-1, *self.z_prior_m.shape[1:])).unsqueeze(2)\n z_prior_v = (self.z_prior_v.expand(batch_dim, *self.z_prior_v.shape).reshape(-1, *self.z_prior_v.shape[1:])).unsqueeze(2)\n m_vect_list = [z_prior_m]\n v_vect_list = [z_prior_v]\n if 'agentview' in self.modalities:\n x_agentview = x.agentview\n h_agentview, convs_agentview = self.encoder_agentview(x_agentview)\n mu_agentview, var_h_agentview = torch.split(h_agentview, self.z_dim, dim=1)\n var_agentview = torch.nn.Softplus()(var_h_agentview) + 1e-6\n m_vect_list.append(mu_agentview)\n v_vect_list.append(var_agentview)\n\n if 'frontview' in self.modalities:\n x_frontview = x.frontview\n h_frontview, convs_frontview = self.encoder_frontview(x_frontview)\n mu_frontview, var_h_frontview = torch.split(h_frontview, self.z_dim, dim=1)\n var_frontview = torch.nn.Softplus()(var_h_frontview) + 1e-6\n m_vect_list.append(mu_frontview)\n v_vect_list.append(var_frontview)\n \n if 'eye_in_hand' in self.modalities:\n x_eye_in_hand = x.eye_in_hand\n h_eye_in_hand, convs_eye_in_hand = self.encoder_eye_in_hand(x_eye_in_hand)\n mu_eye_in_hand, var_h_eye_in_hand = torch.split(h_eye_in_hand, self.z_dim, dim=1)\n var_eye_in_hand = torch.nn.Softplus()(var_h_eye_in_hand) + 1e-6\n\n if 'force' in self.modalities:\n force = x.force\n mu_force, var_h_force = self.encoder_force(force)\n var_force = torch.nn.Softplus()(var_h_force) + 1e-6\n\n m_vect_list.append(mu_force)\n v_vect_list.append(var_force)\n\n if 'proprio' in self.modalities:\n proprio = x.proprio\n mu_proprio, var_h_proprio = self.encoder_proprio(proprio)\n var_proprio = torch.nn.Softplus()(var_h_proprio) + 1e-6\n\n m_vect_list.append(mu_proprio)\n v_vect_list.append(var_proprio)\n\n m_vect = 
torch.cat(m_vect_list, dim=2)\n v_vect = torch.cat(v_vect_list, dim=2)\n\n mu_z, var_z = product_of_experts(m_vect, v_vect)\n\n if encoder_only:\n return mu_z\n\n\n z = self.sampling(mu_z, var_z)\n # print(z.shape)\n # print(mu_force.shape)\n\n\n out_frontview = None\n out_agentview = None\n out_eye_in_hand = None\n out_contact = None\n out_proprio = None\n\n reshaped_z = z.view(batch_dim, self.z_dim, 1, 1).expand(-1, -1, 2, 2)\n if 'agentview' in self.modalities:\n out_agentview = self.decoder_agentview(reshaped_z, convs_agentview)\n \n if 'frontview' in self.modalities:\n out_frontview = self.decoder_frontview(reshaped_z, convs_frontview)\n\n if 'eye_in_hand' in self.modalities:\n out_eye_in_hand = self.decoder_eye_in_hand(reshaped_z, convs_eye_in_hand)\n\n if 'force' in self.modalities:\n out_contact = self.decoder_contact(z)\n\n if 'proprio' in self.modalities:\n out_proprio = self.decoder_proprio(z)\n \n\n output = Modality_output(frontview_recon=out_frontview,\n agentview_recon=out_agentview,\n eye_in_hand_recon=out_eye_in_hand,\n contact=out_contact,\n proprio=out_proprio)\n\n return output, mu_z, var_z, self.z_prior_m, self.z_prior_v\n\n # def forward_encoder(self, x_frontview, x_eye_in_hand):\n\n # z_prior_m = (self.z_prior_m.expand(x_frontview.size(0), *self.z_prior_m.shape).reshape(-1, *self.z_prior_m.shape[1:])).unsqueeze(2)\n # z_prior_v = (self.z_prior_v.expand(x_frontview.size(0), *self.z_prior_v.shape).reshape(-1, *self.z_prior_v.shape[1:])).unsqueeze(2)\n\n # h_frontview, convs_frontview = self.encoder_frontview(x_frontview)\n # h_eye_in_hand, convs_eye_in_hand = self.encoder_eye_in_hand(x_eye_in_hand)\n\n # mu_frontview, var_h_frontview = torch.split(h_frontview, self.z_dim, dim=1)\n # mu_eye_in_hand, var_h_eye_in_hand = torch.split(h_eye_in_hand, self.z_dim, dim=1)\n\n # var_frontview = torch.nn.Softplus()(var_h_frontview)\n # var_eye_in_hand = torch.nn.Softplus()(var_h_eye_in_hand)\n\n # m_vect = torch.cat([mu_frontview, mu_eye_in_hand, z_prior_m], dim=2)\n # v_vect = torch.cat([var_frontview, var_eye_in_hand, z_prior_v], dim=2)\n\n # mu_z, var_z = product_of_experts(m_vect, v_vect)\n\n # z = self.sampling(mu_z, var_z)\n # return z\n\n def sampling(self, mu, var):\n std = torch.sqrt(var)\n eps = torch.randn_like(mu)\n return eps * std + mu\n\nclass GMM():\n def __init__(self, z_dim, num_components=10):\n self.num_components = num_components\n self.mean_list = []\n self.var_list = []\n for i in range(self.num_components):\n self.mean_list.append(i * 0.1)\n self.logvar_list.append(0.1)\n\n self.mu = safe_cuda(torch.stack(self.mean_list, dim=0))\n self.var = safe_cuda(torch.stack(self.var_list, dim=0))\n \n\nclass SensorFusionGMM(torch.nn.Module):\n def __init__(self, z_dim=128, use_skip_connection=True, modalities=['frontview', 'eye_in_hand', 'force', 'proprio'], num_components=10):\n super().__init__()\n\n self.use_gmm = use_gmm\n self.gmm = GMM(z_dim=z_dim,\n num_components=num_components)\n\n self.z_dim = z_dim\n\n self.modalities = modalities\n\n if \"frontview\" in modalities:\n self.encoder_frontview = ImageEncoder(z_dim=z_dim)\n self.decoder_frontview = ImageDecoder(z_dim=z_dim, use_skip_connection=use_skip_connection)\n\n self.encoder_agentview = ImageEncoder(z_dim=z_dim)\n self.decoder_agentview = ImageDecoder(z_dim=z_dim, use_skip_connection=use_skip_connection)\n \n self.encoder_eye_in_hand = ImageEncoder(z_dim=z_dim)\n self.decoder_eye_in_hand = ImageDecoder(z_dim=z_dim, use_skip_connection=use_skip_connection)\n\n self.encoder_force = 
ForceEncoder(force_dim=6, z_dim=z_dim)\n self.decoder_contact = ContactDecoder(z_dim=z_dim)\n\n self.encoder_proprio = EEDeltaEncoder(z_dim=z_dim, ee_dim=6)\n self.decoder_proprio = EEDeltaDecoder(z_dim=z_dim, ee_dim=6)\n\n self.z_prior_m = torch.nn.Parameter(\n torch.zeros(1, self.z_dim), requires_grad=False\n )\n self.z_prior_v = torch.nn.Parameter(\n torch.ones(1, self.z_dim), requires_grad=False\n )\n\n def forward(self, x, encoder_only=False):\n\n batch_dim = x[0].size(0)\n \n z_prior_m = (self.z_prior_m.expand(batch_dim, *self.z_prior_m.shape).reshape(-1, *self.z_prior_m.shape[1:])).unsqueeze(2)\n z_prior_v = (self.z_prior_v.expand(batch_dim, *self.z_prior_v.shape).reshape(-1, *self.z_prior_v.shape[1:])).unsqueeze(2)\n m_vect_list = [z_prior_m]\n v_vect_list = [z_prior_v]\n if 'agentview' in self.modalities:\n x_agentview = x.agentview\n h_agentview, convs_agentview = self.encoder_agentview(x_agentview)\n mu_agentview, var_h_agentview = torch.split(h_agentview, self.z_dim, dim=1)\n var_agentview = torch.nn.Softplus()(var_h_agentview) + 1e-6\n m_vect_list.append(mu_agentview)\n v_vect_list.append(var_agentview)\n\n if 'frontview' in self.modalities:\n x_frontview = x.frontview\n h_frontview, convs_frontview = self.encoder_frontview(x_frontview)\n mu_frontview, var_h_frontview = torch.split(h_frontview, self.z_dim, dim=1)\n var_frontview = torch.nn.Softplus()(var_h_frontview) + 1e-6\n m_vect_list.append(mu_frontview)\n v_vect_list.append(var_frontview)\n \n if 'eye_in_hand' in self.modalities:\n x_eye_in_hand = x.eye_in_hand\n h_eye_in_hand, convs_eye_in_hand = self.encoder_eye_in_hand(x_eye_in_hand)\n mu_eye_in_hand, var_h_eye_in_hand = torch.split(h_eye_in_hand, self.z_dim, dim=1)\n var_eye_in_hand = torch.nn.Softplus()(var_h_eye_in_hand) + 1e-6\n\n if 'force' in self.modalities:\n force = x.force\n mu_force, var_h_force = self.encoder_force(force)\n var_force = torch.nn.Softplus()(var_h_force) + 1e-6\n\n m_vect_list.append(mu_force)\n v_vect_list.append(var_force)\n\n if 'proprio' in self.modalities:\n proprio = x.proprio\n mu_proprio, var_h_proprio = self.encoder_proprio(proprio)\n var_proprio = torch.nn.Softplus()(var_h_proprio) + 1e-6\n\n m_vect_list.append(mu_proprio)\n v_vect_list.append(var_proprio)\n\n m_vect = torch.cat(m_vect_list, dim=2)\n v_vect = torch.cat(v_vect_list, dim=2)\n\n mu_z, var_z = product_of_experts(m_vect, v_vect)\n\n if encoder_only:\n return mu_z\n\n\n z = self.sampling(mu_z, var_z)\n\n out_frontview = None\n out_agentview = None\n out_eye_in_hand = None\n out_contact = None\n out_proprio = None\n\n reshaped_z = z.view(batch_dim, self.z_dim, 1, 1).expand(-1, -1, 2, 2)\n if 'agentview' in self.modalities:\n out_agentview = self.decoder_agentview(reshaped_z, convs_agentview)\n \n if 'frontview' in self.modalities:\n out_frontview = self.decoder_frontview(reshaped_z, convs_frontview)\n\n if 'eye_in_hand' in self.modalities:\n out_eye_in_hand = self.decoder_eye_in_hand(reshaped_z, convs_eye_in_hand)\n\n if 'force' in self.modalities:\n out_contact = self.decoder_contact(z)\n\n if 'proprio' in self.modalities:\n out_proprio = self.decoder_proprio(z)\n \n\n output = Modality_output(frontview_recon=out_frontview,\n agentview_recon=out_agentview,\n eye_in_hand_recon=out_eye_in_hand,\n contact=out_contact,\n proprio=out_proprio)\n\n return output, z, mu_z, var_z, self.z_prior_m, self.z_prior_v\n\n def sampling(self, mu, var):\n std = torch.sqrt(var)\n eps = torch.randn_like(mu)\n return eps * std + mu\n \n \nif __name__ == \"__main__\":\n force_encoder = 
ForceEncoder(force_dim=6, z_dim=128)\n\n inp = torch.randn(1, 6, 32) \n y = force_encoder(inp)\n print(y.shape)\n\n img_encoder = ImageEncoder(z_dim=128)\n img_decoder = ImageDecoder(z_dim=128)\n inp = torch.randn(1, 3, 128, 128)\n img_out, img_out_convs = img_encoder(inp)\n\n embedding = torch.split(img_out, 128, dim=1)[0].view(img_out.size(0), 128, 1, 1).expand(-1, -1, 2, 2)\n reconstruct_inp = img_decoder(embedding, img_out_convs)\n print(reconstruct_inp.shape)\n\n # TODO Depth encoder + decoder\n\n # Contact\n",
"import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nfrom PIL import Image\nimport h5py\nimport numpy as np\nimport cv2\nimport os\n\nimport argparse\nimport init_path\nfrom models.model_utils import safe_cuda\nfrom models.conf_utils import *\nfrom models.torch_utils import *\nfrom goal_skill_learning.models import *\nfrom goal_skill_learning.datasets import *\nfrom goal_skill_learning.hydra_path_templates import *\n\ndef get_subtask_label(idx, saved_ep_subtasks_seq, horizon):\n for (start_idx, end_idx, subtask_label) in saved_ep_subtasks_seq:\n if start_idx <= idx <= end_idx:\n return min(end_idx, idx + horizon), subtask_label\n\nimport hydra\nfrom omegaconf import OmegaConf, DictConfig\nimport yaml\nfrom easydict import EasyDict\nfrom hydra.experimental import compose, initialize\n\[email protected](config_path=\"../conf\", config_name=\"config\")\ndef main(hydra_cfg):\n # args = get_common_args(training=True)\n # cfg = update_json_config(args)\n\n yaml_config = OmegaConf.to_yaml(hydra_cfg, resolve=True)\n cfg = EasyDict(yaml.load(yaml_config))\n \n folder_path = \"./\"\n \n modalities = cfg.repr.modalities \n modality_str = get_modalities_str(cfg)\n goal_str = get_goal_str(cfg)\n\n out_parent_dir = f\"datasets/{cfg.data.dataset_name}\"\n\n demo_file_name = os.path.join(out_parent_dir, \"demo.hdf5\")\n demo_file = h5py.File(demo_file_name, \"r\")\n num_eps = demo_file[\"data\"].attrs[\"num_eps\"]\n\n action_dim = len(demo_file[\"data/ep_0/actions\"][()][0])\n\n proprio_dim = 0\n if cfg.skill_training.use_gripper:\n proprio_dim += len(demo_file[\"data/ep_0/gripper_states\"][()][0]) * 5\n if cfg.skill_training.use_joints:\n proprio_dim += len(demo_file[\"data/ep_0/joint_states\"][()][0])\n\n if cfg.multitask.training_task_id == -1:\n subtask_file_name = folder_path+f\"results/skill_data/{cfg.data.dataset_name}_subtasks_{modality_str}_{cfg.repr.z_dim}_{cfg.agglomoration.footprint}_{cfg.agglomoration.dist}_{cfg.agglomoration.segment_footprint}_K{cfg.skill_training.agglomoration.K}_{cfg.agglomoration.affinity}.hdf5\"\n else:\n subtask_file_name = folder_path+f\"results/skill_data/{cfg.data.dataset_name}_SingleTask_{cfg.multitask.training_task_id}_subtasks_{modality_str}_{cfg.repr.z_dim}_{cfg.agglomoration.footprint}_{cfg.agglomoration.dist}_{cfg.agglomoration.segment_footprint}_K{cfg.skill_training.agglomoration.K}_{cfg.agglomoration.affinity}.hdf5\" \n subtask_file = h5py.File(subtask_file_name, \"r\")\n num_subtask = subtask_file[\"subtasks\"].attrs[\"num_subtasks\"]\n\n subgoal_embedding_file_name = singletask_subgoal_embedding_path_template(cfg, modality_str)\n \n subgoal_embedding_file = h5py.File(subgoal_embedding_file_name, \"w\")\n\n networks = {}\n if cfg.skill_subgoal_cfg is not None:\n subgoal_visual_feature_dimension = cfg.skill_subgoal_cfg.visual_feature_dimension\n else:\n raise ValueError\n\n if cfg.skill_training.policy_type == \"no_subgoal\":\n policy_type = PolicyType.NO_SUBGOAL\n elif cfg.skill_training.policy_type == \"normal_subgoal\":\n policy_type = PolicyType.NORMAL_SUBGOAL\n if cfg.skill_training.policy_type == \"normal_subgoal\":\n for i in range(num_subtask):\n template = single_subskill_path_template(cfg, subtask_id=i, use_cvae=cfg.skill_cvae_cfg.enable)\n output_dir = template.output_dir\n model_checkpoint_name = template.model_checkpoint_name\n network_state_dict, network_cfg = torch_load_model(model_checkpoint_name)\n\n print(model_checkpoint_name)\n # print(network_cfg)\n\n if cfg.skill_cvae_cfg.enable:\n network 
= safe_cuda(BCVAEPolicy(action_dim=action_dim,\n state_dim=network_cfg.skill_training.state_dim,\n proprio_dim=proprio_dim,\n data_modality=network_cfg.skill_training.data_modality,\n use_eye_in_hand=network_cfg.skill_training.use_eye_in_hand,\n use_subgoal_eye_in_hand=network_cfg.skill_subgoal_cfg.use_eye_in_hand,\n activation=network_cfg.skill_training.activation,\n z_dim=network_cfg.repr.z_dim,\n num_kp=network_cfg.skill_training.num_kp,\n img_h=network_cfg.skill_training.img_h,\n img_w=network_cfg.skill_training.img_w,\n visual_feature_dimension=network_cfg.skill_training.visual_feature_dimension,\n subgoal_visual_feature_dimension=subgoal_visual_feature_dimension,\n action_squash=network_cfg.skill_training.action_squash,\n policy_layer_dims=network_cfg.skill_training.policy_layer_dims,\n policy_type=policy_type,\n subgoal_type=network_cfg.skill_subgoal_cfg.subgoal_type,\n latent_dim=network_cfg.skill_cvae_cfg.latent_dim,\n ))\n elif cfg.skill_training.use_rnn:\n print(\"Using BC RNN\")\n network = safe_cuda(BCRNNPolicy(action_dim=action_dim,\n proprio_dim=proprio_dim,\n data_modality=network_cfg.skill_training.data_modality,\n use_eye_in_hand=network_cfg.skill_training.use_eye_in_hand,\n use_subgoal_eye_in_hand=network_cfg.skill_subgoal_cfg.use_eye_in_hand,\n activation=network_cfg.skill_training.activation,\n z_dim=network_cfg.repr.z_dim,\n num_kp=network_cfg.skill_training.num_kp,\n img_h=network_cfg.skill_training.img_h,\n img_w=network_cfg.skill_training.img_w,\n visual_feature_dimension=network_cfg.skill_training.visual_feature_dimension,\n subgoal_visual_feature_dimension=subgoal_visual_feature_dimension,\n action_squash=network_cfg.skill_training.action_squash, \n policy_layer_dims=network_cfg.skill_training.policy_layer_dims,\n policy_type=policy_type,\n subgoal_type=network_cfg.skill_subgoal_cfg.subgoal_type,\n rnn_num_layers=network_cfg.skill_training.rnn_num_layers,\n rnn_hidden_dim=network_cfg.skill_training.rnn_hidden_dim,\n rnn_horizon=network_cfg.skill_subgoal_cfg.horizon))\n \n else:\n network = safe_cuda(BCPolicy(action_dim=action_dim,\n state_dim=network_cfg.skill_training.state_dim,\n proprio_dim=proprio_dim,\n data_modality=network_cfg.skill_training.data_modality,\n use_eye_in_hand=network_cfg.skill_training.use_eye_in_hand,\n use_subgoal_eye_in_hand=network_cfg.skill_subgoal_cfg.use_eye_in_hand,\n use_subgoal_spatial_softmax=network_cfg.skill_subgoal_cfg.use_spatial_softmax, \n activation=network_cfg.skill_training.activation,\n z_dim=network_cfg.repr.z_dim,\n num_kp=network_cfg.skill_training.num_kp,\n img_h=network_cfg.skill_training.img_h,\n img_w=network_cfg.skill_training.img_w,\n visual_feature_dimension=network_cfg.skill_training.visual_feature_dimension,\n subgoal_visual_feature_dimension=subgoal_visual_feature_dimension,\n action_squash=network_cfg.skill_training.action_squash,\n policy_layer_dims=network_cfg.skill_training.policy_layer_dims,\n policy_type=policy_type,\n subgoal_type=cfg.skill_subgoal_cfg.subgoal_type))\n network.load_state_dict(network_state_dict)\n networks[i] = network\n\n grp = subgoal_embedding_file.create_group(\"data\")\n for ep_idx in range(num_eps):\n # Generate embedding\n if f\"ep_subtasks_seq_{ep_idx}\" not in subtask_file[\"subtasks\"]:\n print(f\"Skipping {ep_idx}\")\n continue\n saved_ep_subtasks_seq = subtask_file[\"subtasks\"][f\"ep_subtasks_seq_{ep_idx}\"][()]\n agentview_image_names = demo_file[f\"data/ep_{ep_idx}/agentview_image_names\"][()]\n eye_in_hand_image_names = 
demo_file[f\"data/ep_{ep_idx}/eye_in_hand_image_names\"][()]\n\n embeddings = []\n print(\"Ep: \", ep_idx)\n for i in range(len(agentview_image_names)):\n future_idx, subtask_label = get_subtask_label(i, saved_ep_subtasks_seq, horizon=cfg.skill_subgoal_cfg.horizon)\n agentview_image = safe_cuda(torch.from_numpy(np.array(Image.open(agentview_image_names[future_idx])).transpose(2, 0, 1)).unsqueeze(0)).float() / 255.\n eye_in_hand_image = safe_cuda(torch.from_numpy(np.array(Image.open(eye_in_hand_image_names[future_idx])).transpose(2, 0, 1)).unsqueeze(0)).float() / 255.\n\n if network_cfg.skill_subgoal_cfg.use_eye_in_hand:\n state_image = torch.cat([agentview_image, eye_in_hand_image], dim=1)\n else:\n state_image = agentview_image\n embedding = networks[subtask_label].get_embedding(state_image).detach().cpu().numpy().squeeze()\n embeddings.append(embedding)\n\n if ep_idx % 10 == 0:\n for (start_idx, end_idx, subtask_label) in saved_ep_subtasks_seq:\n print(f\"Subtask: {subtask_label}\")\n print(np.round(embeddings[start_idx], 2))\n print(np.round(embeddings[end_idx], 2))\n \n ep_data_grp = grp.create_group(f\"ep_{ep_idx}\")\n ep_data_grp.create_dataset(\"embedding\", data=np.stack(embeddings, axis=0))\n\n grp.attrs[\"embedding_dim\"] = len(embeddings[-1])\n subtask_file.close()\n demo_file.close()\n subgoal_embedding_file.close()\n \n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.randn_like",
"torch.sigmoid",
"torch.ones",
"torch.nn.Softplus",
"torch.nn.ConvTranspose2d",
"torch.cat",
"torch.sqrt",
"torch.randn",
"torch.zeros",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.exp",
"torch.nn.Linear",
"torch.flatten",
"torch.nn.LeakyReLU",
"torch.cuda.is_available",
"torch.split",
"torch.stack"
],
[
"numpy.round",
"numpy.stack",
"torch.cat"
]
] |
grassking100/optuna
|
[
"3075a1cf6648b3a8f061f904177734a08bb3a3c3"
] |
[
"examples/pytorch_simple.py"
] |
[
"\"\"\"\nOptuna example that optimizes multi-layer perceptrons using PyTorch.\n\nIn this example, we optimize the validation accuracy of hand-written digit recognition using\nPyTorch and MNIST. We optimize the neural network architecture as well as the optimizer\nconfiguration. As it is too time consuming to use the whole MNIST dataset, we here use a small\nsubset of it.\n\nWe have the following two ways to execute this example:\n\n(1) Execute this code directly.\n $ python pytorch_simple.py\n\n\n(2) Execute through CLI.\n $ STUDY_NAME=`optuna create-study --direction maximize --storage sqlite:///example.db`\n $ optuna study optimize pytorch_simple.py objective --n-trials=100 --study $STUDY_NAME \\\n --storage sqlite:///example.db\n\n\"\"\"\n\nimport os\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nimport optuna\n\nDEVICE = torch.device(\"cpu\")\nBATCHSIZE = 128\nCLASSES = 10\nDIR = os.getcwd()\nEPOCHS = 10\nLOG_INTERVAL = 10\nN_TRAIN_EXAMPLES = BATCHSIZE * 30\nN_TEST_EXAMPLES = BATCHSIZE * 10\n\n\ndef define_model(trial):\n # We optimize the number of layers, hidden untis and dropout ratio in each layer.\n n_layers = trial.suggest_int(\"n_layers\", 1, 3)\n layers = []\n\n in_features = 28 * 28\n for i in range(n_layers):\n out_features = trial.suggest_int(\"n_units_l{}\".format(i), 4, 128)\n layers.append(nn.Linear(in_features, out_features))\n layers.append(nn.ReLU())\n p = trial.suggest_uniform(\"dropout_l{}\".format(i), 0.2, 0.5)\n layers.append(nn.Dropout(p))\n\n in_features = out_features\n layers.append(nn.Linear(in_features, CLASSES))\n layers.append(nn.LogSoftmax(dim=1))\n\n return nn.Sequential(*layers)\n\n\ndef get_mnist():\n # Load MNIST dataset.\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(DIR, train=True, download=True, transform=transforms.ToTensor()),\n batch_size=BATCHSIZE,\n shuffle=True,\n )\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST(DIR, train=False, transform=transforms.ToTensor()),\n batch_size=BATCHSIZE,\n shuffle=True,\n )\n\n return train_loader, test_loader\n\n\ndef objective(trial):\n\n # Generate the model.\n model = define_model(trial).to(DEVICE)\n\n # Generate the optimizers.\n optimizer_name = trial.suggest_categorical(\"optimizer\", [\"Adam\", \"RMSprop\", \"SGD\"])\n lr = trial.suggest_uniform(\"lr\", 1e-5, 1e-1)\n optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=lr)\n\n # Get the MNIST dataset.\n train_loader, test_loader = get_mnist()\n\n # Training of the model.\n model.train()\n for epoch in range(EPOCHS):\n for batch_idx, (data, target) in enumerate(train_loader):\n # Limiting training data for faster epochs.\n if batch_idx * BATCHSIZE >= N_TRAIN_EXAMPLES:\n break\n\n data, target = data.view(-1, 28 * 28).to(DEVICE), target.to(DEVICE)\n\n # Zeroing out gradient buffers.\n optimizer.zero_grad()\n # Performing a forward pass.\n output = model(data)\n # Computing negative Log Likelihood loss.\n loss = F.nll_loss(output, target)\n # Performing a backward pass.\n loss.backward()\n # Updating the weights.\n optimizer.step()\n\n # Validation of the model.\n model.eval()\n correct = 0\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(test_loader):\n # Limiting testing data.\n if batch_idx * BATCHSIZE >= N_TEST_EXAMPLES:\n break\n data, target = data.view(-1, 28 * 28).to(DEVICE), target.to(DEVICE)\n output = model(data)\n pred = 
output.argmax(dim=1, keepdim=True) # Get the index of the max log-probability.\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n accuracy = correct / N_TEST_EXAMPLES\n return accuracy\n\n\nif __name__ == \"__main__\":\n study = optuna.create_study(direction=\"maximize\")\n study.optimize(objective, n_trials=100)\n\n print(\"Number of finished trials: \", len(study.trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(\" Value: \", trial.value)\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(\" {}: {}\".format(key, value))\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.LogSoftmax",
"torch.nn.functional.nll_loss",
"torch.nn.Linear",
"torch.no_grad",
"torch.device",
"torch.nn.ReLU"
]
] |
ZhouKai90/object_detection_ssd_caffe
|
[
"47ae0ab8a6dff53f46f6d6be46a17479471f444e"
] |
[
"demo/python/merge_bn_layers.py"
] |
[
"import numpy as np\nimport sys, os\nimport caffe\n\ntrain_proto = os.path.join(os.getcwd(), '../../', 'models/deploy/ssd_vgg16_512.prototxt')\ntrain_model = os.path.join(os.getcwd(), '../../', 'models/deploy/ssd_vgg16_512.prototxt')\n\ndeploy_proto = os.path.join(os.getcwd(), '../../', 'models/deploy/deploy_ssd_vgg16_512.prototxt')\ndeploy_model = os.path.join(os.getcwd(), '../../', 'models/deploy/deploy_ssd_vgg16_512.caffemodel')\n\n\ndef merge_bn(net, nob):\n '''\n merge the batchnorm, scale layer weights to the conv layer, to improve the performance\n var = var + scaleFacotr\n rstd = 1. / sqrt(var + eps)\n w = w * rstd * scale\n b = (b - mean) * rstd * scale + shift\n '''\n for key in net.params.iterkeys():\n if type(net.params[key]) is caffe._caffe.BlobVec:\n if key.endswith(\"/bn\") or key.endswith(\"/scale\"):\n continue\n else:\n conv = net.params[key]\n if not net.params.has_key(key + \"/bn\"):\n for i, w in enumerate(conv):\n nob.params[key][i].data[...] = w.data\n else:\n bn = net.params[key + \"/bn\"]\n scale = net.params[key + \"/scale\"]\n wt = conv[0].data\n channels = wt.shape[0]\n bias = np.zeros(wt.shape[0])\n if len(conv) > 1:\n bias = conv[1].data\n mean = bn[0].data\n var = bn[1].data\n scalef = bn[2].data\n\n scales = scale[0].data\n shift = scale[1].data\n\n if scalef != 0:\n scalef = 1. / scalef\n mean = mean * scalef\n var = var * scalef\n rstd = 1. / np.sqrt(var + 1e-5)\n rstd1 = rstd.reshape((channels, 1, 1, 1))\n scales1 = scales.reshape((channels, 1, 1, 1))\n wt = wt * rstd1 * scales1\n bias = (bias - mean) * rstd * scales + shift\n\n nob.params[key][0].data[...] = wt\n nob.params[key][1].data[...] = bias\n\n\nif __name__ == '__main__':\n net = caffe.Net(train_proto, train_model, caffe.TRAIN)\n net_deploy = caffe.Net(deploy_proto, caffe.TEST)\n\n merge_bn(net, net_deploy)\n net_deploy.save(deploy_model)"
] |
[
[
"numpy.zeros",
"numpy.sqrt"
]
] |
sokian/tensorflow
|
[
"359f53686c87ee76e80353c32a3d22cfb1cf0989"
] |
[
"tensorflow/python/kernel_tests/distributions/util_test.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for utility functions.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport importlib\n\nimport numpy as np\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops.distributions import util as distribution_util\nimport tensorflow.python.ops.nn_grad # pylint: disable=unused-import\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.platform import tf_logging\n\ndu = distribution_util\n\n\ndef try_import(name): # pylint: disable=invalid-name\n module = None\n try:\n module = importlib.import_module(name)\n except ImportError as e:\n tf_logging.warning(\"Could not import %s: %s\" % (name, str(e)))\n return module\n\n\nspecial = try_import(\"scipy.special\")\n\n\ndef _logit(x):\n x = np.asarray(x)\n return np.log(x) - np.log1p(-x)\n\n\nclass AssertCloseTest(test.TestCase):\n\n def testAssertIntegerForm(self):\n # This should only be detected as an integer.\n x = array_ops.placeholder(dtypes.float32)\n y = array_ops.placeholder(dtypes.float32)\n # First component isn't less than float32.eps = 1e-7\n z = array_ops.placeholder(dtypes.float32)\n # This shouldn\"t be detected as an integer.\n w = array_ops.placeholder(dtypes.float32)\n feed_dict = {x: [1., 5, 10, 15, 20], y: [1.1, 5, 10, 15, 20],\n z: [1.0001, 5, 10, 15, 20], w: [1e-8, 5, 10, 15, 20]}\n with self.test_session():\n with ops.control_dependencies([du.assert_integer_form(x)]):\n array_ops.identity(x).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"has non-integer components\"):\n with ops.control_dependencies(\n [du.assert_integer_form(y)]):\n array_ops.identity(y).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"has non-integer components\"):\n with ops.control_dependencies(\n [du.assert_integer_form(z)]):\n array_ops.identity(z).eval(feed_dict=feed_dict)\n\n with self.assertRaisesOpError(\"has non-integer components\"):\n with ops.control_dependencies(\n [du.assert_integer_form(w)]):\n array_ops.identity(w).eval(feed_dict=feed_dict)\n\n\nclass MaybeGetStaticTest(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes()\n def testGetStaticInt(self):\n x = 2\n self.assertEqual(x, du.maybe_get_static_value(x))\n self.assertAllClose(\n np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))\n\n @test_util.run_in_graph_and_eager_modes()\n def 
testGetStaticNumpyArray(self):\n x = np.array(2, dtype=np.int32)\n self.assertEqual(x, du.maybe_get_static_value(x))\n self.assertAllClose(\n np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))\n\n @test_util.run_in_graph_and_eager_modes()\n def testGetStaticConstant(self):\n x = constant_op.constant(2, dtype=dtypes.int32)\n self.assertEqual(np.array(2, dtype=np.int32), du.maybe_get_static_value(x))\n self.assertAllClose(\n np.array(2.), du.maybe_get_static_value(x, dtype=np.float64))\n\n def testGetStaticPlaceholder(self):\n x = array_ops.placeholder(dtype=dtypes.int32, shape=[1])\n self.assertEqual(None, du.maybe_get_static_value(x))\n self.assertEqual(None, du.maybe_get_static_value(x, dtype=np.float64))\n\n\nclass GetLogitsAndProbsTest(test.TestCase):\n\n @test_util.run_in_graph_and_eager_modes()\n def testImproperArguments(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n du.get_logits_and_probs(logits=None, probs=None)\n\n with self.assertRaises(ValueError):\n du.get_logits_and_probs(logits=[0.1], probs=[0.1])\n\n @test_util.run_in_graph_and_eager_modes()\n def testLogits(self):\n p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)\n logits = _logit(p)\n\n with self.test_session():\n new_logits, new_p = du.get_logits_and_probs(\n logits=logits, validate_args=True)\n\n self.assertAllClose(p, self.evaluate(new_p), rtol=1e-5, atol=0.)\n self.assertAllClose(logits, self.evaluate(new_logits), rtol=1e-5, atol=0.)\n\n @test_util.run_in_graph_and_eager_modes()\n def testLogitsMultidimensional(self):\n p = np.array([0.2, 0.3, 0.5], dtype=np.float32)\n logits = np.log(p)\n\n with self.test_session():\n new_logits, new_p = du.get_logits_and_probs(\n logits=logits, multidimensional=True, validate_args=True)\n\n self.assertAllClose(self.evaluate(new_p), p)\n self.assertAllClose(self.evaluate(new_logits), logits)\n\n @test_util.run_in_graph_and_eager_modes()\n def testProbability(self):\n p = np.array([0.01, 0.2, 0.5, 0.7, .99], dtype=np.float32)\n\n with self.test_session():\n new_logits, new_p = du.get_logits_and_probs(\n probs=p, validate_args=True)\n\n self.assertAllClose(_logit(p), self.evaluate(new_logits))\n self.assertAllClose(p, self.evaluate(new_p))\n\n @test_util.run_in_graph_and_eager_modes()\n def testProbabilityMultidimensional(self):\n p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)\n\n with self.test_session():\n new_logits, new_p = du.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True)\n\n self.assertAllClose(np.log(p), self.evaluate(new_logits))\n self.assertAllClose(p, self.evaluate(new_p))\n\n @test_util.run_in_graph_and_eager_modes()\n def testProbabilityValidateArgs(self):\n p = [0.01, 0.2, 0.5, 0.7, .99]\n # Component less than 0.\n p2 = [-1, 0.2, 0.5, 0.3, .2]\n # Component greater than 1.\n p3 = [2, 0.2, 0.5, 0.3, .2]\n\n with self.test_session():\n _, prob = du.get_logits_and_probs(\n probs=p, validate_args=True)\n self.evaluate(prob)\n\n with self.assertRaisesOpError(\"Condition x >= 0\"):\n _, prob = du.get_logits_and_probs(\n probs=p2, validate_args=True)\n self.evaluate(prob)\n\n _, prob = du.get_logits_and_probs(\n probs=p2, validate_args=False)\n self.evaluate(prob)\n\n with self.assertRaisesOpError(\"probs has components greater than 1\"):\n _, prob = du.get_logits_and_probs(\n probs=p3, validate_args=True)\n self.evaluate(prob)\n\n _, prob = du.get_logits_and_probs(\n probs=p3, validate_args=False)\n self.evaluate(prob)\n\n @test_util.run_in_graph_and_eager_modes()\n def 
testProbabilityValidateArgsMultidimensional(self):\n p = np.array([[0.3, 0.4, 0.3], [0.1, 0.5, 0.4]], dtype=np.float32)\n # Component less than 0. Still sums to 1.\n p2 = np.array([[-.3, 0.4, 0.9], [0.1, 0.5, 0.4]], dtype=np.float32)\n # Component greater than 1. Does not sum to 1.\n p3 = np.array([[1.3, 0.0, 0.0], [0.1, 0.5, 0.4]], dtype=np.float32)\n # Does not sum to 1.\n p4 = np.array([[1.1, 0.3, 0.4], [0.1, 0.5, 0.4]], dtype=np.float32)\n\n with self.test_session():\n _, prob = du.get_logits_and_probs(\n probs=p, multidimensional=True)\n self.evaluate(prob)\n\n with self.assertRaisesOpError(\"Condition x >= 0\"):\n _, prob = du.get_logits_and_probs(\n probs=p2, multidimensional=True, validate_args=True)\n self.evaluate(prob)\n\n _, prob = du.get_logits_and_probs(\n probs=p2, multidimensional=True, validate_args=False)\n self.evaluate(prob)\n\n with self.assertRaisesOpError(\n \"(probs has components greater than 1|probs does not sum to 1)\"):\n _, prob = du.get_logits_and_probs(\n probs=p3, multidimensional=True, validate_args=True)\n self.evaluate(prob)\n\n _, prob = du.get_logits_and_probs(\n probs=p3, multidimensional=True, validate_args=False)\n self.evaluate(prob)\n\n with self.assertRaisesOpError(\"probs does not sum to 1\"):\n _, prob = du.get_logits_and_probs(\n probs=p4, multidimensional=True, validate_args=True)\n self.evaluate(prob)\n\n _, prob = du.get_logits_and_probs(\n probs=p4, multidimensional=True, validate_args=False)\n self.evaluate(prob)\n\n def testProbsMultidimShape(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n p = array_ops.ones([int(2**11+1)], dtype=np.float16)\n du.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True)\n\n with self.assertRaisesOpError(\n \"Number of classes exceeds `dtype` precision\"):\n p = array_ops.placeholder(dtype=dtypes.float16)\n _, prob = du.get_logits_and_probs(\n probs=p, multidimensional=True, validate_args=True)\n prob.eval(feed_dict={p: np.ones([int(2**11+1)])})\n\n def testLogitsMultidimShape(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n l = array_ops.ones([int(2**11+1)], dtype=np.float16)\n du.get_logits_and_probs(\n logits=l, multidimensional=True, validate_args=True)\n\n with self.assertRaisesOpError(\n \"Number of classes exceeds `dtype` precision\"):\n l = array_ops.placeholder(dtype=dtypes.float16)\n logit, _ = du.get_logits_and_probs(\n logits=l, multidimensional=True, validate_args=True)\n logit.eval(feed_dict={l: np.ones([int(2**11+1)])})\n\n\nclass EmbedCheckCategoricalEventShapeTest(test.TestCase):\n\n def testTooSmall(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n param = array_ops.ones([1], dtype=np.float16)\n checked_param = du.embed_check_categorical_event_shape(\n param)\n\n with self.assertRaisesOpError(\n \"must have at least 2 events\"):\n param = array_ops.placeholder(dtype=dtypes.float16)\n checked_param = du.embed_check_categorical_event_shape(\n param)\n checked_param.eval(feed_dict={param: np.ones([1])})\n\n def testTooLarge(self):\n with self.test_session():\n with self.assertRaises(ValueError):\n param = array_ops.ones([int(2**11+1)], dtype=dtypes.float16)\n checked_param = du.embed_check_categorical_event_shape(\n param)\n\n with self.assertRaisesOpError(\n \"Number of classes exceeds `dtype` precision\"):\n param = array_ops.placeholder(dtype=dtypes.float16)\n checked_param = du.embed_check_categorical_event_shape(\n param)\n checked_param.eval(feed_dict={param: np.ones([int(2**11+1)])})\n\n 
@test_util.run_in_graph_and_eager_modes()\n def testUnsupportedDtype(self):\n with self.test_session():\n with self.assertRaises(TypeError):\n param = array_ops.ones([int(2**11+1)], dtype=dtypes.qint16)\n du.embed_check_categorical_event_shape(param)\n\n\nclass EmbedCheckIntegerCastingClosedTest(test.TestCase):\n\n def testCorrectlyAssertsNonnegative(self):\n with self.test_session():\n with self.assertRaisesOpError(\"Elements must be non-negative\"):\n x = array_ops.placeholder(dtype=dtypes.float16)\n x_checked = du.embed_check_integer_casting_closed(\n x, target_dtype=dtypes.int16)\n x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.float16)})\n\n def testCorrectlyAssersIntegerForm(self):\n with self.test_session():\n with self.assertRaisesOpError(\"Elements must be int16-equivalent.\"):\n x = array_ops.placeholder(dtype=dtypes.float16)\n x_checked = du.embed_check_integer_casting_closed(\n x, target_dtype=dtypes.int16)\n x_checked.eval(feed_dict={x: np.array([1, 1.5], dtype=np.float16)})\n\n def testCorrectlyAssertsLargestPossibleInteger(self):\n with self.test_session():\n with self.assertRaisesOpError(\"Elements cannot exceed 32767.\"):\n x = array_ops.placeholder(dtype=dtypes.int32)\n x_checked = du.embed_check_integer_casting_closed(\n x, target_dtype=dtypes.int16)\n x_checked.eval(feed_dict={x: np.array([1, 2**15], dtype=np.int32)})\n\n def testCorrectlyAssertsSmallestPossibleInteger(self):\n with self.test_session():\n with self.assertRaisesOpError(\"Elements cannot be smaller than 0.\"):\n x = array_ops.placeholder(dtype=dtypes.int32)\n x_checked = du.embed_check_integer_casting_closed(\n x, target_dtype=dtypes.uint16, assert_nonnegative=False)\n x_checked.eval(feed_dict={x: np.array([1, -1], dtype=np.int32)})\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass LogCombinationsTest(test.TestCase):\n\n def testLogCombinationsBinomial(self):\n n = [2, 5, 12, 15]\n k = [1, 2, 4, 11]\n\n if not special:\n return\n\n log_combs = np.log(special.binom(n, k))\n\n with self.test_session():\n n = np.array(n, dtype=np.float32)\n counts = [[1., 1], [2., 3], [4., 8], [11, 4]]\n log_binom = du.log_combinations(n, counts)\n self.assertEqual([4], log_binom.get_shape())\n self.assertAllClose(log_combs, self.evaluate(log_binom))\n\n def testLogCombinationsShape(self):\n # Shape [2, 2]\n n = [[2, 5], [12, 15]]\n\n with self.test_session():\n n = np.array(n, dtype=np.float32)\n # Shape [2, 2, 4]\n counts = [[[1., 1, 0, 0], [2., 2, 1, 0]], [[4., 4, 1, 3], [10, 1, 1, 4]]]\n log_binom = du.log_combinations(n, counts)\n self.assertEqual([2, 2], log_binom.get_shape())\n\n\nclass DynamicShapeTest(test.TestCase):\n\n def testSameDynamicShape(self):\n with self.test_session():\n scalar = constant_op.constant(2.0)\n scalar1 = array_ops.placeholder(dtype=dtypes.float32)\n\n vector = [0.3, 0.4, 0.5]\n vector1 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])\n vector2 = array_ops.placeholder(dtype=dtypes.float32, shape=[None])\n\n multidimensional = [[0.3, 0.4], [0.2, 0.6]]\n multidimensional1 = array_ops.placeholder(\n dtype=dtypes.float32, shape=[None, None])\n multidimensional2 = array_ops.placeholder(\n dtype=dtypes.float32, shape=[None, None])\n\n # Scalar\n self.assertTrue(\n du.same_dynamic_shape(scalar, scalar1).eval({\n scalar1: 2.0\n }))\n\n # Vector\n\n self.assertTrue(\n du.same_dynamic_shape(vector, vector1).eval({\n vector1: [2.0, 3.0, 4.0]\n }))\n self.assertTrue(\n du.same_dynamic_shape(vector1, vector2).eval({\n vector1: [2.0, 3.0, 4.0],\n vector2: [2.0, 3.5, 6.0]\n }))\n\n 
# Multidimensional\n self.assertTrue(\n du.same_dynamic_shape(\n multidimensional, multidimensional1).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n self.assertTrue(\n du.same_dynamic_shape(\n multidimensional1, multidimensional2).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]],\n multidimensional2: [[1.0, 3.5], [6.3, 2.3]]\n }))\n\n # Scalar, X\n self.assertFalse(\n du.same_dynamic_shape(scalar, vector1).eval({\n vector1: [2.0, 3.0, 4.0]\n }))\n self.assertFalse(\n du.same_dynamic_shape(scalar1, vector1).eval({\n scalar1: 2.0,\n vector1: [2.0, 3.0, 4.0]\n }))\n self.assertFalse(\n du.same_dynamic_shape(scalar, multidimensional1).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n self.assertFalse(\n du.same_dynamic_shape(scalar1, multidimensional1).eval(\n {\n scalar1: 2.0,\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n\n # Vector, X\n self.assertFalse(\n du.same_dynamic_shape(vector, vector1).eval({\n vector1: [2.0, 3.0]\n }))\n self.assertFalse(\n du.same_dynamic_shape(vector1, vector2).eval({\n vector1: [2.0, 3.0, 4.0],\n vector2: [6.0]\n }))\n self.assertFalse(\n du.same_dynamic_shape(vector, multidimensional1).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n self.assertFalse(\n du.same_dynamic_shape(vector1, multidimensional1).eval(\n {\n vector1: [2.0, 3.0, 4.0],\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]]\n }))\n\n # Multidimensional, X\n self.assertFalse(\n du.same_dynamic_shape(\n multidimensional, multidimensional1).eval({\n multidimensional1: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]\n }))\n self.assertFalse(\n du.same_dynamic_shape(\n multidimensional1, multidimensional2).eval({\n multidimensional1: [[2.0, 3.0], [3.0, 4.0]],\n multidimensional2: [[1.0, 3.5, 5.0], [6.3, 2.3, 7.1]]\n }))\n\n\nclass RotateTransposeTest(test.TestCase):\n\n def _np_rotate_transpose(self, x, shift):\n if not isinstance(x, np.ndarray):\n x = np.array(x)\n return np.transpose(x, np.roll(np.arange(len(x.shape)), shift))\n\n @test_util.run_in_graph_and_eager_modes()\n def testRollStatic(self):\n with self.test_session():\n if context.executing_eagerly():\n error_message = r\"Attempt to convert a value \\(None\\)\"\n else:\n error_message = \"None values not supported.\"\n with self.assertRaisesRegexp(ValueError, error_message):\n du.rotate_transpose(None, 1)\n for x in (np.ones(1), np.ones((2, 1)), np.ones((3, 2, 1))):\n for shift in np.arange(-5, 5):\n y = du.rotate_transpose(x, shift)\n self.assertAllEqual(\n self._np_rotate_transpose(x, shift), self.evaluate(y))\n self.assertAllEqual(np.roll(x.shape, shift), y.get_shape().as_list())\n\n def testRollDynamic(self):\n with self.test_session() as sess:\n x = array_ops.placeholder(dtypes.float32)\n shift = array_ops.placeholder(dtypes.int32)\n for x_value in (np.ones(\n 1, dtype=x.dtype.as_numpy_dtype()), np.ones(\n (2, 1), dtype=x.dtype.as_numpy_dtype()), np.ones(\n (3, 2, 1), dtype=x.dtype.as_numpy_dtype())):\n for shift_value in np.arange(-5, 5):\n self.assertAllEqual(\n self._np_rotate_transpose(x_value, shift_value),\n sess.run(du.rotate_transpose(x, shift),\n feed_dict={x: x_value,\n shift: shift_value}))\n\n\nclass PickVectorTest(test.TestCase):\n\n def testCorrectlyPicksVector(self):\n with self.test_session():\n x = np.arange(10, 12)\n y = np.arange(15, 18)\n self.assertAllEqual(\n x, self.evaluate(du.pick_vector(math_ops.less(0, 5), x, y)))\n self.assertAllEqual(\n y, self.evaluate(du.pick_vector(math_ops.less(5, 0), x, y)))\n self.assertAllEqual(x,\n du.pick_vector(\n constant_op.constant(True), x, y)) 
# No eval.\n self.assertAllEqual(y,\n du.pick_vector(\n constant_op.constant(False), x, y)) # No eval.\n\n\nclass PreferStaticRankTest(test.TestCase):\n\n def testNonEmptyConstantTensor(self):\n x = array_ops.zeros((2, 3, 4))\n rank = du.prefer_static_rank(x)\n self.assertIsInstance(rank, np.ndarray)\n self.assertEqual(3, rank)\n\n def testEmptyConstantTensor(self):\n x = constant_op.constant([])\n rank = du.prefer_static_rank(x)\n self.assertIsInstance(rank, np.ndarray)\n self.assertEqual(1, rank)\n\n def testScalarTensor(self):\n x = constant_op.constant(1.)\n rank = du.prefer_static_rank(x)\n self.assertIsInstance(rank, np.ndarray)\n self.assertEqual(0, rank)\n\n def testDynamicRankEndsUpBeingNonEmpty(self):\n x = array_ops.placeholder(np.float64, shape=None)\n rank = du.prefer_static_rank(x)\n with self.test_session():\n self.assertAllEqual(2, rank.eval(feed_dict={x: np.zeros((2, 3))}))\n\n def testDynamicRankEndsUpBeingEmpty(self):\n x = array_ops.placeholder(np.int32, shape=None)\n rank = du.prefer_static_rank(x)\n with self.test_session():\n self.assertAllEqual(1, rank.eval(feed_dict={x: []}))\n\n def testDynamicRankEndsUpBeingScalar(self):\n x = array_ops.placeholder(np.int32, shape=None)\n rank = du.prefer_static_rank(x)\n with self.test_session():\n self.assertAllEqual(0, rank.eval(feed_dict={x: 1}))\n\n\nclass PreferStaticShapeTest(test.TestCase):\n\n def testNonEmptyConstantTensor(self):\n x = array_ops.zeros((2, 3, 4))\n shape = du.prefer_static_shape(x)\n self.assertIsInstance(shape, np.ndarray)\n self.assertAllEqual(np.array([2, 3, 4]), shape)\n\n def testEmptyConstantTensor(self):\n x = constant_op.constant([])\n shape = du.prefer_static_shape(x)\n self.assertIsInstance(shape, np.ndarray)\n self.assertAllEqual(np.array([0]), shape)\n\n def testScalarTensor(self):\n x = constant_op.constant(1.)\n shape = du.prefer_static_shape(x)\n self.assertIsInstance(shape, np.ndarray)\n self.assertAllEqual(np.array([]), shape)\n\n def testDynamicShapeEndsUpBeingNonEmpty(self):\n x = array_ops.placeholder(np.float64, shape=None)\n shape = du.prefer_static_shape(x)\n with self.test_session():\n self.assertAllEqual((2, 3), shape.eval(feed_dict={x: np.zeros((2, 3))}))\n\n def testDynamicShapeEndsUpBeingEmpty(self):\n x = array_ops.placeholder(np.int32, shape=None)\n shape = du.prefer_static_shape(x)\n with self.test_session():\n self.assertAllEqual(np.array([0]), shape.eval(feed_dict={x: []}))\n\n def testDynamicShapeEndsUpBeingScalar(self):\n x = array_ops.placeholder(np.int32, shape=None)\n shape = du.prefer_static_shape(x)\n with self.test_session():\n self.assertAllEqual(np.array([]), shape.eval(feed_dict={x: 1}))\n\n\nclass PreferStaticValueTest(test.TestCase):\n\n def testNonEmptyConstantTensor(self):\n x = array_ops.zeros((2, 3, 4))\n value = du.prefer_static_value(x)\n self.assertIsInstance(value, np.ndarray)\n self.assertAllEqual(np.zeros((2, 3, 4)), value)\n\n def testEmptyConstantTensor(self):\n x = constant_op.constant([])\n value = du.prefer_static_value(x)\n self.assertIsInstance(value, np.ndarray)\n self.assertAllEqual(np.array([]), value)\n\n def testScalarTensor(self):\n x = constant_op.constant(1.)\n value = du.prefer_static_value(x)\n self.assertIsInstance(value, np.ndarray)\n self.assertAllEqual(np.array(1.), value)\n\n def testDynamicValueEndsUpBeingNonEmpty(self):\n x = array_ops.placeholder(np.float64, shape=None)\n value = du.prefer_static_value(x)\n with self.test_session():\n self.assertAllEqual(np.zeros((2, 3)),\n value.eval(feed_dict={x: np.zeros((2, 3))}))\n\n 
def testDynamicValueEndsUpBeingEmpty(self):\n x = array_ops.placeholder(np.int32, shape=None)\n value = du.prefer_static_value(x)\n with self.test_session():\n self.assertAllEqual(np.array([]), value.eval(feed_dict={x: []}))\n\n def testDynamicValueEndsUpBeingScalar(self):\n x = array_ops.placeholder(np.int32, shape=None)\n value = du.prefer_static_value(x)\n with self.test_session():\n self.assertAllEqual(np.array(1), value.eval(feed_dict={x: 1}))\n\n\nclass FillTriangularTest(test.TestCase):\n\n def setUp(self):\n self._rng = np.random.RandomState(42)\n\n def _fill_triangular(self, x, upper=False):\n \"\"\"Numpy implementation of `fill_triangular`.\"\"\"\n x = np.asarray(x)\n # Formula derived by solving for n: m = n(n+1)/2.\n m = np.int32(x.shape[-1])\n n = np.sqrt(0.25 + 2. * m) - 0.5\n if n != np.floor(n):\n raise ValueError(\"Invalid shape.\")\n n = np.int32(n)\n # We can't do: `x[..., -(n**2-m):]` because this doesn't correctly handle\n # `m == n == 1`. Hence, we do absolute indexing.\n x_tail = x[..., (m - (n * n - m)):]\n y = np.concatenate(\n [x, x_tail[..., ::-1]] if upper else [x_tail, x[..., ::-1]],\n axis=-1)\n y = y.reshape(np.concatenate([\n np.int32(x.shape[:-1]),\n np.int32([n, n]),\n ], axis=0))\n return np.triu(y) if upper else np.tril(y)\n\n def _run_test(self, x_, use_deferred_shape=False, **kwargs):\n x_ = np.asarray(x_)\n with self.test_session() as sess:\n static_shape = None if use_deferred_shape else x_.shape\n x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)\n # Add `zeros_like(x)` such that x's value and gradient are identical. We\n # do this so we can ensure each gradient value is mapped to the right\n # gradient location. (Not doing this means the gradient wrt `x` is simple\n # `ones_like(x)`.)\n # Note:\n # zeros_like_x_pl == zeros_like(x_pl)\n # gradient(zeros_like_x_pl, x_pl) == x_pl - 1\n zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)\n - array_ops.stop_gradient(x_pl * (x_pl - 1.)))\n x = x_pl + zeros_like_x_pl\n actual = du.fill_triangular(x, **kwargs)\n grad_actual = gradients_impl.gradients(actual, x_pl)[0]\n [actual_, grad_actual_] = sess.run([actual, grad_actual],\n feed_dict={x_pl: x_})\n expected = self._fill_triangular(x_, **kwargs)\n if use_deferred_shape:\n self.assertEqual(None, actual.shape)\n else:\n self.assertAllEqual(expected.shape, actual.shape)\n self.assertAllClose(expected, actual_, rtol=1e-8, atol=1e-9)\n self.assertAllClose(x_, grad_actual_, rtol=1e-8, atol=1e-9)\n\n def testCorrectlyMakes1x1TriLower(self):\n self._run_test(self._rng.randn(3, int(1*2/2)))\n\n def testCorrectlyMakesNoBatchTriLower(self):\n self._run_test(self._rng.randn(int(4*5/2)))\n\n def testCorrectlyMakesBatchTriLower(self):\n self._run_test(self._rng.randn(2, 3, int(3*4/2)))\n\n def testCorrectlyMakesBatchTriLowerUnknownShape(self):\n self._run_test(self._rng.randn(2, 3, int(3*4/2)), use_deferred_shape=True)\n\n def testCorrectlyMakesBatch7x7TriLowerUnknownShape(self):\n self._run_test(self._rng.randn(2, 3, int(7*8/2)), use_deferred_shape=True)\n\n def testCorrectlyMakesBatch7x7TriLower(self):\n self._run_test(self._rng.randn(2, 3, int(7*8/2)))\n\n def testCorrectlyMakes1x1TriUpper(self):\n self._run_test(self._rng.randn(3, int(1*2/2)), upper=True)\n\n def testCorrectlyMakesNoBatchTriUpper(self):\n self._run_test(self._rng.randn(int(4*5/2)), upper=True)\n\n def testCorrectlyMakesBatchTriUpper(self):\n self._run_test(self._rng.randn(2, 2, int(3*4/2)), upper=True)\n\n def testCorrectlyMakesBatchTriUpperUnknownShape(self):\n 
self._run_test(self._rng.randn(2, 2, int(3*4/2)),\n use_deferred_shape=True,\n upper=True)\n\n def testCorrectlyMakesBatch7x7TriUpperUnknownShape(self):\n self._run_test(self._rng.randn(2, 3, int(7*8/2)),\n use_deferred_shape=True,\n upper=True)\n\n def testCorrectlyMakesBatch7x7TriUpper(self):\n self._run_test(self._rng.randn(2, 3, int(7*8/2)), upper=True)\n\n\nclass FillTriangularInverseTest(FillTriangularTest):\n\n def _run_test(self, x_, use_deferred_shape=False, **kwargs):\n x_ = np.asarray(x_)\n with self.test_session() as sess:\n static_shape = None if use_deferred_shape else x_.shape\n x_pl = array_ops.placeholder_with_default(x_, shape=static_shape)\n zeros_like_x_pl = (x_pl * array_ops.stop_gradient(x_pl - 1.)\n - array_ops.stop_gradient(x_pl * (x_pl - 1.)))\n x = x_pl + zeros_like_x_pl\n actual = du.fill_triangular(x, **kwargs)\n inverse_actual = du.fill_triangular_inverse(actual, **kwargs)\n\n inverse_actual_ = sess.run(\n inverse_actual,\n feed_dict={x_pl: x_})\n\n if use_deferred_shape:\n self.assertEqual(None, inverse_actual.shape)\n else:\n self.assertAllEqual(x_.shape, inverse_actual.shape)\n self.assertAllEqual(x_, inverse_actual_)\n\n\nclass ReduceWeightedLogSumExp(test.TestCase):\n\n def _reduce_weighted_logsumexp(self, logx, w, axis, keep_dims=False):\n m = np.max(logx, axis=axis, keepdims=True)\n sum_ = np.sum(w * np.exp(logx - m), axis=axis, keepdims=keep_dims)\n sgn = np.sign(sum_)\n if not keep_dims:\n m = np.squeeze(m, axis=axis)\n return m + np.log(sgn * sum_), sgn\n\n def testNoWeights(self):\n logx_ = np.array([[0., -1, 1000.],\n [0, 1, -1000.],\n [-5, 0, 5]])\n with self.test_session() as sess:\n logx = constant_op.constant(logx_)\n expected = math_ops.reduce_logsumexp(logx, axis=-1)\n grad_expected = gradients_impl.gradients(expected, logx)[0]\n actual, actual_sgn = du.reduce_weighted_logsumexp(\n logx, axis=-1, return_sign=True)\n grad_actual = gradients_impl.gradients(actual, logx)[0]\n [actual_, actual_sgn_, grad_actual_,\n expected_, grad_expected_] = sess.run([\n actual, actual_sgn, grad_actual,\n expected, grad_expected])\n self.assertAllEqual(expected_, actual_)\n self.assertAllEqual(grad_expected_, grad_actual_)\n self.assertAllEqual([1., 1, 1], actual_sgn_)\n\n def testNegativeWeights(self):\n logx_ = np.array([[0., -1, 1000.],\n [0, 1, -1000.],\n [-5, 0, 5]])\n w_ = np.array([[1., 1, -1],\n [1, -2, 1],\n [1, 0, 1]])\n expected, _ = self._reduce_weighted_logsumexp(logx_, w_, axis=-1)\n with self.test_session() as sess:\n logx = constant_op.constant(logx_)\n w = constant_op.constant(w_)\n actual, actual_sgn = du.reduce_weighted_logsumexp(\n logx, w, axis=-1, return_sign=True)\n [actual_, actual_sgn_] = sess.run([actual, actual_sgn])\n self.assertAllEqual(expected, actual_)\n self.assertAllEqual([-1., -1, 1], actual_sgn_)\n\n def testKeepDims(self):\n logx_ = np.array([[0., -1, 1000.],\n [0, 1, -1000.],\n [-5, 0, 5]])\n w_ = np.array([[1., 1, -1],\n [1, -2, 1],\n [1, 0, 1]])\n expected, _ = self._reduce_weighted_logsumexp(\n logx_, w_, axis=-1, keep_dims=True)\n with self.test_session() as sess:\n logx = constant_op.constant(logx_)\n w = constant_op.constant(w_)\n actual, actual_sgn = du.reduce_weighted_logsumexp(\n logx, w, axis=-1, return_sign=True, keep_dims=True)\n [actual_, actual_sgn_] = sess.run([actual, actual_sgn])\n self.assertAllEqual(expected, actual_)\n self.assertAllEqual([[-1.], [-1], [1]], actual_sgn_)\n\n def testDocString(self):\n \"\"\"This test verifies the correctness of the docstring examples.\"\"\"\n\n with self.test_session():\n 
x = constant_op.constant([[0., 0, 0],\n [0, 0, 0]])\n\n w = constant_op.constant([[-1., 1, 1],\n [1, 1, 1]])\n\n self.assertAllClose(\n np.log(4), self.evaluate(du.reduce_weighted_logsumexp(x, w)))\n\n with np.errstate(divide=\"ignore\"):\n self.assertAllClose(\n np.log([0, 2, 2]),\n self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=0)))\n\n self.assertAllClose(\n np.log([1, 3]),\n self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=1)))\n\n self.assertAllClose(\n np.log([[1], [3]]),\n self.evaluate(\n du.reduce_weighted_logsumexp(x, w, axis=1, keep_dims=True)))\n\n self.assertAllClose(\n np.log(4),\n self.evaluate(du.reduce_weighted_logsumexp(x, w, axis=[0, 1])))\n\n\nclass GenNewSeedTest(test.TestCase):\n\n def testOnlyNoneReturnsNone(self):\n self.assertFalse(du.gen_new_seed(0, \"salt\") is None)\n self.assertTrue(du.gen_new_seed(None, \"salt\") is None)\n\n\n# TODO(jvdillon): Merge this test back into:\n# tensorflow/python/kernel_tests/softplus_op_test.py\n# once TF core is accepting new ops.\nclass SoftplusTest(test.TestCase):\n\n def _npSoftplus(self, np_features):\n np_features = np.asarray(np_features)\n zero = np.asarray(0).astype(np_features.dtype)\n return np.logaddexp(zero, np_features)\n\n def _testSoftplus(self, np_features, use_gpu=False):\n np_features = np.asarray(np_features)\n np_softplus = self._npSoftplus(np_features)\n with self.test_session(use_gpu=use_gpu) as sess:\n softplus = nn_ops.softplus(np_features)\n softplus_inverse = du.softplus_inverse(softplus)\n [tf_softplus, tf_softplus_inverse] = sess.run([\n softplus, softplus_inverse])\n self.assertAllCloseAccordingToType(np_softplus, tf_softplus)\n rtol = {\"float16\": 0.07, \"float32\": 0.003, \"float64\": 0.002}.get(\n str(np_features.dtype), 1e-6)\n # This will test that we correctly computed the inverse by verifying we\n # recovered the original input.\n self.assertAllCloseAccordingToType(\n np_features, tf_softplus_inverse,\n atol=0., rtol=rtol)\n self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),\n tf_softplus > 0)\n\n self.assertShapeEqual(np_softplus, softplus)\n self.assertShapeEqual(np_softplus, softplus_inverse)\n\n self.assertAllEqual(np.ones_like(tf_softplus).astype(np.bool),\n np.isfinite(tf_softplus))\n self.assertAllEqual(np.ones_like(tf_softplus_inverse).astype(np.bool),\n np.isfinite(tf_softplus_inverse))\n\n def testNumbers(self):\n for t in [np.float16, np.float32, np.float64]:\n lower = {np.float16: -15, np.float32: -50, np.float64: -50}.get(t, -100)\n upper = {np.float16: 50, np.float32: 50, np.float64: 50}.get(t, 100)\n self._testSoftplus(\n np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(\n [2, -1]),\n use_gpu=False)\n self._testSoftplus(\n np.array(np.linspace(lower, upper, int(1e3)).astype(t)).reshape(\n [2, -1]),\n use_gpu=True)\n log_eps = np.log(np.finfo(t).eps)\n one = t(1)\n ten = t(10)\n self._testSoftplus(\n [\n log_eps, log_eps - one, log_eps + one, log_eps - ten,\n log_eps + ten, -log_eps, -log_eps - one, -log_eps + one,\n -log_eps - ten, -log_eps + ten\n ],\n use_gpu=False)\n self._testSoftplus(\n [\n log_eps, log_eps - one, log_eps + one, log_eps - ten,\n log_eps + ten - log_eps, -log_eps - one, -log_eps + one,\n -log_eps - ten, -log_eps + ten\n ],\n use_gpu=True)\n\n def testGradient(self):\n with self.test_session():\n x = constant_op.constant(\n [-0.9, -0.7, -0.5, -0.3, -0.1, 0.1, 0.3, 0.5, 0.7, 0.9],\n shape=[2, 5],\n name=\"x\")\n y = nn_ops.softplus(x, name=\"softplus\")\n x_init = np.asarray(\n [[-0.9, -0.7, -0.5, -0.3, -0.1], [0.1, 0.3, 
0.5, 0.7, 0.9]],\n dtype=np.float32,\n order=\"F\")\n err = gradient_checker.compute_gradient_error(\n x, [2, 5], y, [2, 5], x_init_value=x_init)\n tf_logging.vlog(2, \"softplus (float) gradient err = \", err)\n self.assertLess(err, 1e-4)\n\n def testInverseSoftplusGradientNeverNan(self):\n with self.test_session():\n # Note that this range contains both zero and inf.\n x = constant_op.constant(np.logspace(-8, 6).astype(np.float16))\n y = du.softplus_inverse(x)\n grads = self.evaluate(gradients_impl.gradients(y, x)[0])\n # Equivalent to `assertAllFalse` (if it existed).\n self.assertAllEqual(np.zeros_like(grads).astype(np.bool), np.isnan(grads))\n\n def testInverseSoftplusGradientFinite(self):\n with self.test_session():\n # This range of x is all finite, and so is 1 / x. So the\n # gradient and its approximations should be finite as well.\n x = constant_op.constant(np.logspace(-4.8, 4.5).astype(np.float16))\n y = du.softplus_inverse(x)\n grads = self.evaluate(gradients_impl.gradients(y, x)[0])\n # Equivalent to `assertAllTrue` (if it existed).\n self.assertAllEqual(\n np.ones_like(grads).astype(np.bool), np.isfinite(grads))\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass ArgumentsTest(test.TestCase):\n\n def testNoArguments(self):\n def foo():\n return du.parent_frame_arguments()\n\n self.assertEqual({}, foo())\n\n def testPositionalArguments(self):\n def foo(a, b, c, d): # pylint: disable=unused-argument\n return du.parent_frame_arguments()\n\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3, \"d\": 4}, foo(1, 2, 3, 4))\n\n # Tests that it does not matter where this function is called, and\n # no other local variables are returned back.\n def bar(a, b, c):\n unused_x = a * b\n unused_y = c * 3\n return du.parent_frame_arguments()\n\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3}, bar(1, 2, 3))\n\n def testOverloadedArgumentValues(self):\n def foo(a, b, c): # pylint: disable=unused-argument\n a = 42\n b = 31\n c = 42\n return du.parent_frame_arguments()\n self.assertEqual({\"a\": 42, \"b\": 31, \"c\": 42}, foo(1, 2, 3))\n\n def testKeywordArguments(self):\n def foo(**kwargs): # pylint: disable=unused-argument\n return du.parent_frame_arguments()\n\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3, \"d\": 4}, foo(a=1, b=2, c=3, d=4))\n\n def testPositionalKeywordArgs(self):\n def foo(a, b, c, **kwargs): # pylint: disable=unused-argument\n return du.parent_frame_arguments()\n\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3}, foo(a=1, b=2, c=3))\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3, \"unicorn\": None},\n foo(a=1, b=2, c=3, unicorn=None))\n\n def testNoVarargs(self):\n def foo(a, b, c, *varargs, **kwargs): # pylint: disable=unused-argument\n return du.parent_frame_arguments()\n\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3}, foo(a=1, b=2, c=3))\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3}, foo(1, 2, 3, *[1, 2, 3]))\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3, \"unicorn\": None},\n foo(1, 2, 3, unicorn=None))\n self.assertEqual({\"a\": 1, \"b\": 2, \"c\": 3, \"unicorn\": None},\n foo(1, 2, 3, *[1, 2, 3], unicorn=None))\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] |
[
[
"tensorflow.python.framework.test_util.run_in_graph_and_eager_modes",
"numpy.sqrt",
"numpy.asarray",
"numpy.squeeze",
"tensorflow.python.ops.array_ops.placeholder",
"numpy.concatenate",
"numpy.max",
"tensorflow.python.ops.array_ops.zeros",
"numpy.zeros_like",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.gradients_impl.gradients",
"numpy.exp",
"numpy.tril",
"tensorflow.python.ops.array_ops.stop_gradient",
"tensorflow.python.ops.nn_ops.softplus",
"tensorflow.python.ops.array_ops.identity",
"numpy.roll",
"numpy.ones_like",
"tensorflow.python.ops.math_ops.reduce_logsumexp",
"numpy.arange",
"tensorflow.python.ops.math_ops.less",
"numpy.finfo",
"tensorflow.python.platform.test.main",
"tensorflow.python.ops.array_ops.ones",
"numpy.log1p",
"numpy.triu",
"numpy.zeros",
"tensorflow.python.platform.tf_logging.vlog",
"numpy.log",
"tensorflow.python.ops.gradient_checker.compute_gradient_error",
"numpy.isnan",
"numpy.logspace",
"numpy.floor",
"numpy.errstate",
"numpy.array",
"numpy.random.RandomState",
"numpy.logaddexp",
"numpy.isfinite",
"numpy.int32",
"numpy.ones",
"numpy.sign",
"tensorflow.python.ops.array_ops.placeholder_with_default",
"tensorflow.python.framework.constant_op.constant"
]
] |
hunse/speedtest
|
[
"6e0edcd42d113a141b262d8d105c8f611dae63c4"
] |
[
"test_theano_gpu.py"
] |
[
"from __future__ import print_function\n\nimport os\nimport timeit\n\nimport numpy as np\n\ngpuflags = 'device=gpu,floatX=float32'\nif os.environ.has_key('THEANO_FLAGS'):\n os.environ['THEANO_FLAGS'] += gpuflags\nelse:\n os.environ['THEANO_FLAGS'] = gpuflags\nimport theano\nimport theano.tensor as T\ndtype = theano.config.floatX\n\nprint(\"Theano location: %s\" % theano.__file__)\nprint(\"Theano version: %s\" % theano.__version__)\n\n# --- create Theano function\nsA = T.matrix(dtype=dtype)\nsB = T.matrix(dtype=dtype)\nsC = T.dot(sA,sB)\nf = theano.function([sA, sB], sC)\n\n# --- create test data\nm = 10000\nk = 2000\nn = 1500\n\nrng = np.random.RandomState(0)\n\nA = np.asarray(rng.randn(m, k), dtype=dtype)\nB = np.asarray(rng.randn(k, n), dtype=dtype)\n\n# --- run tests\nN = 10 # number of repeats, for tests\n\nt = min(timeit.Timer('np.dot(A,B)', 'from __main__ import np, A, B'\n ).repeat(N, 1))\nprint(\"Numpy: multiplied (%d,%d) matrix with (%d,%d) matrix in %0.3f s\"\n % (m,k,k,n,t))\n\nt = min(timeit.Timer('f(A,B)', 'from __main__ import f, A, B'\n ).repeat(N, 1))\nprint(\"Theano (GPU): multiplied (%d,%d) matrix with (%d,%d) matrix in %0.3f s\"\n % (m,k,k,n,t))\n"
] |
[
[
"numpy.random.RandomState"
]
] |
off99555/ssd_keras
|
[
"363ba6b47d0e631e7272031f9054f7c7ebc0615e"
] |
[
"keras_layers/keras_layer_L2Normalization.py"
] |
[
"'''\nA custom Keras layer to perform L2-normalization.\n\nCopyright (C) 2018 Pierluigi Ferrari\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport tensorflow.keras.backend as K\nfrom tensorflow.keras.layers import InputSpec\nfrom tensorflow.keras.layers import Layer\n\nclass L2Normalization(Layer):\n '''\n Performs L2 normalization on the input tensor with a learnable scaling parameter\n as described in the paper \"Parsenet: Looking Wider to See Better\" (see references)\n and as used in the original SSD model.\n\n Arguments:\n gamma_init (int): The initial scaling parameter. Defaults to 20 following the\n SSD paper.\n\n Input shape:\n 4D tensor of shape `(batch, channels, height, width)` if `dim_ordering = 'th'`\n or `(batch, height, width, channels)` if `dim_ordering = 'tf'`.\n\n Returns:\n The scaled tensor. Same shape as the input tensor.\n\n References:\n http://cs.unc.edu/~wliu/papers/parsenet.pdf\n '''\n\n def __init__(self, gamma_init=20, **kwargs):\n if K.image_data_format() == 'channels_last':\n self.axis = 3\n else:\n self.axis = 1\n self.gamma_init = gamma_init\n super(L2Normalization, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.input_spec = [InputSpec(shape=input_shape)]\n gamma = self.gamma_init * np.ones((input_shape[self.axis],))\n self.gamma = K.variable(gamma, name='{}_gamma'.format(self.name))\n self.trainable_weights = [self.gamma]\n super(L2Normalization, self).build(input_shape)\n\n def call(self, x, mask=None):\n output = K.l2_normalize(x, self.axis)\n return output * self.gamma\n\n def get_config(self):\n config = {\n 'gamma_init': self.gamma_init\n }\n base_config = super(L2Normalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n"
] |
[
[
"tensorflow.keras.backend.l2_normalize",
"tensorflow.keras.layers.InputSpec",
"tensorflow.keras.backend.image_data_format",
"numpy.ones"
]
] |
basanovase/taming-transformers
|
[
"c30d4c0be67428fb63d0188b658d83a12fb4595f"
] |
[
"taming/models/vqgan.py"
] |
[
"import torch\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\n\nfrom taming_transformers.main import instantiate_from_config\n\nfrom taming.modules.diffusionmodules.model import Encoder, Decoder\nfrom taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer\nfrom taming.modules.vqvae.quantize import GumbelQuantize\nfrom taming.modules.vqvae.quantize import EMAVectorQuantizer\n\nclass VQModel(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n n_embed,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n remap=None,\n sane_index_shape=False, # tell vector quantizer to return indices as bhw\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,\n remap=remap, sane_index_shape=sane_index_shape)\n self.quant_conv = torch.nn.Conv2d(ddconfig[\"z_channels\"], embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n self.image_key = image_key\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n quant, emb_loss, info = self.quantize(h)\n return quant, emb_loss, info\n\n def decode(self, quant):\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec\n\n def decode_code(self, code_b):\n quant_b = self.quantize.embed_code(code_b)\n dec = self.decode(quant_b)\n return dec\n\n def forward(self, input):\n quant, diff, _ = self.encode(input)\n dec = self.decode(quant)\n return dec, diff\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)\n return x.float()\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n\n if optimizer_idx == 0:\n # autoencode\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"train/aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)\n return aeloss\n\n if optimizer_idx == 1:\n # discriminator\n discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"train/discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n aeloss, log_dict_ae = self.loss(qloss, 
x, xrec, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n rec_loss = log_dict_ae[\"val/rec_loss\"]\n self.log(\"val/rec_loss\", rec_loss,\n prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)\n self.log(\"val/aeloss\", aeloss,\n prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quantize.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n def log_images(self, batch, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n xrec, _ = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"inputs\"] = x\n log[\"reconstructions\"] = xrec\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x\n\n\nclass VQSegmentationModel(VQModel):\n def __init__(self, n_labels, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.register_buffer(\"colorize\", torch.randn(3, n_labels, 1, 1))\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quantize.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n return opt_ae\n\n def training_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, split=\"train\")\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)\n return aeloss\n\n def validation_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, split=\"val\")\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)\n total_loss = log_dict_ae[\"val/total_loss\"]\n self.log(\"val/total_loss\", total_loss,\n prog_bar=True, logger=True, on_step=True, on_epoch=True, sync_dist=True)\n return aeloss\n\n @torch.no_grad()\n def log_images(self, batch, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n xrec, _ = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n # convert logits to indices\n xrec = torch.argmax(xrec, dim=1, keepdim=True)\n xrec = F.one_hot(xrec, num_classes=x.shape[1])\n xrec = xrec.squeeze(1).permute(0, 3, 1, 2).float()\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"inputs\"] = x\n log[\"reconstructions\"] = xrec\n return log\n\n\nclass VQNoDiscModel(VQModel):\n def __init__(self,\n ddconfig,\n 
lossconfig,\n n_embed,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None\n ):\n super().__init__(ddconfig=ddconfig, lossconfig=lossconfig, n_embed=n_embed, embed_dim=embed_dim,\n ckpt_path=ckpt_path, ignore_keys=ignore_keys, image_key=image_key,\n colorize_nlabels=colorize_nlabels)\n\n def training_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n # autoencode\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split=\"train\")\n output = pl.TrainResult(minimize=aeloss)\n output.log(\"train/aeloss\", aeloss,\n prog_bar=True, logger=True, on_step=True, on_epoch=True)\n output.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)\n return output\n\n def validation_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, self.global_step, split=\"val\")\n rec_loss = log_dict_ae[\"val/rec_loss\"]\n output = pl.EvalResult(checkpoint_on=rec_loss)\n output.log(\"val/rec_loss\", rec_loss,\n prog_bar=True, logger=True, on_step=True, on_epoch=True)\n output.log(\"val/aeloss\", aeloss,\n prog_bar=True, logger=True, on_step=True, on_epoch=True)\n output.log_dict(log_dict_ae)\n\n return output\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quantize.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=self.learning_rate, betas=(0.5, 0.9))\n return optimizer\n\n\nclass GumbelVQ(VQModel):\n def __init__(self,\n ddconfig,\n lossconfig,\n n_embed,\n embed_dim,\n temperature_scheduler_config,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n kl_weight=1e-8,\n remap=None,\n ):\n\n z_channels = ddconfig[\"z_channels\"]\n super().__init__(ddconfig,\n lossconfig,\n n_embed,\n embed_dim,\n ckpt_path=None,\n ignore_keys=ignore_keys,\n image_key=image_key,\n colorize_nlabels=colorize_nlabels,\n monitor=monitor,\n )\n\n self.loss.n_classes = n_embed\n self.vocab_size = n_embed\n\n self.quantize = GumbelQuantize(z_channels, embed_dim,\n n_embed=n_embed,\n kl_weight=kl_weight, temp_init=1.0,\n remap=remap)\n\n self.temperature_scheduler = instantiate_from_config(temperature_scheduler_config) # annealing of temp\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def temperature_scheduling(self):\n self.quantize.temperature = self.temperature_scheduler(self.global_step)\n\n def encode_to_prequant(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode_code(self, code_b):\n raise NotImplementedError\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n self.temperature_scheduling()\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n\n if optimizer_idx == 0:\n # autoencode\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)\n self.log(\"temperature\", self.quantize.temperature, prog_bar=False, logger=True, on_step=True, on_epoch=True)\n return aeloss\n\n if optimizer_idx == 1:\n # discriminator\n discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n 
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x, return_pred_indices=True)\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n rec_loss = log_dict_ae[\"val/rec_loss\"]\n self.log(\"val/rec_loss\", rec_loss,\n prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)\n self.log(\"val/aeloss\", aeloss,\n prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def log_images(self, batch, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n # encode\n h = self.encoder(x)\n h = self.quant_conv(h)\n quant, _, _ = self.quantize(h)\n # decode\n x_rec = self.decode(quant)\n log[\"inputs\"] = x\n log[\"reconstructions\"] = x_rec\n return log\n\n\nclass EMAVQ(VQModel):\n def __init__(self,\n ddconfig,\n lossconfig,\n n_embed,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n remap=None,\n sane_index_shape=False, # tell vector quantizer to return indices as bhw\n ):\n super().__init__(ddconfig,\n lossconfig,\n n_embed,\n embed_dim,\n ckpt_path=None,\n ignore_keys=ignore_keys,\n image_key=image_key,\n colorize_nlabels=colorize_nlabels,\n monitor=monitor,\n )\n self.quantize = EMAVectorQuantizer(n_embed=n_embed,\n embedding_dim=embed_dim,\n beta=0.25,\n remap=remap)\n def configure_optimizers(self):\n lr = self.learning_rate\n #Remove self.quantize from parameter list since it is updated via EMA\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n"
] |
[
[
"torch.load",
"torch.randn",
"torch.nn.functional.conv2d",
"torch.nn.Conv2d",
"torch.no_grad",
"torch.nn.functional.one_hot",
"torch.argmax"
]
] |
dineshresearch/Real_time_vehicle_tracking
|
[
"310d9cad9d151675c96fec3e5a103489ee9b2f46"
] |
[
"opt1.py"
] |
[
"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture('vtest.avi')\n\n# params for ShiTomasi corner detection\nfeature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n\n# Parameters for lucas kanade optical flow\nlk_params = dict( winSize = (15,15),\n maxLevel = 2,\n criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n# Create some random colors\ncolor = np.random.randint(0,255,(100,3))\n\n# Take first frame and find corners in it\nret, old_frame = cap.read()\nif ret: \n\told_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)\np0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)\n\n# Create a mask image for drawing purposes\nmask = np.zeros_like(old_frame)\n\nwhile(1):\n ret,frame = cap.read()\n if ret: \n \tframe_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # calculate optical flow\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)\n\n # Select good points\n good_new = p1[st==1]\n good_old = p0[st==1]\n\n # draw the tracks\n for i,(new,old) in enumerate(zip(good_new,good_old)):\n a,b = new.ravel()\n c,d = old.ravel()\n mask = cv2.line(mask, (a,b),(c,d), color[i].tolist(), 2)\n frame = cv2.circle(frame,(a,b),5,color[i].tolist(),-1)\n img = cv2.add(frame,mask)\n\n cv2.imshow('frame',img)\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\n # Now update the previous frame and previous points\n old_gray = frame_gray.copy()\n p0 = good_new.reshape(-1,1,2)\n\ncv2.destroyAllWindows()\ncap.release()\n"
] |
[
[
"numpy.zeros_like",
"numpy.random.randint"
]
] |
meghu2791/DeepLearningModels
|
[
"d98190d1cc44f530b9cc6e51fb791c50c8fd5f4f"
] |
[
"utils.py"
] |
[
"import sys\nimport os\nimport operator\nimport re\nimport string\nimport nltk\n#Uncomment only for downloading NLTK collections (all): nltk.download()\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.corpus import stopwords\nimport codecs\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport operator\nfrom torchtext.utils import download_from_url\nimport io\nimport os\n#import gensim\n#from gensim.models import KeyedVectors\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics.pairwise import cosine_similarity\nimport re\nfrom numpy import dot\nfrom numpy.linalg import norm\n\n\ndef clean_data(line):\n #stemmer = PorterStemmer()\n srclist = []\n spltokens = ['<START>', '<END>', '<SEC>', '<ENDSEC>']\n punctuation = '!\"#$%&’.()*+,-/:;=?@[\\]^_\\'`{|}\\\"~<>'\n lemma = WordNetLemmatizer()\n\n l = re.sub(r'\\d+',' ',line) #remove numbers\n \n #remove punctuation\n for c in punctuation:\n l = l.replace(c, ' ')\n #l = l.translate(string.maketrans(\"\", \"\"), punctuation) #remove punctuation marks\n word_tokens = l.lower().strip().split() #covert to lower case\n \n #remove stop words\n stop_words = set(stopwords.words('english'))\n tokens = [ i for i in word_tokens if not i in stop_words ]\n \n #lemmatization - gets words in its root form (having, had -> have for example)\n for w in tokens:\n srclist.append(lemma.lemmatize(w))\n return srclist\n\n\ndef read_docs(srcfile, m):\n src = ''\n print(\"Reading....\" + str(srcfile))\n\n try:\n with open(srcfile, 'r') as f:\n start = 1000\n end = 0\n data = []\n ind = 0\n #for ind, line in enumerate(f):\n while True:\n line = f.readline()\n #if lang =='en':\n if 'abstract' in line.lower():\n start = ind\n print(\"Start index found!\", start)\n elif 'introduction' in line.lower() or ind > start+40:\n end = ind\n print(\"End index found \", end)\n break\n ind += 1\n data.append(line)\n #print(start, end, srcfile)\n for ind, line in enumerate(data):\n temp = ''\n if start < ind:\n list_str = line.split()\n for l in list_str:\n if re.match(\"[a-zA-Z0-9_]\", l):\n if not isinstance(l, str):\n temp += ' '\n else:\n temp = temp + ' ' + l + ' '\n src += ' '.join(clean_data(temp))\n src += ' '\n except UnicodeDecodeError:\n with open(srcfile, 'r', encoding=\"ISO-8859-1\") as f:\n start = 1000\n end = 0\n data = []\n for ind, line in enumerate(f):\n #if detect(line) == 'en':\n if 'abstract' in line.lower():\n start = ind\n elif 'introduction' in line.lower() or ind > start+40:\n end = ind\n break\n data.append(line)\n \n for ind, line in enumerate(data):\n temp = ''\n if start < ind:\n list_str = line.split()\n for l in list_str:\n if re.match(\"[a-zA-Z0-9_]\", l):\n if not isinstance(l, str):\n temp += ' '\n else:\n temp = temp + ' ' + l + ' '\n src += ' '.join(clean_data(temp))\n src += ' '\n\n f.close()\n \n return src.split()\n \ndef build_vocab(src):\n vocab = dict()\n\n for line in src:\n for w in line:\n if w not in vocab:\n vocab[w] = 1\n else:\n vocab[w] += 1\n\n if '<s>' in vocab:\n del vocab['<s>']\n if '<\\s>' in vocab:\n del vocab['<\\s>']\n if '<unk>' in vocab:\n del vocab['<unk>']\n if '<pad>' in vocab:\n del vocab['<pad>']\n \n sorted_vocab = sorted(vocab.items(),\n key=operator.itemgetter(1),\n reverse=True)\n\n sorted_words = [x[0] for x in sorted_vocab[:30000]]\n\n word2idx = {'<s>' : 0,\n '</s>' : 1,\n '<unk>' : 2,\n '<pad>' : 3 }\n\n idx2word = { 0 : '<s>',\n 1 : '</s>',\n 2 : '<unk>',\n 3 : '<pad>' }\n\n for idx, w in 
enumerate(sorted_words):\n word2idx[w] = idx+4\n idx2word[idx+4] = w\n \n return word2idx, idx2word\n \ndef read_docs(srcfile):\n src = ''\n print(\"Reading....\" + str(srcfile))\n try:\n with open(srcfile, 'r') as f:\n for ind, line in enumerate(f):\n if line is not None:\n src += ' '.join(clean_data(line))\n except UnicodeDecodeError:\n with open(srcfile, 'r', encoding=\"ISO-8859-1\") as f:\n for ind, line in enumerate(f):\n if line is not None:\n src += ' '.join(clean_data(line))\n #src.append(temp)\n f.close()\n return src\n\ndef read_data(srcfile):\n '''\n src = []\n try:\n with open(srcfile, 'r') as f:\n for ind, line in enumerate(f):\n line = line.split('.')\n for i in line:\n temp = clean_data(i)\n src.append(temp)\n except UnicodeDecodeError:\n with open(srcfile, 'r', encoding=\"ISO-8859-1\") as f:\n for ind, line in enumerate(f):\n line = line.split('.')\n for i in line:\n temp = clean_data(i)\n src.append(temp)\n #remove = len(src) % 10\n #src = src[:len(src)-remove]\n f.close()\n '''\n word2idx, idx2word = build_vocab(srcfile)\n return srcfile, word2idx, idx2word\n\ndef find_max_length(src):\n lens = [len(line) for line in src]\n max_len = max(lens)\n return max_len\n\ndef get_batch(src, word2idx, idx, batch_size, max_len):\n lens = [len(line) for line in src[idx:idx+batch_size]]\n src_lines = []\n\n for line in src[idx:idx+batch_size]:\n temp = []\n for w in line:\n if w not in word2idx:\n temp.append(word2idx['<unk>'])\n else:\n temp.append(word2idx[w])\n if len(temp) < max_len:\n for i in range(len(temp), max_len):\n temp.append(word2idx['<pad>'])\n src_lines.append(temp)\n #print(src_lines)\n \n mask = [([1] * (i)) + ([0] * (max_len - i))\n for i in lens\n ] \n \n src_lines = torch.LongTensor(src_lines)\n mask = torch.FloatTensor(mask)\n out_lines = src_lines\n\n return src_lines, out_lines, lens, mask\n\n\ndef load_Wikiword2vecModel(cache):\n\n name_base = 'wiki.en.vec'\n _direct_en_url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.en.vec'\n '''\n name_base = 'wiki.en.vec'\n _direct_en_url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-wiki/wiki.en.zip'\n #_direct_en_url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.bin.gz'\n #corpus = api.load('text8', return_path=True) # download the corpus and return it opened as an iterable\n #model = Word2Vec(corpus)\n '''\n destination = os.path.join(cache, str(name_base))\n print(destination)\n if not os.path.isfile(destination):\n download_from_url(_direct_en_url, destination) \n ext = os.path.splitext(cache)[1][1:]\n if ext == 'zip':\n with zipfile.ZipFile(destination, \"r\") as zf:\n zf.extractall(cache)\n elif ext == 'gz':\n with tarfile.open(destination, 'r:gz') as tar:\n tar.extractall(path=cache)\n \n model = KeyedVectors.load_word2vec_format(destination)\n return model\n\ndef get_embeddingWeights(model, n_words, word2idx, embedding_dim=300):\n embeddings = np.zeros((n_words, embedding_dim))\n for word in word2idx:\n index = word2idx.get(word)\n try:\n vector = model[word]\n embeddings[index] = vector\n except KeyError:\n embeddings[index] = np.random.normal(scale=0.6, size=(embedding_dim, ))\n return torch.from_numpy(embeddings).float()\n\ndef vectorExtrema(reviewer):\n for each in reviewer.keys():\n var = []\n for i in range(len(reviewer[each])):\n var.append(torch.Tensor(reviewer[each][i]).unsqueeze(0))\n reviewer[each] = torch.unsqueeze(torch.max(torch.cat(var), 0).values, 0)\n return reviewer\n\n\ndef vectorMean(reviewer):\n for each in reviewer.keys():\n var = []\n for i in 
range(len(reviewer[each])):\n var.append(torch.Tensor(reviewer[each][i]).unsqueeze(0))\n reviewer[each] = torch.unsqueeze(torch.mean(torch.cat(var,0), 0), 0)\n return reviewer\n\n\n\ndef get_batch_eval(paper_emb, rev_emb, trg_value, idx, batch_size):\n paper_lines = Variable(torch.stack(paper_emb[idx:idx+batch_size]).squeeze(), requires_grad=True)\n review_emb = Variable(torch.stack(rev_emb[idx:idx+batch_size]).squeeze(), requires_grad=True)\n trg = torch.stack(trg_value[idx:idx+batch_size]).squeeze()\n return paper_lines, review_emb, trg\n\ndef train_classification(epochs, model, train_data_sub, train_data_rev, labels, save_dir, criterion, optimizer, batch_size, m_name=''):\n losses= []\n for e_num in range(epochs):\n loss_ep = 0\n for i in range(0, len(labels), batch_size):\n tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, batch_size) \n optimizer.zero_grad()\n prediction = model(tr_sub, tr_rev)\n loss = criterion(prediction, y.argmax(dim=1))\n loss_ep += loss.item()\n loss.backward() # backpropagation, compute gradients\n optimizer.step()\n losses.append(loss_ep/batch_size)\n print(\"Epoch:\", e_num, \" Loss:\", losses[-1])\n print(\"GPU memory consumption for epoch\" + str(e_num) + \" \" + str(torch.cuda.memory_allocated()))\n torch.save(model, os.path.join(save_dir, str(m_name+\".model\")))\n print(\"Model training completed!!\")\n\n\ndef train_d2v(epochs, model, train_data_sub, train_data_rev, labels, save_dir, criterion, optimizer, batch_size):\n losses= []\n for e_num in range(epochs):\n loss_ep = 0\n for i in range(0, len(labels), batch_size):\n tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, batch_size) \n optimizer.zero_grad()\n prediction = model(tr_sub, tr_rev)\n print(prediction, y.argmax(dim=1))\n loss = criterion(prediction, y.argmax(dim=1))\n loss_ep += loss.item()\n loss.backward() # backpropagation, compute gradients\n optimizer.step()\n losses.append(loss_ep/batch_size)\n print(\"Epoch:\", e_num, \" Loss:\", losses[-1])\n print(\"GPU memory consumption for epoch\" + str(e_num) + \" \" + str(torch.cuda.memory_allocated()))\n torch.save(model, os.path.join(save_dir, \"doc2vec_bid.model\"))\n print(\"Model training completed!!\")\n\ndef train_seq(epochs, model, train_data_sub, train_data_rev, labels, save_dir, criterion, optimizer, batch_size, m_name):\n losses= []\n for e_num in range(epochs):\n loss_ep = 0\n for i in range(0, len(labels), batch_size):\n tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, batch_size) \n optimizer.zero_grad()\n prediction = model(tr_sub, tr_rev)\n loss = criterion(prediction, y.argmax(dim=1))\n loss_ep += loss.item()\n loss.backward() # backpropagation, compute gradients\n optimizer.step()\n losses.append(loss_ep/batch_size)\n print(\"Epoch:\", e_num, \" Loss:\", losses[-1])\n print(\"GPU memory consumption for epoch\" + str(e_num) + \" \" + str(torch.cuda.memory_allocated()))\n torch.save(model, os.path.join(save_dir, str(\"seq_bid\"+m_name+\".model\")))\n print(\"Model training completed!!\")\n\ndef train_bert(epochs, model, train_data_sub, train_data_rev, labels, save_dir, criterion, optimizer, m, batch_size):\n losses= []\n for e_num in range(epochs):\n loss_ep = 0\n for i in range(0, len(labels), batch_size):\n tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, batch_size) \n optimizer.zero_grad()\n prediction = model(tr_sub, tr_rev)\n loss = criterion(prediction, y.argmax(dim=1))\n loss_ep += loss.item()\n loss.backward() 
# backpropagation, compute gradients\n optimizer.step()\n losses.append(loss_ep/batch_size)\n print(\"Epoch:\", e_num, \" Loss:\", losses[-1])\n print(\"GPU memory consumption for epoch\" + str(e_num) + \" \" + str(torch.cuda.memory_allocated()))\n torch.save(model, os.path.join(save_dir, \"bid.model\"))\n print(\"Model training completed!!\")\n\ndef eval_bert(path, train_data_sub, train_data_rev, labels, m, criterion, submitter_ids, reviewer_ids):\n model = torch.load(os.path.join(path, \"bid.model\"))\n with torch.no_grad():\n model.eval()\n class_label = 0\n trg_label = 0\n correct = 0\n wrong = 0\n loss_test = 0\n with open(os.path.join(path,\"test_results.txt\"), \"w\") as out:\n for i in range(len(labels)):\n prediction = model(train_data_sub[i], train_data_rev[i])\n print(prediction, labels[i].unsqueeze(0).argmax(dim=1))\n class_label = prediction.argmax(dim=1).squeeze()\n trg_label = labels[i].argmax(dim=-1)\n loss = criterion(prediction, labels[i].unsqueeze(0).argmax(dim=1)) # must be (1. nn output, 2. target)\n loss_test += loss.item()\n if class_label == trg_label:\n correct += 1\n else:\n print(class_label, trg_label)\n wrong += 1\n out.write(str(submitter_ids[i]) + \" \" + str(reviewer_ids[i]) + \" \" + str(class_label.data.cpu()) + \" \" + str(trg_label.data.cpu()))\n print(\"Accuracy:\", correct/len(labels), \" Test Loss:\", loss_test/len(labels))\n out.write(\"Accuracy:\"+ str(correct/len(labels)) + \" Test Loss:\" + str(loss_test/len(labels)))\n out.close()\n\ndef eval_d2v(path, train_data_sub, train_data_rev, labels, criterion, submitter_ids, reviewer_ids):\n model = torch.load(os.path.join(path, \"doc2vec_bid.model\"))\n with torch.no_grad():\n model.eval()\n class_label = 0\n trg_label = 0\n correct = 0\n wrong = 0\n loss_test = 0\n with open(os.path.join(path,\"test_results.txt\"), \"w\") as out:\n for i in range(len(labels)):\n #tr_sub, tr_rev, y = get_batch_eval(train_data_sub, train_data_rev, labels, i, 1)\n #print(tr_sub, y)\n prediction = model(train_data_sub[i], train_data_rev[i])\n print(prediction, labels[i])\n class_label = prediction.argmax(dim=1)\n trg_label = labels[i].argmax(dim=-1)\n loss = criterion(prediction, labels[i].unsqueeze(0).argmax(dim=1)) # must be (1. nn output, 2. target)\n loss_test += loss.item()\n if class_label == trg_label:\n correct += 1\n else:\n print(class_label, trg_label)\n wrong += 1\n out.write(str(submitter_ids[i]) + \" \" + str(reviewer_ids[i]) + \" \" + str(class_label.data.cpu()) + \" \" + str(trg_label.data.cpu()))\n print(\"Accuracy:\", correct/len(labels), \" Test Loss:\", loss_test/len(labels))\n out.write(\"Accuracy:\"+ str(correct/len(labels)) + \" Test Loss:\" + str(loss_test/len(labels)))\n out.close()\n\n\n"
] |
[
[
"torch.LongTensor",
"torch.Tensor",
"torch.cat",
"torch.from_numpy",
"numpy.random.normal",
"torch.no_grad",
"torch.FloatTensor",
"torch.stack",
"numpy.zeros",
"torch.cuda.memory_allocated"
]
] |
yutashx/PiSense
|
[
"bc059e8e0bf17adafc116e10bd4a6a69b6d99cf1"
] |
[
"reconstruction/reconstruction_system/sensors/realsense_recorder.py"
] |
[
"# Open3D: www.open3d.org\n# The MIT License (MIT)\n# See license file or visit www.open3d.org for details\n\n# examples/python/reconstruction_system/sensors/realsense_recorder.py\n\n# pyrealsense2 is required.\n# Please see instructions in https://github.com/IntelRealSense/librealsense/tree/master/wrappers/python\nimport pyrealsense2 as rs\nimport numpy as np\nimport cv2\nfrom os import makedirs\nfrom os.path import exists, join\nimport shutil\nimport json\nfrom enum import IntEnum\nfrom argparse import ArgumentParser\n\ntry:\n # Python 2 compatible\n input = raw_input\nexcept NameError:\n pass\n\n\nclass Preset(IntEnum):\n Custom = 0\n Default = 1\n Hand = 2\n HighAccuracy = 3\n HighDensity = 4\n MediumDensity = 5\n\n\ndef make_clean_folder(path_folder):\n if not exists(path_folder):\n makedirs(path_folder)\n else:\n user_input = input(\"%s not empty. Overwrite? (y/n) : \" % path_folder)\n if user_input.lower() == 'y':\n shutil.rmtree(path_folder)\n makedirs(path_folder)\n else:\n exit()\n\n\ndef save_intrinsic_as_json(filename, frame):\n intrinsics = frame.profile.as_video_stream_profile().intrinsics\n with open(filename, 'w') as outfile:\n obj = json.dump(\n {\n 'width':\n intrinsics.width,\n 'height':\n intrinsics.height,\n 'intrinsic_matrix': [\n intrinsics.fx, 0, 0, 0, intrinsics.fy, 0, intrinsics.ppx,\n intrinsics.ppy, 1\n ]\n },\n outfile,\n indent=4)\n\ndef get_parser():\n argparser = ArgumentParser(\n description=\n \"Realsense Recorder. Please select one of the optional arguments\")\n argparser.add_argument('--color_width', type=int, default=1280, help=\"color camera width\")\n argparser.add_argument('--color_height', type=int, default=720, help=\"color camera height\")\n argparser.add_argument('--depth_width', type=int, default=1280, help=\"depth camera width\")\n argparser.add_argument('--depth_height', type=int, default=720, help=\"depth camera height\")\n argparser.add_argument('--fps', type=int, default=30, help=\"input camera fps\")\n argparser.add_argument('--distance', type=float, default=3.0, help=\"input clipping distance in meters\")\n argparser.add_argument(\"--output_folder\", default='../dataset/', help=\"set output folder\")\n argparser.add_argument(\"--record_rosbag\", action='store_true', help=\"Recording rgbd stream into realsense.bag\")\n argparser.add_argument( \"--record_imgs\", action='store_true', help=\"Recording save color and depth images into realsense folder\")\n argparser.add_argument(\"--playback_rosbag\", action='store_true', help=\"Play recorded realsense.bag file\")\n argparser.add_argument(\"--window\", action='store_true', help=\"visualize capturing color and dpeth images\")\n\n return argparser\n\n\nif __name__ == \"__main__\":\n parser = get_parser()\n args = parser.parse_args()\n\n if sum(o is not False for o in vars(args).values()) <= 1:\n parser.print_help()\n exit()\n\n path_output = args.output_folder\n path_depth = join(args.output_folder, \"depth\")\n path_color = join(args.output_folder, \"color\")\n if args.record_imgs:\n make_clean_folder(path_output)\n make_clean_folder(path_depth)\n make_clean_folder(path_color)\n\n path_bag = join(args.output_folder, \"realsense.bag\")\n if args.record_rosbag:\n if exists(path_bag):\n user_input = input(\"%s exists. Overwrite? 
(y/n) : \" % path_bag)\n if user_input.lower() == 'n':\n exit()\n\n color_camera_width = args.color_width\n color_camera_height = args.color_height\n depth_camera_width = args.depth_width\n depth_camera_height = args.depth_height\n fps = args.fps\n distance = args.distance\n\n # Create a pipeline\n pipeline = rs.pipeline()\n\n #Create a config and configure the pipeline to stream\n # different resolutions of color and depth streams\n config = rs.config()\n\n if args.record_imgs or args.record_rosbag:\n # note: using 640 x 480 depth resolution produces smooth depth boundaries\n # using rs.format.bgr8 for color image format for OpenCV based image visualization\n config.enable_stream(rs.stream.depth, depth_camera_width, depth_camera_height, rs.format.z16, fps)\n config.enable_stream(rs.stream.color, color_camera_width, color_camera_height, rs.format.bgr8, fps)\n if args.record_rosbag:\n config.enable_record_to_file(path_bag)\n if args.playback_rosbag:\n config.enable_device_from_file(path_bag, repeat_playback=True)\n\n # Start streaming\n profile = pipeline.start(config)\n depth_sensor = profile.get_device().first_depth_sensor()\n\n # Using preset HighAccuracy for recording\n if args.record_rosbag or args.record_imgs:\n depth_sensor.set_option(rs.option.visual_preset, Preset.HighAccuracy)\n\n # Getting the depth sensor's depth scale (see rs-align example for explanation)\n depth_scale = depth_sensor.get_depth_scale()\n\n # We will not display the background of objects more than\n # clipping_distance_in_meters meters away\n clipping_distance_in_meters = distance # 3 meter\n clipping_distance = clipping_distance_in_meters / depth_scale\n\n # Create an align object\n # rs.align allows us to perform alignment of depth frames to others frames\n # The \"align_to\" is the stream type to which we plan to align depth frames.\n align_to = rs.stream.color\n align = rs.align(align_to)\n\n # Streaming loop\n frame_count = 0\n try:\n while True:\n # Get frameset of color and depth\n frames = pipeline.wait_for_frames()\n\n # Align the depth frame to color frame\n aligned_frames = align.process(frames)\n\n # Get aligned frames\n aligned_depth_frame = aligned_frames.get_depth_frame()\n color_frame = aligned_frames.get_color_frame()\n\n # Validate that both frames are valid\n if not aligned_depth_frame or not color_frame:\n continue\n\n depth_image = np.asanyarray(aligned_depth_frame.get_data())\n color_image = np.asanyarray(color_frame.get_data())\n\n if args.record_imgs:\n if frame_count == 0:\n save_intrinsic_as_json(\n join(args.output_folder, \"camera_intrinsic.json\"),\n color_frame)\n cv2.imwrite(\"%s/%06d.png\" % \\\n (path_depth, frame_count), depth_image)\n cv2.imwrite(\"%s/%06d.jpg\" % \\\n (path_color, frame_count), color_image)\n print(\"Saved color + depth image %06d\" % frame_count)\n frame_count += 1\n\n # Remove background - Set pixels further than clipping_distance to grey\n grey_color = 153\n #depth image is 1 channel, color is 3 channels\n depth_image_3d = np.dstack((depth_image, depth_image, depth_image))\n bg_removed = np.where((depth_image_3d > clipping_distance) | \\\n (depth_image_3d <= 0), grey_color, color_image)\n\n # Render images\n if args.window:\n depth_colormap = cv2.applyColorMap(\n cv2.convertScaleAbs(depth_image, alpha=0.09), cv2.COLORMAP_JET)\n images = np.hstack((bg_removed, depth_colormap))\n cv2.namedWindow('Recorder Realsense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('Recorder Realsense', images)\n key = cv2.waitKey(1)\n\n # if 'esc' button pressed, escape loop and exit 
program\n if key == 27:\n cv2.destroyAllWindows()\n break\n finally:\n pipeline.stop()\n"
] |
[
[
"numpy.hstack",
"numpy.where",
"numpy.dstack"
]
] |
kex5n/Vehicles-Dispatch-Simulator
|
[
"d0cca03fbf56e4b0ceeef8dafc59de105c1d4507"
] |
[
"objects/node.py"
] |
[
"from typing import Any, List\n\nimport numpy as np\nimport pandas as pd\n\n\n# random.seed(1234)\nnp.random.seed(1234)\n# torch.manual_seed(1234)\n# torch.cuda.manual_seed_all(1234)\n# torch.backends.cudnn.deterministic = True\n\nclass Node:\n    def __init__(self, id: int, node_index: int,longitude: float, latitude: float):\n        self.id: int = id\n        self.node_index: int = node_index\n        self.longitude: float = longitude\n        self.latitude: float = latitude\n\n    def __eq__(self, other: Any):\n        if not isinstance(other, Node):\n            return False\n        if other.id != self.id:\n            return False\n        return True\n\n\nclass NodeManager:\n    def __init__(self, node_df: pd.DataFrame) -> None:\n        self.__node_list = [\n            Node(\n                id=row[\"NodeID\"],\n                node_index=int(row[\"NodeIndex\"]),\n                longitude=row[\"Longitude\"],\n                latitude=row[\"Latitude\"],\n            )\n            for _, row in node_df.iterrows()\n        ]\n        self.__node_index = {node.id: node.node_index for node in self.__node_list}\n        self.__node_dict = {node.id: node for node in self.__node_list}\n\n    @property\n    def node_locations(self) -> np.ndarray:\n        return np.array(\n            [\n                [round(node.longitude, 7), round(node.latitude, 7)]\n                for node in self.__node_list\n            ]\n        )\n\n    @property\n    def node_id_list(self) -> np.ndarray:\n        return np.array([node.id for node in self.__node_list])\n\n    @property\n    def node_index_list(self) -> np.ndarray:\n        return np.array([node.node_index for node in self.__node_list])\n\n    def get_node_list(self) -> List[Node]:\n        return [node for node in self.__node_list]\n\n    def get_node_index(self, node_id: int) -> int:\n        return self.__node_index[node_id]\n\n    def get_node(self, node_id) -> Node:\n        return self.__node_dict[node_id]\n\n    def __len__(self) -> int:\n        return len(self.__node_list)\n\n    def get_node_by_node_id(self, node_id: int) -> Node:\n        return self.__node_dict[node_id]\n"
] |
[
[
"numpy.array",
"numpy.random.seed"
]
] |
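A minimal usage sketch for the NodeManager class defined in objects/node.py above. Everything it relies on comes from that record: a pandas DataFrame with NodeID, NodeIndex, Longitude and Latitude columns. The two sample nodes and their coordinates are made up for illustration.

import pandas as pd

from objects.node import NodeManager  # module path as listed in the record above

# two hypothetical nodes with the column names node.py expects
node_df = pd.DataFrame({
    "NodeID": [101, 102],
    "NodeIndex": [0, 1],
    "Longitude": [116.3974280, 116.4108860],
    "Latitude": [39.9092300, 39.8818930],
})

manager = NodeManager(node_df)
print(len(manager))                 # number of managed nodes -> 2
print(manager.get_node_index(101))  # NodeIndex stored for NodeID 101 -> 0
print(manager.node_locations)       # (2, 2) array of [longitude, latitude], rounded to 7 decimals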
jiskattema/kernel_tuner
|
[
"dacd79e3371308092798f7f2394a5d20af8bdebd"
] |
[
"kernel_tuner/interface.py"
] |
[
"\"\"\"Kernel Tuner interface module\n\nThis module contains the main functions that Kernel Tuner\noffers to its users.\n\nAuthor\n------\nBen van Werkhoven <[email protected]>\n\nCopyright and License\n---------------------\n* Copyright 2016 Netherlands eScience Center\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom __future__ import print_function\n\nimport json\nimport os.path\nfrom collections import OrderedDict\nimport importlib\nfrom datetime import datetime\nimport logging\nimport sys\nimport numpy\n\nimport kernel_tuner.util as util\nimport kernel_tuner.core as core\n\nfrom kernel_tuner.runners.sequential import SequentialRunner\nfrom kernel_tuner.runners.simulation import SimulationRunner\n\nfrom kernel_tuner.strategies import brute_force, random_sample, diff_evo, minimize, basinhopping, genetic_algorithm, pso, simulated_annealing, firefly_algorithm, bayes_opt\n\nstrategy_map = {\n \"brute_force\": brute_force,\n \"random_sample\": random_sample,\n \"minimize\": minimize,\n \"basinhopping\": basinhopping,\n \"diff_evo\": diff_evo,\n \"genetic_algorithm\": genetic_algorithm,\n \"pso\": pso,\n \"simulated_annealing\": simulated_annealing,\n \"firefly_algorithm\": firefly_algorithm,\n \"bayes_opt\": bayes_opt\n}\n\n\nclass Options(OrderedDict):\n \"\"\"read-only class for passing options around\"\"\"\n\n def __getattr__(self, name):\n if not name.startswith('_'):\n return self[name]\n return super(Options, self).__getattr__(name)\n\n def __deepcopy__(self, _):\n return self\n\n\n_kernel_options = Options([(\"kernel_name\", (\"\"\"The name of the kernel in the code.\"\"\", \"string\")),\n (\"kernel_source\", (\"\"\"The CUDA, OpenCL, or C kernel code.\n It is allowed for the code to be passed as a string, a filename, a function\n that returns a string of code, or a list when the code needs auxilliary files.\n\n To support combined host and device code tuning, a list of\n filenames can be passed. The first file in the list should be the\n file that contains the host code. The host code is assumed to\n include or read in any of the files in the list beyond the first.\n The tunable parameters can be used within all files.\n\n Another alternative is to pass a code generating function.\n The purpose of this is to support the use of code generating\n functions that generate the kernel code based on the specific\n parameters. This function should take one positional argument,\n which will be used to pass a dict containing the parameters.\n The function should return a string with the source code for\n the kernel.\"\"\", \"string or list and/or callable\")),\n (\"lang\", (\"\"\"Specifies the language used for GPU kernels. 
The kernel_tuner\n automatically detects the language, but if it fails, you may specify\n the language using this argument, currently supported: \"CUDA\",\n \"OpenCL\", or \"C\".\"\"\", \"string\")),\n (\"problem_size\", (\"\"\"The size of the domain from which the grid dimensions\n of the kernel are computed.\n\n This can be specified using an int, string, function, or\n 1,2,3-dimensional tuple.\n\n In general, do not divide the problem_size yourself by the thread block sizes.\n Kernel Tuner does this for you based on tunable parameters,\n called \"block_size_x\", \"block_size_y\", and \"block_size_z\".\n If more or different parameters divide the grid dimensions use\n grid_div_x/y/z options to specify this.\n\n In most use-cases the problem_size is specified using a single integer\n or a tuple of integers,\n but Kernel Tuner supports more advanced use cases where the problem_size\n itself depends on the tunable parameters in some way.\n\n You are allowed to use a function or string to specify the problem_size.\n A function should accept a dictionary with the tunable parameters\n for this kernel configuration and directly return a tuple\n that specifies the problem size in all dimensions.\n\n When passing a string, you are allowed to write Python\n arithmetic and use the names of tunable parameters as variables\n in these expressions. Kernel Tuner will replace instances of the tunable\n parameters with their current value when computing the grid dimensions.\n This option exists for convenience, but do note that using a lambda\n function is probably safer. The string notation should only return\n the problem size for one dimension, but can be used inside\n a tuple, possibly in combination with integers or more strings in\n different dimensions.\n\n See the reduction CUDA example for an example use of this feature.\"\"\", \"callable, string, int, or tuple(int or string, ..)\")),\n (\"arguments\", (\"\"\"A list of kernel arguments, use numpy arrays for\n arrays, use numpy.int32 or numpy.float32 for scalars.\"\"\", \"list\")),\n (\"grid_div_x\", (\"\"\"A list of names of the parameters whose values divide\n the grid dimensions in the x-direction.\n The product of all grid divisor expressions is computed before dividing\n the problem_size in that dimension. Also note that the divison is treated\n as a float divison and resulting grid dimensions will be rounded up to\n the nearest integer number.\n\n Arithmetic expressions can be\n used if necessary inside the string containing a parameter name. 
For\n example, in some cases you may want to divide the problem size in the\n x-dimension with the number of warps rather than the number of threads\n in a block, in such cases one could for example use [\"block_size_x/32\"].\n Another option is to pass a function to grid_div_x that accepts a\n dictionary with the tunable parameters and returns the grid divisor\n in this dimension, for example: grid_div_x=lambda p:p[\"block_size_x\"]/32.\n\n If not supplied, [\"block_size_x\"] will be used by default, if you do not\n want any grid x-dimension divisors pass an empty list.\"\"\", \"callable or list\")),\n (\"grid_div_y\", (\"\"\"A list of names of the parameters whose values divide\n the grid dimensions in the y-direction, [\"block_size_y\"] by default.\n If you do not want to divide the problem_size, you should pass an empty list.\n See grid_div_x for more details.\"\"\", \"list\")),\n (\"grid_div_z\", (\"\"\"A list of names of the parameters whose values divide\n the grid dimensions in the z-direction, [\"block_size_z\"] by default.\n If you do not want to divide the problem_size, you should pass an empty list.\n See grid_div_x for more details.\"\"\", \"list\")),\n (\"smem_args\", (\"\"\"CUDA-specific feature for specifying shared memory options\n to the kernel. At the moment only 'size' is supported, but setting the\n shared memory configuration on Kepler GPUs for example could be added\n in the future. Size should denote the number of bytes for to use when\n dynamically allocating shared memory.\"\"\", \"dict(string: numpy object)\")),\n (\"cmem_args\", (\"\"\"CUDA-specific feature for specifying constant memory\n arguments to the kernel. In OpenCL these are handled as normal\n kernel arguments, but in CUDA you can copy to a symbol. The way you\n specify constant memory arguments is by passing a dictionary with\n strings containing the constant memory symbol name together with numpy\n objects in the same way as normal kernel arguments.\"\"\", \"dict(string: numpy object)\")),\n (\"texmem_args\", (\"\"\"CUDA-specific feature for specifying texture memory\n arguments to the kernel. You specify texture memory arguments by passing a\n dictionary with strings containing the texture reference name together with\n the texture contents. These contents can be either simply a numpy object,\n or a dictionary containing the numpy object under the key 'array' plus the\n configuration options 'filter_mode' ('point' or 'linear), 'address_mode'\n (a list of 'border', 'clamp', 'mirror', 'wrap' per axis),\n 'normalized_coordinates' (True/False).\"\"\", \"dict(string: numpy object or dict)\")),\n (\"block_size_names\", (\"\"\"A list of strings that replace the defaults for the names\n that denote the thread block dimensions. 
If not passed, the behavior\n defaults to ``[\"block_size_x\", \"block_size_y\", \"block_size_z\"]``\"\"\", \"list(string)\"))])\n\n_tuning_options = Options([(\"tune_params\", (\"\"\"A dictionary containing the parameter names as keys,\n and lists of possible parameter settings as values.\n Kernel Tuner will try to compile and benchmark all possible\n combinations of all possible values for all tuning parameters.\n This typically results in a rather large search space of all\n possible kernel configurations.\n\n For each kernel configuration, each tuning parameter is\n replaced at compile-time with its current value.\n Currently, Kernel Tuner uses the convention that the following\n list of tuning parameters are used as thread block dimensions:\n\n * \"block_size_x\" thread block (work group) x-dimension\n * \"block_size_y\" thread block (work group) y-dimension\n * \"block_size_z\" thread block (work group) z-dimension\n\n Options for changing these defaults may be added later. If you\n don't want the thread block dimensions to be compiled in, you\n may use the built-in variables blockDim.xyz in CUDA or the\n built-in function get_local_size() in OpenCL instead.\"\"\", \"dict( string : [...]\")),\n (\"restrictions\", (\"\"\"An option to limit the search space with restrictions.\n The restrictions can be specified using a function or a list of strings.\n The function should take one argument, namely a dictionary with the\n tunable parameters of the kernel configuration, if the function returns\n True the configuration is considered to be part of the search space, or\n False otherwise.\n The other way to specify restrictions is using a list of strings\n containing boolean expression that must be satisfied by the kernel\n configuration. These expressions must all be true for the configuration\n to be part of the search space. For example:\n restrictions=[\"block_size_x==block_size_y*tile_size_y\"] limits the\n search to configurations where the block_size_x equals the product\n of block_size_y and tile_size_y.\n The default is None.\"\"\", \"callable or list(strings)\")),\n (\"answer\", (\"\"\"A list of arguments, similar to what you pass to arguments,\n that contains the expected output of the kernel after it has executed\n and contains None for each argument that is input-only. The expected\n output of the kernel will then be used to verify the correctness of\n each kernel in the parameter space before it will be benchmarked.\"\"\", \"list\")),\n (\"atol\", (\"\"\"The maximum allowed absolute difference between two elements\n in the output and the reference answer, as passed to numpy.allclose().\n Ignored if you have not passed a reference answer. Default value is\n 1e-6, that is 0.000001.\"\"\", \"float\")),\n (\"verify\", (\"\"\"Python function used for output verification. By default,\n numpy.allclose is used for output verification, if this does not suit\n your application, you can pass a different function here.\n\n The function is expected to have two positional arguments. The first\n is the reference result, the second is the output computed by the\n kernel being verified. The types of these arguments depends on the\n type of the output arguments you are verifying. 
The function may also\n have an optional argument named atol, to which the value will be\n passed that was specified using the atol option to tune_kernel.\n The function should return True when the output passes the test, and\n False when the output fails the test.\"\"\", \"func(ref, ans, atol=None)\")),\n (\"strategy\", (\"\"\"Specify the strategy to use for searching through the\n parameter space, choose from:\n\n * \"brute_force\" (default),\n * \"random_sample\", specify: *sample_fraction*,\n * \"minimize\" or \"basinhopping\", specify: *method*,\n * \"diff_evo\", specify: *method*.\n * \"genetic_algorithm\"\n * \"pso\"\n * \"firefly_algorithm\"\n * \"simulated_annealing\"\n * \"bayes_opt\"\n\n \"brute_force\" is the default and iterates over the entire search\n space.\n\n \"random_sample\" can be used to only benchmark a fraction of the\n search space, specify a *sample_fraction* in the interval [0, 1].\n\n \"minimize\" and \"basinhopping\" strategies use minimizers to\n limit the search through the parameter space.\n\n \"diff_evo\" uses differential evolution.\n\n \"genetic_algorithm\" implements a Genetic Algorithm, default\n setting uses a population size of 20 for 100 generations.\n\n \"pso\" implements Particle Swarm Optimization, using the default\n setting of 20 particles for 100 iterations.\n\n \"firefly_algorithm\" implements the Firefly Algorithm, using 20\n fireflies for 100 iterations.\n\n \"simulated_annealing\" uses Simulated Annealing.\n\n \"bayes_opt\" uses Bayesian Optimization.\n\n \"\"\", \"\")),\n (\"strategy_options\", (\"\"\"A dict with options for the tuning strategy\n\n Example usage:\n\n * strategy=\"basinhopping\",\n strategy_options={\"method\": \"BFGS\",\n \"maxiter\": 100,\n \"T\": 1.0}\n * strategy=\"diff_evo\",\n strategy_options={\"method\": \"best1bin\",\n \"popsize\": 20}\n * strategy=\"genetic_algorithm\",\n strategy_options={\"method\": \"uniform\",\n \"popsize\": 20,\n \"maxiter\": 100}\n\n strategy=\"minimize\" and strategy=\"basinhopping\", support the following\n options for \"method\":\n \"Nelder-Mead\", \"Powell\", \"CG\", \"BFGS\", \"L-BFGS-B\",\n \"TNC\", \"COBYLA\", or \"SLSQP\". It is also possible to pass a function\n that implements a custom minimization strategy.\n The default is \"L-BFGS-B\".\n strategy=\"basinhopping\" also supports \"T\", which is 1.0 by default.\n\n strategy=\"diff_evo\" supports the following creation \"method\" options:\n \"best1bin\", \"best1exp\", \"rand1exp\", \"randtobest1exp\", \"best2exp\",\n \"rand2exp\", \"randtobest1bin\", \"best2bin\", \"rand2bin\", \"rand1bin\".\n The default is \"best1bin\".\n\n strategy=\"genetic_algorithm\" uses \"method\" to select the crossover\n method, options are: \"single_point\", \"two_point\", \"uniform\", and\n \"disruptive_uniform\".\n The default is \"uniform\".\n Also \"mutation_chance\" can be set to control the chance of a mutation,\n which is separately evaluated for each dimension. For example, set\n to 100 for a probability of 0.01 of a mutation per tunable parameter.\n\n strategy=\"random_sample\" supports \"fraction\" to specify\n the fraction of the search space to sample in the interval [0,1].\n\n strategy=\"firefly_algorithm\" supports the following parameters:\n B0 = 1.0, gamma = 1.0, alpha = 0.20.\n\n strategy=\"simulated_annealing\" supports parameters:\n T = 1.0, T_min = 0.001, alpha = 0.9.\n\n strategy=\"bayes_opt\" supports acquisition methods: \"poi\" (default),\n \"ei\", \"ucb\". 
And parameters, popsize (initial random guesses),\n maxiter, alpha, kappa, xi.\n\n \"maxiter\" is supported by \"minimize\", \"basinhopping\", \"diff_evo\"\n \"firefly_algorithm\", \"pso\", \"genetic_algorithm\", \"bayes_opt\". Note\n that maxiter generally refers to iterations of the strategy, not\n the maximum number of function evaluations.\n\n \"\"\", \"dict\")),\n (\"iterations\", (\"\"\"The number of times a kernel should be executed and\n its execution time measured when benchmarking a kernel, 7 by default.\"\"\", \"int\")),\n (\"verbose\", (\"\"\"Sets whether or not to report about configurations that\n were skipped during the search. This could be due to several reasons:\n\n * kernel configuration fails one or more restrictions\n * too many threads per thread block\n * too much shared memory used by the kernel\n * too many resources requested for launch\n\n verbose is False by default.\"\"\", \"bool\")),\n (\"cache\", (\"\"\"filename for caching/logging benchmarked instances\n filename uses suffix \".json\"\n if the file exists it is read and tuning continues from this file\n \"\"\", \"string\")), (\"metrics\", (\"specifies user-defined metrics\", \"OrderedDict\")),\n (\"simulation_mode\", (\"Simulate an auto-tuning search from an existing cachefile\", \"bool\")),\n (\"observers\", (\"\"\"A list of BenchmarkObservers\"\"\", \"list\"))])\n\n_device_options = Options([(\"device\", (\"\"\"CUDA/OpenCL device to use, in case you have multiple\n CUDA-capable GPUs or OpenCL devices you may use this to select one,\n 0 by default. Ignored if you are tuning host code by passing\n lang=\"C\".\"\"\", \"int\")),\n (\"platform\", (\"\"\"OpenCL platform to use, in case you have multiple\n OpenCL platforms you may use this to select one,\n 0 by default. Ignored if not using OpenCL. \"\"\", \"int\")),\n (\"quiet\", (\"\"\"Control whether or not to print to the console which\n device is being used, False by default\"\"\", \"boolean\")),\n (\"compiler\", (\"\"\"A string containing your preferred compiler,\n only effective with lang=\"C\". \"\"\", \"string\")), (\"compiler_options\", (\"\"\"A list of strings that specify compiler\n options.\"\"\", \"list(string)\"))])\n\n\ndef _get_docstring(opts):\n docstr = \"\"\n for k, v in opts.items():\n docstr += \" :param \" + k + \": \" + v[0] + \"\\n\"\n docstr += \" :type \" + k + \": \" + v[1] + \"\\n\\n\"\n return docstr\n\n\n_tune_kernel_docstring = \"\"\" Tune a CUDA kernel given a set of tunable parameters\n\n%s\n\n :returns: A list of dictionaries of all executed kernel configurations and their\n execution times. And a dictionary with information about the environment\n in which the tuning took place. 
This records device name, properties,\n version info, and so on.\n :rtype: list(dict()), dict()\n\n\"\"\" % _get_docstring(_kernel_options) + _get_docstring(_tuning_options) + _get_docstring(_device_options)\n\n#\"\"\"\n\n\ndef tune_kernel(kernel_name, kernel_string, problem_size, arguments, tune_params, grid_div_x=None, grid_div_y=None, grid_div_z=None, restrictions=None,\n answer=None, atol=1e-6, verify=None, verbose=False, lang=None, device=0, platform=0, smem_args=None, cmem_args=None, texmem_args=None,\n compiler=None, compiler_options=None, log=None, iterations=7, block_size_names=None, quiet=False, strategy=None, strategy_options=None,\n cache=None, metrics=None, simulation_mode=False, observers=None):\n\n if log:\n logging.basicConfig(filename=kernel_name + datetime.now().strftime('%Y%m%d-%H:%M:%S') + '.log', level=log)\n\n kernel_source = core.KernelSource(kernel_string, lang)\n\n _check_user_input(kernel_name, kernel_source, arguments, block_size_names)\n\n # check for forbidden names in tune parameters\n util.check_tune_params_list(tune_params)\n\n # check whether block_size_names are used as expected\n util.check_block_size_params_names_list(block_size_names, tune_params)\n\n if iterations < 1:\n raise ValueError(\"Iterations should be at least one!\")\n\n #sort all the options into separate dicts\n opts = locals()\n kernel_options = Options([(k, opts[k]) for k in _kernel_options.keys()])\n tuning_options = Options([(k, opts[k]) for k in _tuning_options.keys()])\n device_options = Options([(k, opts[k]) for k in _device_options.keys()])\n\n logging.debug('tune_kernel called')\n logging.debug('kernel_options: %s', util.get_config_string(kernel_options))\n logging.debug('tuning_options: %s', util.get_config_string(tuning_options))\n logging.debug('device_options: %s', util.get_config_string(device_options))\n\n if strategy:\n if strategy in strategy_map:\n strategy = strategy_map[strategy]\n else:\n raise ValueError(\"Strategy %s not recognized\" % strategy)\n\n #make strategy_options into an Options object\n if tuning_options.strategy_options:\n if not isinstance(strategy_options, Options):\n tuning_options.strategy_options = Options(strategy_options)\n\n #select strategy based on user options\n if \"fraction\" in tuning_options.strategy_options and not tuning_options.strategy == 'random_sample':\n raise ValueError('It is not possible to use fraction in combination with strategies other than \"random_sample\". 
' \\\n 'Please set strategy=\"random_sample\", when using \"fraction\" in strategy_options')\n\n #check if method is supported by the selected strategy\n if \"method\" in tuning_options.strategy_options:\n method = tuning_options.strategy_options.method\n if not method in strategy.supported_methods:\n raise ValueError('Method %s is not supported for strategy %s' % (method, tuning_options.strategy))\n\n #if no strategy_options dict has been passed, create empty dictionary\n else:\n tuning_options.strategy_options = Options({})\n\n #if no strategy selected\n else:\n strategy = brute_force\n\n # select the runner for this job based on input\n SelectedRunner = SimulationRunner if simulation_mode else SequentialRunner\n with SelectedRunner(kernel_source, kernel_options, device_options, iterations, observers) as runner:\n\n #the user-specified function may or may not have an optional atol argument;\n #we normalize it so that it always accepts atol.\n tuning_options.verify = util.normalize_verify_function(tuning_options.verify)\n\n #process cache\n if cache:\n if cache[-5:] != \".json\":\n cache += \".json\"\n\n util.process_cache(cache, kernel_options, tuning_options, runner)\n else:\n tuning_options.cache = {}\n tuning_options.cachefile = None\n\n #call the strategy to execute the tuning process\n results, env = strategy.tune(runner, kernel_options, device_options, tuning_options)\n\n #finished iterating over search space\n if not device_options.quiet:\n if results: #checks if results is not empty\n best_config = min(results, key=lambda x: x['time'])\n units = getattr(runner, \"units\", None)\n print(\"best performing configuration:\")\n util.print_config_output(tune_params, best_config, device_options.quiet, metrics, units)\n else:\n print(\"no results to report\")\n\n if cache:\n util.close_cache(cache)\n\n return results, env\n\n\ntune_kernel.__doc__ = _tune_kernel_docstring\n\n_run_kernel_docstring = \"\"\"Compile and run a single kernel\n\n Compiles and runs a single kernel once, given a specific instance of the kernels tuning parameters.\n However, instead of measuring execution time run_kernel returns the output of the kernel.\n The output is returned as a list of numpy arrays that contains the state of all the kernel arguments\n after execution on the GPU.\n\n To summarize what this function will do for you in one call:\n * Compile the kernel according to the set of parameters passed\n * Allocate GPU memory to hold all kernel arguments\n * Move the all data to the GPU\n * Execute the kernel on the GPU\n * Copy all data from the GPU back to the host and return it as a list of Numpy arrays\n\n This function was added to Kernel Tuner mostly to allow easy testing for kernel correctness.\n On purpose, the interface is a lot like `tune_kernel()`.\n\n%s\n\n :param params: A dictionary containing the tuning parameter names as keys\n and a single value per tuning parameter as values.\n :type params: dict( string: int )\n\n :returns: A list of numpy arrays, similar to the arguments passed to this\n function, containing the output after kernel execution.\n :rtype: list\n\"\"\" % _get_docstring(_kernel_options) + _get_docstring(_device_options)\n\n\ndef run_kernel(kernel_name, kernel_string, problem_size, arguments, params, grid_div_x=None, grid_div_y=None, grid_div_z=None, lang=None, device=0, platform=0,\n smem_args=None, cmem_args=None, texmem_args=None, compiler=None, compiler_options=None, block_size_names=None, quiet=False, log=None):\n\n if log:\n logging.basicConfig(filename=kernel_name + 
datetime.now().strftime('%Y%m%d-%H:%M:%S') + '.log', level=log)\n\n kernel_source = core.KernelSource(kernel_string, lang)\n\n _check_user_input(kernel_name, kernel_source, arguments, block_size_names)\n\n #sort options into separate dicts\n opts = locals()\n kernel_options = Options([(k, opts[k]) for k in _kernel_options.keys()])\n device_options = Options([(k, opts[k]) for k in _device_options.keys()])\n\n #detect language and create the right device function interface\n with core.DeviceInterface(kernel_source, iterations=1, **device_options) as dev:\n\n #move data to the GPU\n gpu_args = dev.ready_argument_list(arguments)\n\n instance = None\n try:\n #create kernel instance\n instance = dev.create_kernel_instance(kernel_source, kernel_options, params, False)\n if instance is None:\n raise Exception(\"cannot create kernel instance, too many threads per block\")\n\n # see if the kernel arguments have correct type\n util.check_argument_list(instance.name, instance.kernel_string, arguments)\n\n #compile the kernel\n func = dev.compile_kernel(instance, False)\n if func is None:\n raise Exception(\"cannot compile kernel, too much shared memory used\")\n\n #add constant memory arguments to compiled module\n if cmem_args is not None:\n dev.copy_constant_memory_args(cmem_args)\n #add texture memory arguments to compiled module\n if texmem_args is not None:\n dev.copy_texture_memory_args(texmem_args)\n finally:\n #delete temp files\n if instance is not None:\n instance.delete_temp_files()\n\n #run the kernel\n if not dev.run_kernel(func, gpu_args, instance):\n raise Exception(\"runtime error occured, too many resources requested\")\n\n #copy data in GPU memory back to the host\n results = []\n for i, arg in enumerate(arguments):\n if numpy.isscalar(arg):\n results.append(arg)\n else:\n results.append(numpy.zeros_like(arg))\n dev.memcpy_dtoh(results[-1], gpu_args[i])\n\n return results\n\n\nrun_kernel.__doc__ = _run_kernel_docstring\n\n\ndef _check_user_input(kernel_name, kernel_source, arguments, block_size_names):\n # see if the kernel arguments have correct type\n kernel_source.check_argument_lists(kernel_name, arguments)\n\n # check for types and length of block_size_names\n util.check_block_size_names(block_size_names)\n"
] |
[
[
"numpy.zeros_like",
"numpy.isscalar"
]
] |
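A hedged sketch of calling the tune_kernel entry point whose options are documented at length in kernel_tuner/interface.py above. The vector-add CUDA kernel and the block_size_x candidates are illustrative choices, not taken from the repository; the argument order and the (results, env) return value follow the signature shown in the record.

import numpy

from kernel_tuner.interface import tune_kernel  # the module shown in the record above

kernel_string = """
__global__ void vector_add(float *c, const float *a, const float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
"""

size = 1000000
a = numpy.random.randn(size).astype(numpy.float32)
b = numpy.random.randn(size).astype(numpy.float32)
c = numpy.zeros_like(a)
n = numpy.int32(size)

# every combination of these values is compiled and benchmarked by the
# default brute_force strategy
tune_params = {"block_size_x": [32, 64, 128, 256, 512]}

results, env = tune_kernel("vector_add", kernel_string, size,
                           [c, a, b, n], tune_params)
best = min(results, key=lambda record: record["time"])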
alnah005/raccoon_identification
|
[
"1af1213b744e061bd6f5551b7f76585115f94b03"
] |
[
"Mega_detector_raccoon_transfer_learning/log_files/parse_logs_to_csv.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nfile: parse_logs_to_csv.py\n\n@author: Suhail.Alnahari\n\n@description: \n\n@created: 2021-03-01T13:30:00.015Z-06:00\n\n@last-modified: 2021-03-02T19:24:22.570Z-06:00\n\"\"\"\n\n# standard library\nimport re \n\n# 3rd party packages\nimport pandas as pd\nimport numpy as np\n\n# local source\n\n# columns = [\"Step\",\"DetectionBoxes_Precision/mAP\", \"DetectionBoxes_Precision/mAP (large)\", \"DetectionBoxes_Precision/mAP (medium)\",\"DetectionBoxes_Precision/mAP (small)\"\n# ,\"DetectionBoxes_Precision/[email protected]\",\"DetectionBoxes_Precision/[email protected]\",\"DetectionBoxes_Recall/AR@1\",\"DetectionBoxes_Recall/AR@10\",\"DetectionBoxes_Recall/AR@100\",\n# \"DetectionBoxes_Recall/AR@100 (large)\", \"DetectionBoxes_Recall/AR@100 (medium)\",\"DetectionBoxes_Recall/AR@100 (small)\", \"Loss/BoxClassifierLoss/classification_loss\",\n# \"Loss/BoxClassifierLoss/localization_loss\", \"Loss/RPNLoss/localization_loss\",\"Loss/RPNLoss/objectness_loss\",\"Loss/total_loss\",\"loss\"]\n# regex = r\"INFO:tensorflow:Saving[\\s\\n]+dict[\\s\\n]+for[\\s\\n]+global[\\s\\n]+step[\\s\\n]+(\\d*?):[\\s\\n]+DetectionBoxes_Precision\\/mAP[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Precision\\/mAP[\\s\\n]+\\(large\\)[\\s\\n]+=[\\s\\n]+(.*?), DetectionBoxes_Precision\\/mAP[\\s\\n]+\\(medium\\)[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Precision\\/mAP \\(small\\)[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Precision\\/mAP@\\.50IOU[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Precision\\/mAP@\\.75IOU[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Recall\\/AR@1[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Recall\\/AR@10[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Recall\\/AR@100[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Recall\\/AR@100[\\s\\n]+\\(large\\)[\\s\\n]+=[\\s\\n]+(.*?), DetectionBoxes_Recall\\/AR@100[\\s\\n]+\\(medium\\)[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+DetectionBoxes_Recall\\/AR@100[\\s\\n]+\\(small\\)[\\s\\n]+=[\\s\\n]+(.*?), Loss\\/BoxClassifierLoss\\/classification_loss[\\s\\n]+=[\\s\\n]+(.*?), Loss\\/BoxClassifierLoss\\/localization_loss[\\s\\n]+=[\\s\\n]+(.*?), Loss\\/RPNLoss\\/localization_loss[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+Loss\\/RPNLoss\\/objectness_loss[\\s\\n]+=[\\s\\n]+(.*?),[\\s\\n]+Loss\\/total_loss[\\s\\n]+=[\\s\\n]+(.*?), .*?,[\\s\\n]+loss[\\s\\n]+=[\\s\\n]+(.*?)\\n\"\n\ncolumns = [\"loss\",\"step\"]\nregex = r\"INFO:tensorflow:loss = (.*), step = (.*) \\(\"\nfilenames = [\"slurm-1160281.out\",\"slurm-1178608.out\"]\noutputFile = \"loss_result.csv\"\n\nfiletext = \"\"\nfor i in filenames:\n textfile = open(i, 'r')\n filetext += textfile.read()\n textfile.close()\nmatches = re.findall(regex, filetext)\n\ndf = pd.DataFrame(data=np.asarray(matches),columns=columns)\ndf.to_csv(outputFile,index=False)\n\n"
] |
[
[
"numpy.asarray"
]
] |
sagek21/swift-coreml-transformers
|
[
"02228fbd905ffc1cebfad31b73de19dd1bd34969"
] |
[
"model_generation/gpt2.py"
] |
[
"import coremltools\nimport coremltools.models.datatypes as datatypes\nfrom coremltools.models import neural_network as neural_network\nfrom coremltools.models.utils import save_spec\nimport numpy as np\n# from test import *\n\n# get weights\nfrom pytorch_transformers import GPT2LMHeadModel\nmodel_name = \"gpt2\"\nlm_head_model = GPT2LMHeadModel.from_pretrained(model_name)\nmodel = lm_head_model.transformer\n\nwte = model.wte.weight.data.numpy().transpose() # shape (768, 50257) /!\\ i hate this\nwpe = model.wpe.weight.data.numpy().transpose() # shape (768, 1024)\n\nsequence_length = 64\nsteps = 12\n\n# build model\ninput_features = [\n\t('input_ids', datatypes.Array(sequence_length)),\n\t('position_ids', datatypes.Array(sequence_length)),\n]\noutput_features = [('output_logits', None)]\n\nbuilder = neural_network.NeuralNetworkBuilder(\n\tinput_features,\n\toutput_features,\n\tmode=None,\n\tdisable_rank5_shape_mapping=True,\n)\nbuilder.add_expand_dims(\n\tname='input_ids_expanded_to_rank5',\n\tinput_name='input_ids',\n\toutput_name='input_ids_expanded_to_rank5',\n\taxes=(1, 2, 3, 4)\n)\nbuilder.add_expand_dims(\n\tname='position_ids_expanded_to_rank5',\n\tinput_name='position_ids',\n\toutput_name='position_ids_expanded_to_rank5',\n\taxes=(1, 2, 3, 4)\n)\nbuilder.add_embedding(\n\tname='token_embeddings',\n\tinput_name='input_ids_expanded_to_rank5',\n\toutput_name='token_embeddings',\n\tW=wte,\n\tb=None,\n\tinput_dim=50257,\n\toutput_channels=768,\n\thas_bias=False,\n)\nbuilder.add_embedding(\n\tname='positional_embeddings',\n\tinput_name='position_ids_expanded_to_rank5',\n\toutput_name='positional_embeddings',\n\tW=wpe,\n\tb=None,\n\tinput_dim=1024,\n\toutput_channels=768,\n\thas_bias=False,\n)\n\n# Input:, Output: (seq, 1, 768, 1, 1)\nbuilder.add_add_broadcastable(\n\tname='embeddings_addition',\n\tinput_names=['token_embeddings', 'positional_embeddings'],\n\toutput_name=f'{0}_previous_block'\n)\n\nfor i in range(steps):\n\tprint(i)\n\tln_weight = model.h[i].ln_1.weight.data.numpy().reshape((1, 1, 768, 1, 1))\n\tln_bias = model.h[i].ln_1.bias.data.numpy().reshape((1, 1, 768, 1, 1))\n\tln_epsilon = model.h[i].ln_1.variance_epsilon\n\n\tbuilder.add_mvn(\n\t\tname=f\"{i}_block_ln_1\",\n\t\tinput_name=f\"{i}_previous_block\",\n\t\t# output_name=f\"{i}_block_ln_1_output\",\n\t\toutput_name=f\"{i}_block_ln_1\",\n\t\tacross_channels=True,\n\t\tnormalize_variance=True,\n\t\tepsilon=ln_epsilon\n\t)\n\n\tbuilder.add_scale(\n\t\tname=f\"{i}_block_ln_1_scaled\",\n\t\tinput_name=f\"{i}_block_ln_1\",\n\t\toutput_name=f\"{i}_block_ln_1_scaled\",\n\t\tW=ln_weight,\n\t\tb=ln_bias,\n\t\thas_bias=True,\n\t\tshape_scale=[768],\n\t\tshape_bias=[768]\n\t)\n\n\tbuilder.add_transpose(\n\t\tname=f\"{i}_block_ln_1_reshape\",\n\t\tinput_name=f\"{i}_block_ln_1_scaled\",\n\t\toutput_name=f\"{i}_block_ln_1_scaled_transposed\",\n\t\taxes=(1, 0, 2, 3, 4)\n\t)\n\n\n\tconv_1D_bias = model.h[i].attn.c_attn.bias.data.numpy().reshape((1, 1, 2304, 1, 1))\n\tconv_1D_weights = model.h[i].attn.c_attn.weight.data.numpy().transpose().reshape((1, 768, 2304, 1, 1))\n\n\tbuilder.add_inner_product(\n\t\tname=f\"{i}_block_attn_conv\",\n\t\tinput_name=f\"{i}_block_ln_1_scaled_transposed\",\n\t\toutput_name=f\"{i}_block_attn_conv\",\n\t\tinput_channels=768,\n\t\toutput_channels=2304,\n\t\tW=conv_1D_weights,\n\t\tb=conv_1D_bias,\n\t\thas_bias=True\n\t)\n\n\tbuilder.add_split(\n\t\tname=f\"{i}_block_attn_qkv_split\",\n\t\tinput_name=f\"{i}_block_attn_conv\",\n\t\toutput_names=[f\"{i}_block_attn_q\", f\"{i}_block_attn_k\", 
f\"{i}_block_attn_v\"]\n\t)\n\n\tbuilder.add_rank_preserving_reshape(\n\t\tname=f\"{i}_block_attn_q_reshape\",\n\t\tinput_name=f\"{i}_block_attn_q\",\n\t\toutput_name=f\"{i}_block_attn_q_reshape\",\n\t\toutput_shape=(1, 1, sequence_length, 12, 64)\n\t)\n\n\tbuilder.add_transpose(\n\t\tname=f\"{i}_block_attn_q_reshape_permuted\",\n\t\tinput_name=f\"{i}_block_attn_q_reshape\",\n\t\toutput_name=f\"{i}_block_attn_q_reshape_permuted\",\n\t\taxes=(0, 1, 3, 2, 4)\n\t)\n\n\tbuilder.add_rank_preserving_reshape(\n\t\tname=f\"{i}_block_attn_k_reshape\",\n\t\tinput_name=f\"{i}_block_attn_k\",\n\t\toutput_name=f\"{i}_block_attn_k_reshape\",\n\t\toutput_shape=(1, 1, sequence_length, 12, 64)\n\t)\n\n\tbuilder.add_transpose(\n\t\tname=f\"{i}_block_attn_k_reshape_permuted\",\n\t\tinput_name=f\"{i}_block_attn_k_reshape\",\n\t\toutput_name=f\"{i}_block_attn_k_reshape_permuted\",\n\t\taxes=(0, 1, 3, 4, 2)\n\t)\n\n\tbuilder.add_rank_preserving_reshape(\n\t\tname=f\"{i}_block_attn_v_reshape\",\n\t\tinput_name=f\"{i}_block_attn_v\",\n\t\toutput_name=f\"{i}_block_attn_v_reshape\",\n\t\toutput_shape=(1, 1, sequence_length, 12, 64)\n\t)\n\n\tbuilder.add_transpose(\n\t\tname=f\"{i}_block_attn_v_reshape_permuted\",\n\t\tinput_name=f\"{i}_block_attn_v_reshape\",\n\t\toutput_name=f\"{i}_block_attn_v_reshape_permuted\",\n\t\taxes=(0, 1, 3, 2, 4)\n\t)\n\n\tbuilder.add_batched_mat_mul(\n\t\tname=f\"{i}_block_attn_qv_matmul\",\n\t\tinput_names=[f\"{i}_block_attn_q_reshape_permuted\", f\"{i}_block_attn_k_reshape_permuted\"],\n\t\toutput_name=f\"{i}_block_attn_qv_matmul\"\n\t)\n\n\tbuilder.add_scale(\n\t\tname=f\"{i}_block_attn_qv_matmul_scaled\",\n\t\tinput_name=f\"{i}_block_attn_qv_matmul\",\n\t\toutput_name=f\"{i}_block_attn_qv_matmul_scaled\",\n\t\tW=np.array(1/8),\n\t\tb=0,\n\t\thas_bias=False\n\t)\n\n\tbias_0 = model.h[i].attn.bias\n\tnd = ns = sequence_length\n\tb = (model.h[i].attn.bias[:, :, ns-nd:ns, :ns]).unsqueeze(0)\n\n\tbuilder.add_scale(\n\t\tname=f\"{i}_block_attn_bias\",\n\t\tinput_name=f\"{i}_block_attn_qv_matmul_scaled\",\n\t\toutput_name=f\"{i}_block_attn_bias\",\n\t\tW=b,\n\t\tb=None,\n\t\thas_bias=False,\n\t\tshape_scale=[1, sequence_length, sequence_length]\n\t)\n\n\tbias_constant_0 = - 1e4 * (1 - b)\n\n\tbuilder.add_bias(\n\t\tname=f\"{i}_block_attn_afterbias\",\n\t\tinput_name=f\"{i}_block_attn_bias\",\n\t\toutput_name=f\"{i}_block_attn_afterbias\",\n\t\t# output_name=f\"output_logits\",\n\t\tb=bias_constant_0,\n\t\tshape_bias=[sequence_length, sequence_length],\n\t)\n\n\tbuilder.add_squeeze(\n\t\tname=f\"{i}_squeezit\",\n\t\tinput_name=f\"{i}_block_attn_afterbias\",\n\t\toutput_name=f\"{i}_squeezit\",\n\t\taxes=[0, 1]\n\t)\n\n\tbuilder.add_softmax(\n\t\tname=f\"{i}_block_attn_softmax\",\n\t\tinput_name=f\"{i}_squeezit\",\n\t\toutput_name=f\"{i}_block_attn_softmax\",\n\t)\n\n\tbuilder.add_expand_dims(\n\t\tname=f\"{i}_expandit\",\n\t\tinput_name=f\"{i}_block_attn_softmax\",\n\t\toutput_name=f\"{i}_expandit\",\n\t\taxes=[0, 1]\n\t)\n\n\tbuilder.add_batched_mat_mul(\n\t\tname=f\"{i}_block_full_attention\",\n\t\tinput_names=[f\"{i}_expandit\", f\"{i}_block_attn_v_reshape_permuted\"],\n\t\toutput_name=f\"{i}_block_full_attention\"\n\t)\n\n\tbuilder.add_transpose(\n\t\tname=f\"{i}_block_full_attention_merged_t\",\n\t\tinput_name=f\"{i}_block_full_attention\",\n\t\toutput_name=f\"{i}_block_full_attention_merged_t\",\n\t\taxes=[0, 1, 3, 2, 
4]\n\t)\n\n\tbuilder.add_rank_preserving_reshape(\n\t\tname=f\"{i}_block_full_attention_merged\",\n\t\tinput_name=f\"{i}_block_full_attention_merged_t\",\n\t\toutput_name=f\"{i}_block_full_attention_merged\",\n\t\toutput_shape=[1, 1, 1, sequence_length, 768]\n\t)\n\n\tbuilder.add_transpose(\n\t\tname=f\"{i}_block_attn_conv_proj_t\",\n\t\tinput_name=f\"{i}_block_full_attention_merged\",\n\t\toutput_name=f\"{i}_block_attn_conv_proj_t\",\n\t\taxes=[0, 3, 4, 1, 2]\n\t)\n\n\tconv_1D_proj_bias = model.h[i].attn.c_proj.bias.data.numpy().reshape((1, 1, 768, 1, 1))\n\tconv_1D_proj_weights = model.h[i].attn.c_proj.weight.data.numpy().transpose().reshape((1, 768, 768, 1, 1))\n\n\t# Input:, Output: (1, 3, 768, 1, 1)\n\tbuilder.add_inner_product(\n\t\tname=f\"{i}_block_attn_conv_proj\",\n\t\tinput_name=f\"{i}_block_attn_conv_proj_t\",\n\t\toutput_name=f\"{i}_block_attn_conv_proj\",\n\t\tinput_channels=768,\n\t\toutput_channels=768,\n\t\tW=conv_1D_proj_weights,\n\t\tb=conv_1D_proj_bias,\n\t\thas_bias=True\n\t)\n\n\t# Input: (seq, 1, 768, 1, 1), Output: (1, seq, 768, 1, 1)\n\tbuilder.add_transpose(\n\t\tname=f\"{i}_previous_block_t\",\n\t\tinput_name=f'{i}_previous_block',\n\t\toutput_name=f\"{i}_previous_block_t\",\n\t\taxes=[1, 0, 2, 3, 4]\n\t)\n\n\t# Input: [(1, seq, 768, 1, 1), (1, seq, 768, 1, 1)], Output: (1, seq, 768, 1, 1)\n\tbuilder.add_add_broadcastable(\n\t\tname=f\"{i}_block_xa_sum\",\n\t\tinput_names=[f\"{i}_previous_block_t\", f\"{i}_block_attn_conv_proj\"],\n\t\toutput_name=f\"{i}_block_xa_sum\",\n\t\t# output_name=f\"output_logits\"\n\t)\n\n\tln_2_weight = model.h[i].ln_2.weight.data.numpy().reshape((1, 1, 768, 1, 1))\n\tln_2_bias = model.h[i].ln_2.bias.data.numpy().reshape((1, 1, 768, 1, 1))\n\tln_2_epsilon = model.h[i].ln_2.variance_epsilon\n\n\t# Input: (1, seq, 768, 1, 1), Output:\n\tbuilder.add_mvn(\n\t\tname=f\"{i}_block_ln_2\",\n\t\tinput_name=f\"{i}_block_xa_sum\",\n\t\toutput_name=f\"{i}_block_ln_2\",\n\t\tacross_channels=True,\n\t\tnormalize_variance=True,\n\t\tepsilon=ln_2_epsilon\n\t)\n\n\tbuilder.add_scale(\n\t\tname=f\"{i}_block_ln_2_scaled\",\n\t\tinput_name=f\"{i}_block_ln_2\",\n\t\t# output_name=f\"output_logits\",\n\t\toutput_name=f\"{i}_block_ln_2_scaled\",\n\t\tW=ln_2_weight,\n\t\tb=ln_2_bias,\n\t\thas_bias=True,\n\t\tshape_scale=[768],\n\t\tshape_bias=[768]\n\t)\n\n\tmlp_conv_1D_fc_bias = model.h[i].mlp.c_fc.bias.data.numpy().reshape((1, 1, 3072, 1, 1))\n\tmlp_conv_1D_fc_weights = model.h[i].mlp.c_fc.weight.data.numpy().transpose().reshape((1, 768, 3072, 1, 1))\n\n\t# Input:, Output: (1, 3, 3072, 1, 1)\n\tbuilder.add_inner_product(\n\t\tname=f\"{i}_block_mlp_conv_fc\",\n\t\tinput_name=f\"{i}_block_ln_2_scaled\",\n\t\toutput_name=f\"{i}_block_mlp_conv_fc\",\n\t\t# output_name=f\"output_logits\",\n\t\tinput_channels=768,\n\t\toutput_channels=3072,\n\t\tW=mlp_conv_1D_fc_weights,\n\t\tb=mlp_conv_1D_fc_bias,\n\t\thas_bias=True\n\t)\n\n\tbuilder.add_gelu(\n\t\tname=f\"{i}_block_mlp_gelu\",\n\t\tinput_name=f\"{i}_block_mlp_conv_fc\",\n\t\toutput_name=f\"{i}_block_mlp_gelu\",\n\t\t# output_name=f\"output_logits\",\n\t\tmode='TANH_APPROXIMATION'\n\t)\n\n\tmlp_conv_1D_proj_bias = model.h[i].mlp.c_proj.bias.data.numpy().reshape((1, 1, 768, 1, 1))\n\tmlp_conv_1D_proj_weights = model.h[i].mlp.c_proj.weight.data.numpy().transpose().reshape((1, 3072, 768, 1, 1))\n\n\t# Input:, Output: (1, 3, 3072, 1, 1)\n\tbuilder.add_inner_product(\n\t\tname=f\"{i}_block_mlp_conv_proj\",\n\t\tinput_name=f\"{i}_block_mlp_gelu\",\n\t\toutput_name=f\"{i}_block_mlp_conv_proj\",\n\t\t# 
output_name=f\"output_logits\",\n\t\tinput_channels=3072,\n\t\toutput_channels=768,\n\t\tW=mlp_conv_1D_proj_weights,\n\t\tb=mlp_conv_1D_proj_bias,\n\t\thas_bias=True\n\t)\n\n\tbuilder.add_add_broadcastable(\n\t\tname=f\"{i}_block_xm_sum\",\n\t\tinput_names=[f\"{i}_block_xa_sum\", f\"{i}_block_mlp_conv_proj\"],\n\t\t# output_name=f\"output_logits\"\n\t\toutput_name=f\"{i + 1}_previous_block_final\"\n\t)\n\n\tbuilder.add_transpose(\n\t\tname=f\"{i}_block_xm_sum_t\",\n\t\tinput_name=f\"{i + 1}_previous_block_final\",\n\t\toutput_name=f\"{i + 1}_previous_block\",\n\t\taxes=[1, 0, 2, 3, 4]\n\t)\n\n\nln_f_weight = model.ln_f.weight.data.numpy().reshape((1, 1, 768, 1, 1))\nln_f_bias = model.ln_f.bias.data.numpy().reshape((1, 1, 768, 1, 1))\nln_f_epsilon = model.ln_f.variance_epsilon\n\n# Input: (1, seq, 768, 1, 1), Output:\nbuilder.add_mvn(\n\tname=f\"ln_f\",\n\tinput_name=f\"{steps}_previous_block_final\",\n\toutput_name=f\"ln_f\",\n\t# output_name=f\"output_logits\",\n\tacross_channels=True,\n\tnormalize_variance=True,\n\tepsilon=ln_f_epsilon\n)\n\nbuilder.add_scale(\n\tname=f\"ln_f_scaled\",\n\tinput_name=f\"ln_f\",\n\toutput_name=f\"ln_f_scaled\",\n\t# output_name=f\"output_logits\",\n\tW=ln_f_weight,\n\tb=ln_f_bias,\n\thas_bias=True,\n\tshape_scale=[768],\n\tshape_bias=[768]\n)\n\nlm_head_weights = lm_head_model.lm_head.weight.data.numpy().reshape((1, 50257, 768, 1, 1))\n\nbuilder.add_inner_product(\n\tname=\"lm_head\",\n\tinput_name=\"ln_f_scaled\",\n\toutput_name=\"output_logits\",\n\tinput_channels=768,\n\toutput_channels=50257,\n\tW=lm_head_weights,\n\tb=None,\n\thas_bias=False\n)\n\n# compile spec to model\nmlmodel = coremltools.models.MLModel(builder.spec)\n\nsave_spec(builder.spec, f'../Resources/{model_name}-{sequence_length}-{steps}.mlmodel')\n# model = coremltools.models.MLModel('gpt2.mlmodel')\n\ninput_ids = np.zeros(sequence_length)\nposition_ids = np.arange(sequence_length).astype(np.float)\n\ninput_data = {\n\t'input_ids': input_ids,\n\t'position_ids': position_ids,\n}\n\n# predictions = mlmodel.predict(input_data)[\"output_logits\"]\n# equal = np.amax(predictions - mlp_conv_proj.detach().numpy())\n\nprint(predictions)\n\n\n# save_spec(builder.spec, 'gpt2.mlmodel')\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.zeros"
]
] |
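A hedged sketch of loading and querying the Core ML model exported by the script above; it mirrors the predict call that the script itself leaves commented out. The .mlmodel path follows the save_spec call for model_name gpt2, sequence_length 64 and steps 12, and prediction requires a Core ML runtime (macOS).

import numpy as np
import coremltools

sequence_length = 64

# path written by save_spec(...) in the generation script above
mlmodel = coremltools.models.MLModel('../Resources/gpt2-64-12.mlmodel')

input_data = {
    'input_ids': np.zeros(sequence_length),
    'position_ids': np.arange(sequence_length, dtype=np.float64),
}

# 'output_logits' is the lm_head output declared when the network was built
logits = mlmodel.predict(input_data)['output_logits']
print(np.asarray(logits).shape)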
sajid-ali-nu/multislice
|
[
"1e36e067ff53809f4cc6286562b221c4bddbcb60"
] |
[
"multislice/prop_utils.py"
] |
[
"import numpy as np\nfrom multislice import prop\nfrom .fft_utils import FFT_2d_Obj\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport time\nimport numexpr as ne\nfrom skimage.restoration import unwrap_phase\n\n__all__ = ['modify',\n 'modify_two_materials_case_1',\n 'modify_two_materials_case_2',\n 'decide',\n 'find_edge',\n 'get_focal_spot',\n 'plot_2d_complex',\n 'number_of_steps',\n 'optic_illumination', ]\n\n\n'''\ndecide : decide whether to use TF or IR approach depending on the distance\n\nInputs - step size in z, step size in xy, support length, wavelength\n\nOutputs - propogator\n'''\n\n\ndef decide(step_z, step_xy, L, wavel):\n dist = step_z\n sampling = step_xy\n critical = wavel * dist / L\n if ((L**2) / (wavel * step_z)) > 0.1:\n\n if sampling > critical:\n p = prop.propTF\n print('propogator to be used : Transfer Function')\n else:\n p = prop.prop1FT\n print('propogator to be used : Single Fourier Transform')\n else:\n p = prop.FF\n print('propogator to be used : Fraunhofer')\n return p\n\n\n'''\nmodify : wavefront is modified according to the material present\n\nInputs - wavefront, slice properties (here the zone plate),step size in z , wavelength\n\nOutputs - modified wavefront\n\n(used as part of the multislice loop)\n'''\n\n\ndef modify(wavefront, zp_delta, zp_beta, step_z, wavel):\n dist = step_z\n kz = 2 * np.pi * dist / wavel\n beta_slice = zp_beta\n delta_slice = zp_delta\n '''\n Compute the following with numexpr\n wavefront * np.exp((kz * delta_slice) * 1j) * np.exp(-kz * beta_slice)\n '''\n return ne.evaluate('wavefront*exp(kz*(delta_slice*1j - beta_slice))')\n\n\n'''\nmodify_two_materials_case_1 : wavefront is modified according to the materials present that are horizontally stacked\n (Fig 8, a-(i) in Optics Express Vol. 25, Issue 3, pp. 1831-1846)\n\nInputs - wavefront, propogation distance,wavelength, pattern of first material, delta, beta(first material),\n pattern_2,delta, beta(second material)\n\nOutputs - modified wavefront\n\n(used as part of the multislice loop)\n'''\n\n\ndef modify_two_materials_case_1(wavefront, step_z, wavel, frac_1,\n frac_2, pattern_1, delta_1, beta_1, pattern_2, delta_2, beta_2):\n dist = step_z\n kz_1 = 2 * np.pi * dist * frac_1 / wavel\n kz_2 = 2 * np.pi * dist * frac_2 / wavel\n\n '''\n Collapsing the following into one numexpr statement :\n modulation_1 = pattern_1*np.exp((kz_1 * delta_1)*1j -kz_1*beta_1)\n modulation_2 = pattern_2*np.exp((kz_2 * delta_2)*1j -kz_2*beta_2)\n output = wavefront * ( modulation_1 * modulation_2 )\n '''\n\n return ne.evaluate(\n 'wavefront*(pattern_1*exp((kz_1*delta_1)*1j -kz_1*beta_1)*pattern_2*exp((kz_2*delta_2)*1j -kz_2*beta_2))')\n\n\n'''\nmodify_two_materials_case_2 : wavefront is modified according to the materials present that are vertically stacked\n (Fig 8, a-(ii) in Optics Express Vol. 25, Issue 3, pp. 
1831-1846)\n\nInputs - wavefront, propogation distance,wavelength, pattern of first material, delta, beta(first material),\n pattern_2,delta, beta(second material)\n\nOutputs - modified wavefront\n\n(used as part of the multislice loop)\n'''\n\n\ndef modify_two_materials_case_2(\n wavefront, step_z, wavel, pattern_1, delta_1, beta_1, pattern_2, delta_2, beta_2):\n pi = np.pi\n kz = ne.evaluate('2 * pi * step_z /wavel')\n\n '''\n Collapsing the following into one numexpr statement :\n modulation_1 = exp((kz*delta_1)*1j - kz*beta_1)\n modulation_2 = exp((kz*delta_2)*1j - kz*beta_2)\n output = wavefront * ( pattern_1*modulation_1 + pattern_2*modulation_2 )\n '''\n\n return ne.evaluate(\n 'wavefront * ( pattern_1*exp((kz*delta_1)*1j - kz*beta_1)+pattern_2*exp((kz*delta_2)*1j - kz*beta_2) )')\n\n\n'''\nfind_edge : get the distance of the pixel of interest from the edge of the array\nInputs - x - co-ordinate of the pixel (where the wavefront in the focal plane hits it's maximum value),\n grid_size, n - length of the spot we would like to capture\nOutputs - if the desired length 'n' can be safely captured, the output is n, else the output is the number\n of pixels one can capture (the only reason this would happen\n is if the focal spot is too close to the edge of the output wavefront due to tilt\n (remember that intput wavefront gets tilted))\n'''\n\n\ndef find_edge(x, grid_size, n):\n if x < (grid_size / 2):\n if x > n:\n return n\n else:\n return int(np.floor(x / 2) * 2)\n else:\n if (grid_size - x) > n:\n return n\n else:\n return int(np.floor((grid_size - x) / 2) * 2)\n\n\n'''\nget_focal_spot : get the region in the output plane containing the focal spot\nInputs : focal_plane - the wavefront at the focal plane, grid_size, n - half-size of the array to be returned\nOutputs : a numpy array containing the focal spot\n'''\n\n\ndef get_focal_spot(focal_plane_, grid_size, n=250):\n x_, y_ = np.where(np.abs(focal_plane_) == np.max(np.abs(focal_plane_)))\n x_ = x_[0]\n y_ = y_[0]\n\n x1 = find_edge(x_, grid_size, n)\n y1 = find_edge(y_, grid_size, n)\n\n print('max_loc :', x_, y_, x1, y1)\n\n focal_spot_ = np.zeros((2 * n, 2 * n), dtype='complex')\n if (x1 + y1) != 2 * n:\n focal_spot_[n - x1:n + x1, n - y1:n +\n y1] = focal_plane_[x_ - x1:x_ + x1, y_ - y1:y_ + y1]\n else:\n focal_spot_[:, :] = focal_plane_[x_ - n:x_ + n, y_ - n:y_ + n]\n return focal_spot_, x_, y_, np.max(focal_plane_)\n\n\n'''\nplot_2d_complex : function used to plot complex 2d array\n\nInputs - input_array : input array to be plotted, name : name of the variable being plotted, mode : used to specify linear or log plot , coords : used to plot the boundaries(as **kwargs)\n\nOutputs - plots of magnitude and phase!\n'''\n\n\ndef plot_2d_complex(input_array, mode='linear',\n name='input_array', *args, **kwargs):\n fig, (ax1, ax2) = plt.subplots(1, 2)\n if 'coords' in kwargs:\n if mode == 'linear':\n im1 = ax1.imshow((np.abs(input_array)), extent=kwargs['coords'])\n if mode == 'log':\n im1 = ax1.imshow(np.log((np.abs(input_array))),\n extent=kwargs['coords'])\n ax1.title.set_text('magnitude of ' + str(name) +\n ' ( in ' + str(mode) + ' scale)')\n ax1.title.set_y(1.08)\n fig.colorbar(im1, ax=ax1)\n scaling = np.round(np.log10(np.abs(kwargs['coords'][0])))\n scaling = np.int(scaling)\n ax1.set_xlabel('axes in 10^(' + str(scaling) + ')')\n im2 = ax2.imshow(unwrap_phase(np.angle(input_array)),\n extent=kwargs['coords'])\n ax2.title.set_text('phase of ' + str(name))\n ax2.title.set_y(1.08)\n fig.subplots_adjust(right=1.75)\n 
ax2.set_xlabel('axes in 10^(' + str(scaling) + ')')\n fig.colorbar(im2, ax=ax2)\n else:\n if mode == 'linear':\n im1 = ax1.imshow((np.abs(input_array)))\n if mode == 'log':\n im1 = ax1.imshow(np.log((np.abs(input_array))))\n ax1.title.set_text('magnitude of ' + str(name) +\n ' ( in ' + str(mode) + ' scale)')\n ax1.title.set_y(1.08)\n fig.colorbar(im1, ax=ax1)\n im2 = ax2.imshow(unwrap_phase(np.angle(input_array)))\n ax2.title.set_text('phase of ' + str(name))\n ax2.title.set_y(1.08)\n fig.subplots_adjust(right=1.75)\n fig.colorbar(im2, ax=ax2)\n plt.show()\n if 'print_max' in args:\n print('maximum value of ' + str(name) + ': ', np.max(abs(input_array)))\n print('minimum value of ' + str(name) + ': ', np.min(abs(input_array)))\n print('location of maxima in ' + str(name) + ': ',\n np.where(abs(input_array) == np.max(abs(input_array))))\n\n\n'''\nnumber_of_steps : calculate number of steps required for propogation along direction of beam\n\nInputs - step_xy : sampling size in xy plane, wavel : wavelength, thickness : thickness of object\n\nOutputs - number of steps : number of steps for propogation through the object\n(As per the meteric described in Optics Express Vol. 25, Issue 3, pp. 1831-1846)\n'''\n\n\ndef number_of_steps(step_xy, wavel, thickness):\n eps1 = 0.1\n eps2 = 0.1\n delta_z_suggested = ((eps2 * (step_xy**2)) / (eps1**2 * wavel))\n number_of_steps = int(thickness / delta_z_suggested)\n print('suggested step size :', delta_z_suggested)\n print(\n 'number of steps required for propogation through the zone plate :',\n number_of_steps)\n return number_of_steps\n\n\n'''\noptic_illumination : calculate illumination from zone plate (or any other xray optic)\n\nInputs - wavefront : input wave, pattern : pattern of the optic along one plane,delta,beta : refractive index of the optic, thickness : thickness of zone plate, number_of_steps_zp : number of steps for propogation through zp, d1 : propogation distance before zp, d2 : propogation distance after zp (typically the focal length)\n\nOutputs - wavefront : output wave\n'''\n\n\ndef optic_illumination(wavefront_input,\n pattern, delta, beta,\n thickness, step_xy, wavel,\n number_of_steps, d1, d2, use_fftw='True', **kwargs):\n\n wavefront = np.copy(wavefront_input)\n L = np.shape(wavefront_input)[0] * step_xy\n xray_object = str('xray optical element')\n mode = str('serial')\n if use_fftw == 'True':\n fft_obj = FFT_2d_Obj(np.shape(wavefront))\n else:\n fft_obj = None\n\n if 'xray_object' in kwargs:\n xray_object = kwargs['xray_object']\n if 'mode' in kwargs:\n mode = kwargs['mode']\n\n # pre object\n if d1 != 0:\n print('Free space propogation before ' + str(xray_object) + '...')\n step_z = d1\n p = decide(step_z, step_xy, L, wavel)\n print('Fresnel Number :', ((L**2) / (wavel * step_z)))\n wavefront, L = p(wavefront, step_xy, L, wavel, step_z, fft_obj)\n\n # through object\n step_z = thickness / number_of_steps\n p = decide(step_z, step_xy, L, wavel)\n print('Fresnel Number :', ((L**2) / (wavel * step_z)))\n time.sleep(1)\n if mode == 'parallel':\n for i in range(number_of_steps):\n t0 = time.time()\n wavefront = modify_two_materials_case_2(\n wavefront, step_z, wavel, pattern, delta, beta, np.ones(np.shape(pattern)) - pattern, 0, 0)\n t1 = time.time()\n\n wavefront, L = p(wavefront, step_xy, L, wavel, step_z, fft_obj)\n t2 = time.time()\n print(i, 'modify : ', t1 - t0, 'prop :', t2 - t1)\n else:\n for i in tqdm(range(number_of_steps),\n desc='Propogation through ' + str(xray_object) + '...'):\n wavefront = 
modify_two_materials_case_2(\n wavefront, step_z, wavel, pattern, delta, beta, np.ones(np.shape(pattern)) - pattern, 0, 0)\n wavefront, L = p(wavefront, step_xy, L, wavel, step_z)\n\n # post object\n if d2 != 0:\n step_z = d2\n print('Free space propogation after ' + str(xray_object) + '...')\n p = decide(step_z, step_xy, L, wavel)\n print('Fresnel Number :', ((L**2) / (wavel * step_z)))\n wavefront, L = p(wavefront, step_xy, L, wavel, step_z)\n\n wavefront_out = np.copy(wavefront)\n del wavefront\n return wavefront_out, L\n"
] |
[
[
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.max",
"numpy.copy",
"numpy.int",
"numpy.shape",
"numpy.floor",
"numpy.angle",
"matplotlib.pyplot.show",
"numpy.zeros"
]
] |
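Note on the record above: its `number_of_steps` helper applies the multislice step-size criterion it cites (Optics Express 25(3), 1831-1846), delta_z <= eps2 * step_xy**2 / (eps1**2 * wavel). A minimal standalone sketch of that calculation; the pixel size, wavelength and thickness values here are illustrative, not taken from the record:

    def suggested_steps(step_xy, wavel, thickness, eps1=0.1, eps2=0.1):
        # delta_z <= eps2 * step_xy**2 / (eps1**2 * wavel), as in the record's number_of_steps()
        delta_z = eps2 * step_xy ** 2 / (eps1 ** 2 * wavel)
        return max(1, int(thickness / delta_z))

    # illustrative values: 10 nm pixels, 0.1 nm wavelength, 1 micron thick optic
    print(suggested_steps(step_xy=10e-9, wavel=1e-10, thickness=1e-6))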
dumpmemory/NonDeepNetworks
|
[
"5513bf588f4e64c99583440507232675c2e21e34"
] |
[
"imagenet/timm/models/simplenet_impl/utils.py"
] |
[
"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport tqdm\nimport time as tm\nfrom timm.models.layers.drop import drop_path, drop_block_fast_2d\n\ndef round(f):\n return math.ceil(f / 2.) * 2\n\ndef num_param(net):\n return sum(p.numel() for p in net.parameters() if p.requires_grad)\n\ndef trace_net(net, inp, save_pth=\"traced_model.pt\"):\n traced_script_module = torch.jit.trace(\n net, inp, strict=True)\n traced_script_module.save(save_pth)\n\nclass MultiBatchNorm2d(nn.Module):\n def __init__(self, n1, n2, num_branch=None):\n super().__init__()\n self.num_branch = num_branch\n if num_branch is None:\n self.b1 = nn.BatchNorm2d(n1)\n self.b2 = nn.BatchNorm2d(n2)\n else:\n assert n2 is None\n self.b = nn.ModuleList(\n [nn.BatchNorm2d(n1) for _ in range(num_branch)])\n\n def forward(self, x):\n if self.num_branch is None:\n x1, x2 = x\n x1 = self.b1(x1)\n x2 = self.b2(x2)\n out = (x1, x2)\n else:\n out = []\n for _x, _b in zip(x, self.b):\n out.append(_b(_x))\n\n return out\n\n\nclass Concat2d(nn.Module):\n def __init__(self, shuffle=False):\n super().__init__()\n self.shuffle = shuffle\n\n def forward(self, x):\n if self.shuffle:\n b, _, h, w = x[0].shape\n x = [_x.unsqueeze(1) for _x in x]\n out = torch.cat(x, 1)\n out = out.transpose(1, 2)\n out = torch.reshape(out, (b, -1, h, w))\n else:\n out = torch.cat(x, 1)\n return out\n\n\nclass ReLU2d(nn.Module):\n def __init__(self):\n super().__init__()\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x1, x2 = x\n x1 = self.relu(x1)\n x2 = self.relu(x2)\n return (x1, x2)\n\n\n\"\"\"\nsource:\nhttps://github.com/moskomule/senet.pytorch/blob/23839e07525f9f5d39982140fccc8b925fe4dee9/senet/se_module.py#L4-L19\n\n\"\"\"\nclass SELayer(nn.Module):\n def __init__(self, channel, out_channel=None, reduction=16, version=1):\n super(SELayer, self).__init__()\n self.version = version\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n if out_channel is None:\n out_channel = channel\n self.channel = channel\n self.out_channel = out_channel\n if version == 1:\n self.fc = nn.Sequential(\n nn.Linear(channel, out_channel // reduction, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(out_channel // reduction, out_channel, bias=False),\n nn.Sigmoid()\n )\n elif version == 2:\n reduction = reduction // 2\n self.fc = nn.Sequential(\n nn.AvgPool1d(reduction),\n nn.Linear(channel // reduction, out_channel, bias=False),\n nn.Sigmoid()\n )\n else:\n assert False, version\n\n def forward(self, x, x2=None):\n if x2 is None:\n assert self.out_channel == self.channel\n x2 = x\n b, c, _, _ = x.size()\n b, c2, _, _ = x2.size()\n assert c == self.channel\n assert c2 == self.out_channel\n\n y = self.avg_pool(x).view(b, c)\n if self.version == 2:\n y = y.view(b, 1, c)\n y = self.fc(y).view(b, c2, 1, 1)\n return x2 * y.expand_as(x2)\n\n\nclass SE1(nn.Module):\n # Squeeze-and-excitation block in https://arxiv.org/abs/1709.01507\n # ch_in, ch_out, number, shortcut, groups, expansion\n def __init__(self, c_in, c_out, n=1, shortcut=True, g=1, e=0.5, ver=1):\n super(SE1, self).__init__()\n self.ver = ver\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.cvsig = ConvSig(c_in, c_out, 1, 1, g=g)\n\n def forward(self, x):\n x = self.cvsig(self.avg_pool(x))\n if self.ver == 2:\n x = 2 * x\n return x\n\ndef update_bn(loader, model, total_imgs=1000, device=None):\n r\"\"\"Updates BatchNorm running_mean, running_var buffers in the model.\n It performs one pass over data in `loader` to estimate the activation\n statistics for BatchNorm layers in the model.\n 
Arguments:\n loader (torch.utils.data.DataLoader): dataset loader to compute the\n activation statistics on. Each data batch should be either a\n tensor, or a list/tuple whose first element is a tensor\n containing data.\n model (torch.nn.Module): model for which we seek to update BatchNorm\n statistics.\n device (torch.device, optional): If set, data will be trasferred to\n :attr:`device` before being passed into :attr:`model`.\n Example:\n >>> loader, model = ...\n >>> torch.optim.swa_utils.update_bn(loader, model)\n .. note::\n The `update_bn` utility assumes that each data batch in :attr:`loader`\n is either a tensor or a list or tuple of tensors; in the latter case it\n is assumed that :meth:`model.forward()` should be called on the first\n element of the list or tuple corresponding to the data batch.\n \"\"\"\n # print(model)\n\n momenta = {}\n for module in model.modules():\n if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):\n module.running_mean = torch.zeros_like(module.running_mean)\n module.running_var = torch.ones_like(module.running_var)\n momenta[module] = module.momentum\n\n if not momenta:\n return\n\n was_training = model.training\n model.train()\n for module in momenta.keys():\n module.momentum = None\n module.num_batches_tracked *= 0\n\n if device != None:\n model.to(device)\n\n num_images_total = 0\n\n for i, data in tqdm.tqdm(enumerate(loader), total = total_imgs):\n if i*loader.batch_size >= total_imgs:\n break\n img = data['img']\n img = img.to(device)\n model(img)\n\n for bn_module in momenta.keys():\n bn_module.momentum = momenta[bn_module]\n model.train(was_training)\n\n print(\"update_bn is completed successfully, total_imgs = \", total_imgs)\n\n\ndef autopad(k, p=None): # kernel, padding\n # Pad to 'same'\n if p is None:\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad\n return p\n\nclass Conv(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(Conv, self).__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.bn = nn.BatchNorm2d(c2)\n self.act = nn.SiLU() if act else nn.Identity()\n\n def forward(self, x):\n return self.act(self.bn(self.conv(x)))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))\n\nclass ConvSig(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(ConvSig, self).__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.act = nn.Sigmoid() if act else nn.Identity()\n\n def forward(self, x):\n return self.act(self.conv(x))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))\n\n\nclass ConvSqu(nn.Module):\n # Standard convolution\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\n super(ConvSqu, self).__init__()\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\n self.act = nn.SiLU() if act else nn.Identity()\n\n def forward(self, x):\n return self.act(self.conv(x))\n\n def fuseforward(self, x):\n return self.act(self.conv(x))\n\n# source: https://github.com/BangguWu/ECANet/blob/master/models/eca_module.py\nclass eca_layer(nn.Module):\n \"\"\"Constructs a ECA module.\n Args:\n channel: Number of channels of the input feature map\n k_size: Adaptive selection of kernel size\n \"\"\"\n def __init__(self, k_size=3):\n super(eca_layer, 
self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv = nn.Conv1d(1, 1, kernel_size=k_size,\n padding=(k_size - 1) // 2, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n # feature descriptor on the global spatial information\n y = self.avg_pool(x)\n\n # Two different branches of ECA module\n y = self.conv(\n y.squeeze(-1).transpose(-1, -2)\n ).transpose(-1, -2).unsqueeze(-1)\n\n # Multi-scale information fusion\n y = self.sigmoid(y)\n\n return y\n\ndef conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):\n result = nn.Sequential()\n result.add_module('conv', nn.Conv2d(in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False))\n result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))\n return result\n\nclass RepVGGBlock_train(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size,\n stride=1, dilation=1, groups=1, drop_path=0.0, activation=nn.ReLU(),\n padding_mode='zeros', deploy=False, avg_pool=False):\n super(RepVGGBlock_train, self).__init__()\n self.avg_pool = avg_pool\n self.deploy = deploy\n self.drop_path = drop_path\n self.groups = groups\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride\n\n # 0x0, 1x1, 3x3\n self.largest_kernel_size = kernel_size\n self.kernels = (kernel_size // 2) + 2\n\n self.conv_out_channels = out_channels * self.kernels\n\n #self.kernels = 3\n #self.largest_kernel_size = (self.kernels-2)*2 + 1\n\n #assert kernel_size == 3\n\n padding=dilation*(kernel_size-1)//2\n\n self.nonlinearity = activation\n\n self.conv = nn.Conv2d(in_channels=in_channels, out_channels=self.conv_out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=False, padding_mode=padding_mode)\n\n self.bn = nn.BatchNorm2d(self.conv_out_channels)\n\n #print(f\" self.conv.weight.shape = {self.conv.weight.shape}\")\n\n self.prepare_mult_add()\n self.change_weights()\n\n\n def forward(self, x):\n if self.training:\n self.change_weights()\n\n conv_out = self.conv(x)\n\n if not self.bn is None:\n conv_out = self.bn(conv_out)\n\n out_list = []\n for i in range(self.kernels):\n start = i*self.out_channels\n end = (i+1)*self.out_channels\n drop_path_in = conv_out[:,start:end,:,:]\n if i > 0:\n drop_path_in = drop_path(drop_path_in, self.drop_path, self.training)\n out_list.append(drop_path_in)\n\n out = torch.sum(torch.stack(out_list), dim=0)\n\n \"\"\"\n out = conv_out[:,0:self.out_channels,:,:]\n for i in range(self.kernels):\n start = i*self.out_channels\n end = (i+1)*self.out_channels\n if i > 0:\n drop_path_in = conv_out[:,start:end,:,:]\n drop_path_in = drop_path(drop_path_in, self.drop_path, self.training)\n out.add_(drop_path_in)\n \"\"\"\n\n out = self.nonlinearity(out)\n\n return out\n\n\n def prepare_mult_add(self):\n with torch.no_grad():\n n = self.out_channels\n c = self.in_channels // self.groups\n h = self.largest_kernel_size\n w = self.largest_kernel_size\n # each_conv_shape = [n, c, h, w]\n\n dtype = self.conv.weight.dtype\n device = self.conv.weight.device\n shape = self.conv.weight.shape\n\n mult_list = []\n add_list = []\n for i in range(self.kernels):\n ksize = (i-1)*2 + 1\n pad = (self.largest_kernel_size - ksize) // 2\n if i==0:\n mult = torch.zeros([n,c,h,w], dtype=dtype, requires_grad=False)\n if self.avg_pool:\n pad = self.largest_kernel_size - self.stride\n add = torch.ones([n,c,self.stride,self.stride], 
dtype=dtype, requires_grad=False) / (self.stride*self.stride)\n add = torch.nn.functional.pad(add, [pad//2, pad - pad//2, pad//2, pad - pad//2])\n else:\n add = torch.ones([n,c,1,1], dtype=dtype, requires_grad=False)\n add = torch.nn.functional.pad(add, [w//2, w//2, h//2, h//2])\n elif i>=1:\n mult = torch.ones([n,c,ksize,ksize], dtype=dtype, requires_grad=False)\n mult = torch.nn.functional.pad(mult, [pad, pad, pad, pad])\n add = torch.zeros([n,c,h,w], dtype=dtype, requires_grad=False)\n\n #print(f\"\\n i = {i}, mult = {mult} \\n end of mult, i = {i}. \\n\")\n #tm.sleep(5)\n #print(f\"\\n i = {i}, add = {add} \\n end of add, i = {i}. \\n\")\n #tm.sleep(5)\n\n mult_list.append(mult)\n add_list.append(add)\n\n self.mult_weights = torch.cat(mult_list, dim=0)\n self.add_weights = torch.cat(add_list, dim=0)\n self.mult_weights.requires_grad=False\n self.add_weights.requires_grad=False\n\n\n def change_weights(self):\n\n #print(f\" self.conv.weight.shape = {self.conv.weight.shape}, mult_weights.shape={mult_weights.shape}, add_weights.shape={add_weights.shape}\")\n\n if self.mult_weights.device != self.conv.weight.device or self.add_weights.device != self.conv.weight.device:\n #print(f\"\\n move self.mult_weights to {self.conv.weight.device}\")\n self.mult_weights = self.mult_weights.to(self.conv.weight.device)\n self.add_weights = self.add_weights.to(self.conv.weight.device)\n #print(f\" moved self.conv.weight.device = {self.conv.weight.device}, self.mult_weights.device={self.mult_weights.device}, self.add_weights.device={self.add_weights.device}\")\n\n\n #print(f\" self.conv.weight.device = {self.conv.weight.device}, self.mult_weights.device={self.mult_weights.device}, self.add_weights.device={self.add_weights.device}\")\n #print(\"change_weights()\")\n\n with torch.no_grad():\n self.conv.weight.data.mul(self.mult_weights).add(self.add_weights)\n #self.conv.weight.data = self.conv.weight.data * self.mult_weights + self.add_weights\n\n\n #self.conv.weight.requires_grad=True\n\n #print(f\"\\n self.conv.weight[0,:,:,:] = \\n {self.conv.weight[0,:,:,:]} \\n end of self.conv.weight. 
\\n\")\n #tm.sleep(5)\n\n def fuse_conv_bn(self):\n \"\"\"\n # n,c,h,w - conv\n # n - bn (scale, bias, mean, var)\n\n if type(self.bn) is nn.Identity or type(self.bn) is None:\n return\n\n self.conv.weight\n running_mean = self.bn.running_mean\n running_var = self.bn.running_var\n gamma = self.bn.weight\n beta = self.bn.bias\n eps = self.bn.eps\n \"\"\"\n std = (self.bn.running_var + self.bn.eps).sqrt()\n bias = self.bn.bias - self.bn.running_mean * self.bn.weight / std\n\n t = (self.bn.weight / std).reshape(-1, 1, 1, 1)\n weights = self.conv.weight * t\n\n self.bn = nn.Identity()\n self.conv = nn.Conv2d(in_channels = self.conv.in_channels,\n out_channels = self.conv.out_channels,\n kernel_size = self.conv.kernel_size,\n stride=self.conv.stride,\n padding = self.conv.padding,\n dilation = self.conv.dilation,\n groups = self.conv.groups,\n bias = True,\n padding_mode = self.conv.padding_mode)\n\n self.conv.weight = torch.nn.Parameter(weights)\n self.conv.bias = torch.nn.Parameter(bias)\n\n\n\n\nclass RepVGGBlock_train_shared_bn(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size,\n stride=1, dilation=1, groups=1, drop_path=0.0, activation=nn.ReLU(),\n padding_mode='zeros', deploy=False, avg_pool=False):\n super(RepVGGBlock_train_shared_bn, self).__init__()\n self.avg_pool = avg_pool\n self.deploy = deploy\n self.drop_path = drop_path\n self.groups = groups\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.stride = stride\n\n # 0x0, 1x1, 3x3\n self.largest_kernel_size = kernel_size\n self.kernels = (kernel_size // 2) + 2\n\n self.conv_in_channels = in_channels * self.kernels\n\n #self.kernels = 3\n #self.largest_kernel_size = (self.kernels-2)*2 + 1\n\n #assert kernel_size == 3\n\n padding=dilation*(kernel_size-1)//2\n\n self.nonlinearity = activation\n\n self.conv = nn.Conv2d(in_channels=self.conv_in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=False, padding_mode=padding_mode)\n\n self.bn = nn.BatchNorm2d(out_channels)\n\n #print(f\" self.conv.weight.shape = {self.conv.weight.shape}\")\n\n self.prepare_mult_add()\n self.change_weights()\n\n\n def forward(self, x):\n if self.training:\n self.change_weights()\n\n input_list = []\n for i in range(self.kernels):\n drop_path_in = x\n if i > 0:\n drop_path_in = drop_path(drop_path_in, self.drop_path, self.training)\n input_list.append(drop_path_in)\n input = torch.cat(input_list, dim=1)\n\n conv_out = self.conv(input)\n\n if not self.bn is None:\n conv_out = self.bn(conv_out)\n\n out = self.nonlinearity(conv_out)\n\n return out\n\n\n def prepare_mult_add(self):\n with torch.no_grad():\n n = self.out_channels\n c = self.in_channels // self.groups\n h = self.largest_kernel_size\n w = self.largest_kernel_size\n # each_conv_shape = [n, c, h, w]\n\n dtype = self.conv.weight.dtype\n device = self.conv.weight.device\n shape = self.conv.weight.shape\n\n mult_list = []\n add_list = []\n for i in range(self.kernels):\n ksize = (i-1)*2 + 1\n pad = (self.largest_kernel_size - ksize) // 2\n if i==0:\n mult = torch.zeros([n,c,h,w], dtype=dtype, requires_grad=False)\n if self.avg_pool:\n pad = self.largest_kernel_size - self.stride\n add = torch.ones([n,c,self.stride,self.stride], dtype=dtype, requires_grad=False) / (self.stride*self.stride)\n add = torch.nn.functional.pad(add, [pad//2, pad - pad//2, pad//2, pad - pad//2])\n else:\n add = torch.ones([n,c,1,1], dtype=dtype, requires_grad=False)\n add = 
torch.nn.functional.pad(add, [w//2, w//2, h//2, h//2])\n elif i>=1:\n mult = torch.ones([n,c,ksize,ksize], dtype=dtype, requires_grad=False)\n mult = torch.nn.functional.pad(mult, [pad, pad, pad, pad])\n add = torch.zeros([n,c,h,w], dtype=dtype, requires_grad=False)\n\n #print(f\"\\n i = {i}, mult = {mult} \\n end of mult, i = {i}. \\n\")\n #tm.sleep(5)\n #print(f\"\\n i = {i}, add = {add} \\n end of add, i = {i}. \\n\")\n #tm.sleep(5)\n\n mult_list.append(mult)\n add_list.append(add)\n\n self.mult_weights = torch.cat(mult_list, dim=1)\n self.add_weights = torch.cat(add_list, dim=1)\n self.mult_weights.requires_grad=False\n self.add_weights.requires_grad=False\n\n\n def change_weights(self):\n\n #print(f\" self.conv.weight.shape = {self.conv.weight.shape}, mult_weights.shape={mult_weights.shape}, add_weights.shape={add_weights.shape}\")\n\n if self.mult_weights.device != self.conv.weight.device or self.add_weights.device != self.conv.weight.device:\n #print(f\"\\n move self.mult_weights to {self.conv.weight.device}\")\n self.mult_weights = self.mult_weights.to(self.conv.weight.device)\n self.add_weights = self.add_weights.to(self.conv.weight.device)\n #print(f\" moved self.conv.weight.device = {self.conv.weight.device}, self.mult_weights.device={self.mult_weights.device}, self.add_weights.device={self.add_weights.device}\")\n\n\n #print(f\" self.conv.weight.device = {self.conv.weight.device}, self.mult_weights.device={self.mult_weights.device}, self.add_weights.device={self.add_weights.device}\")\n #print(\"change_weights()\")\n\n with torch.no_grad():\n self.conv.weight.data.mul(self.mult_weights).add(self.add_weights)\n #self.conv.weight.data = self.conv.weight.data * self.mult_weights + self.add_weights\n\n\n #self.conv.weight.requires_grad=True\n\n #print(f\"\\n self.conv.weight[0,:,:,:] = \\n {self.conv.weight[0,:,:,:]} \\n end of self.conv.weight. 
\\n\")\n #tm.sleep(5)\n\n def fuse_conv_bn(self):\n \"\"\"\n # n,c,h,w - conv\n # n - bn (scale, bias, mean, var)\n\n if type(self.bn) is nn.Identity or type(self.bn) is None:\n return\n\n self.conv.weight\n running_mean = self.bn.running_mean\n running_var = self.bn.running_var\n gamma = self.bn.weight\n beta = self.bn.bias\n eps = self.bn.eps\n \"\"\"\n std = (self.bn.running_var + self.bn.eps).sqrt()\n bias = self.bn.bias - self.bn.running_mean * self.bn.weight / std\n\n t = (self.bn.weight / std).reshape(-1, 1, 1, 1)\n weights = self.conv.weight * t\n\n self.bn = nn.Identity()\n self.conv = nn.Conv2d(in_channels = self.conv.in_channels,\n out_channels = self.conv.out_channels,\n kernel_size = self.conv.kernel_size,\n stride=self.conv.stride,\n padding = self.conv.padding,\n dilation = self.conv.dilation,\n groups = self.conv.groups,\n bias = True,\n padding_mode = self.conv.padding_mode)\n\n self.conv.weight = torch.nn.Parameter(weights)\n self.conv.bias = torch.nn.Parameter(bias)\n\n\n\n\ndef fuse_model(m):\n prev_previous_type = nn.Identity()\n prev_previous_name = ''\n previous_type = nn.Identity()\n previous_name = ''\n for name, module in m.named_modules():\n if prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d and type(module) == nn.ReLU:\n print(\"FUSED \", prev_previous_name, previous_name, name)\n torch.quantization.fuse_modules(m, [prev_previous_name, previous_name, name], inplace=True)\n elif prev_previous_type == nn.Conv2d and previous_type == nn.BatchNorm2d:\n print(\"FUSED \", prev_previous_name, previous_name)\n torch.quantization.fuse_modules(m, [prev_previous_name, previous_name], inplace=True)\n elif previous_type == nn.Conv2d and type(module) == nn.ReLU:\n print(\"FUSED \", previous_name, name)\n #torch.quantization.fuse_modules(m, [previous_name, name], inplace=True)\n\n prev_previous_type = previous_type\n prev_previous_name = previous_name\n previous_type = type(module)\n previous_name = name\n\ndef sparsity(model):\n # Return global model sparsity\n a, b = 0., 0.\n for p in model.parameters():\n a += p.numel()\n b += (p == 0).sum()\n return b / a\n\n\ndef prune(model, amount=0.3):\n # Prune model to requested global sparsity\n import torch.nn.utils.prune as prune\n print('Pruning model... 
', end='')\n for name, m in model.named_modules():\n if isinstance(m, nn.Conv2d):\n prune.l1_unstructured(m, name='weight', amount=amount) # prune\n prune.remove(m, 'weight') # make permanent\n print(' %.3g global sparsity' % sparsity(model))\n\n\ndef fuse_conv_and_bn(conv, bn):\n # https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n with torch.no_grad():\n # init\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n bias=True).to(conv.weight.device)\n\n # prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))\n\n # prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv\n\n\ndef model_info(model, input=torch.zeros(1, 3, 224, 224), verbose=False):\n # Plots a line-by-line description of a PyTorch model\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma'))\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPS\n from thop import profile\n print(\"try thop\")\n flops = profile((model), inputs=(input,), verbose=False)[0] / 1E9 * 2\n fs = ', %.1f GFLOPS' % (flops) # 224x224 FLOPS\n except:\n fs = ''\n\n print('Model Summary: %g layers, %g parameters, %g gradients%s' % (len(list(model.parameters())), n_p, n_g, fs))\n\n\ndef channel_shuffle(x, groups):\n batchsize, num_channels, height, width = x.size()\n channels_per_group = num_channels // groups\n\n # reshape\n x = x.view(batchsize, groups,\n channels_per_group, height, width)\n\n x = torch.transpose(x, 1, 2).contiguous()\n\n # flatten\n x = x.view(batchsize, -1, height, width)\n\n return x\n"
] |
[
[
"torch.transpose",
"torch.zeros",
"torch.cat",
"torch.no_grad",
"torch.mm",
"torch.jit.trace",
"torch.ones",
"torch.sqrt",
"torch.quantization.fuse_modules",
"torch.reshape",
"torch.nn.Sigmoid",
"torch.ones_like",
"torch.nn.functional.pad",
"torch.nn.Sequential",
"torch.nn.Parameter",
"torch.nn.Conv2d",
"torch.zeros_like",
"torch.nn.Linear",
"torch.nn.utils.prune.l1_unstructured",
"torch.nn.Conv1d",
"torch.nn.BatchNorm2d",
"torch.stack",
"torch.nn.SiLU",
"torch.nn.utils.prune.remove",
"torch.nn.Identity",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU",
"torch.nn.AvgPool1d"
]
] |
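Note on the record above: its `fuse_conv_and_bn` follows the standard folding of a BatchNorm into the preceding convolution (w' = w * gamma/std, b' = beta - gamma*mean/std). A small self-contained sketch, with illustrative shapes, that checks this folding numerically; it is not the record's exact code:

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
    bn = nn.BatchNorm2d(8).eval()  # eval() so the running statistics are used

    with torch.no_grad():
        std = (bn.running_var + bn.eps).sqrt()
        fused = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
        fused.weight.copy_(conv.weight * (bn.weight / std).reshape(-1, 1, 1, 1))
        fused.bias.copy_(bn.bias - bn.running_mean * bn.weight / std)

        x = torch.randn(1, 3, 16, 16)
        print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # expect True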
PrediktorAS/quarry
|
[
"80f14781506dcb2e85dbda8057ad184f93140ce5"
] |
[
"tests/test_translate_from_rdslike_vanilla_with_owl.py"
] |
[
"# Copyright 2021 Prediktor AS\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\nimport os\r\nimport pandas as pd\r\nimport pytest\r\nfrom owlrl import DeductiveClosure, OWLRL_Semantics\r\nfrom rdflib import Graph\r\n\r\nimport swt_translator as swtt\r\n\r\nPATH_HERE = os.path.dirname(__file__)\r\n\r\n\r\[email protected](scope='session')\r\ndef create_ttl():\r\n namespaces = ['http://opcfoundation.org/UA/', 'http://prediktor.com/sparql_testcase',\r\n 'http://prediktor.com/RDS-like-typelib/',\r\n 'http://opcfoundation.org/UA/IEC61850-7-3', 'http://opcfoundation.org/UA/IEC61850-7-4']\r\n\r\n output_file_ttl = PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/kb.ttl'\r\n output_file_owl = PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/ont.owl'\r\n swtt.translate(xml_dir=PATH_HERE + '/input_data/translate_from_rdslike', namespaces=namespaces,\r\n output_ttl_file=output_file_ttl, output_owl_file=output_file_owl)\r\n return output_file_ttl, output_file_owl\r\n\r\n\r\[email protected]\r\ndef set_up_rdflib(create_ttl):\r\n output_file_ttl, output_file_owl = create_ttl\r\n g = Graph()\r\n g.parse(source=output_file_ttl, format='turtle')\r\n g.parse(source=output_file_owl, format='xml')\r\n DeductiveClosure(OWLRL_Semantics).expand(g)\r\n return g\r\n\r\n\r\ndef test_basic_query(set_up_rdflib):\r\n g = set_up_rdflib\r\n q = \"\"\"\r\n PREFIX iec61850ln: <http://opcfoundation.org/UA/IEC61850-7-4#>\r\n PREFIX rdslike: <http://prediktor.com/RDS-like-typelib/#>\r\n PREFIX opcua: <http://opcfoundation.org/UA/#>\r\n SELECT ?node ?name WHERE {\r\n ?node rdslike:hasLogicalNode ?svbr.\r\n ?svbr a iec61850ln:SVBR.\r\n ?node opcua:displayName ?name.\r\n }\r\n \"\"\"\r\n res = g.query(q)\r\n results = [tuple(map(str, r)) for r in res]\r\n df_actual = pd.DataFrame(results, columns=list(map(str, res.vars)))\r\n\r\n # df_actual.to_csv(PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/basic_query.csv', index=False)\r\n\r\n df_expected = pd.read_csv(PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/basic_query.csv')\r\n\r\n df_actual = df_actual.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True)\r\n df_expected = df_expected.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True)\r\n pd.testing.assert_frame_equal(df_actual, df_expected)\r\n\r\n\r\ndef test_subclass_within_rds(set_up_rdflib):\r\n g = set_up_rdflib\r\n q = \"\"\"\r\n PREFIX iec61850ln: <http://opcfoundation.org/UA/IEC61850-7-4#>\r\n PREFIX rdslike: <http://prediktor.com/RDS-like-typelib/#>\r\n PREFIX opcua: <http://opcfoundation.org/UA/#>\r\n SELECT ?node WHERE {\r\n ?node a rdslike:TopSystemType.}\r\n \"\"\"\r\n res = g.query(q)\r\n results = [tuple(map(str, r)) for r in res]\r\n df_actual = pd.DataFrame(results, columns=list(map(str, res.vars)))\r\n\r\n # df_actual.to_csv(PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/subclass_within_rds.csv', index=False)\r\n\r\n df_expected = pd.read_csv(PATH_HERE + 
'/expected/translate_from_rdslike_vanilla_with_owl/subclass_within_rds.csv')\r\n\r\n df_actual = df_actual.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True)\r\n df_expected = df_expected.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True)\r\n pd.testing.assert_frame_equal(df_actual, df_expected)\r\n\r\n\r\ndef test_subproperty_from_opcua(set_up_rdflib):\r\n g = set_up_rdflib\r\n q = \"\"\"\r\n PREFIX iec61850ln: <http://opcfoundation.org/UA/IEC61850-7-4#>\r\n PREFIX rdslike: <http://prediktor.com/RDS-like-typelib/#>\r\n PREFIX opcua: <http://opcfoundation.org/UA/#>\r\n SELECT ?nodea ?nodeb WHERE {\r\n ?nodea opcua:references ?nodeb.}\r\n \"\"\"\r\n res = g.query(q)\r\n results = [tuple(map(str, r)) for r in res]\r\n\r\n df_actual = pd.DataFrame(results, columns=list(map(str, res.vars)))\r\n\r\n # df_actual.to_csv(PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/subproperty_from_opcua.csv', index=False)\r\n\r\n df_expected = pd.read_csv(PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/subproperty_from_opcua.csv')\r\n\r\n df_actual = df_actual.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True)\r\n df_expected = df_expected.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True)\r\n pd.testing.assert_frame_equal(df_actual, df_expected)\r\n\r\n\r\ndef test_attributes(set_up_rdflib):\r\n g = set_up_rdflib\r\n q = \"\"\"\r\n PREFIX opcua: <http://opcfoundation.org/UA/#>\r\n SELECT ?node ?browseName ?browseNameNamespace ?displayName ?description ?nodeId ?nodeClass WHERE {\r\n ?node opcua:browseName ?browseName.\r\n ?node opcua:browseNameNamespace ?browseNameNamespace.\r\n ?node opcua:displayName ?displayName.\r\n ?node opcua:description ?description.\r\n ?node opcua:nodeId ?nodeId.\r\n ?node opcua:nodeClass ?nodeClass.\r\n }\r\n \"\"\"\r\n res = g.query(q)\r\n results = [tuple(map(str, r)) for r in res]\r\n df_actual = pd.DataFrame(results, columns=list(map(str, res.vars)))\r\n\r\n # df_actual.to_csv(PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/attributes.csv', index=False)\r\n\r\n df_expected = pd.read_csv(PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/attributes.csv')\r\n\r\n df_actual = df_actual.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True)\r\n df_expected = df_expected.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True).fillna('')\r\n pd.testing.assert_frame_equal(df_actual, df_expected)\r\n\r\n\r\ndef test_functional_aspect_reference(set_up_rdflib):\r\n g = set_up_rdflib\r\n q = \"\"\"\r\n PREFIX iec61850ln: <http://opcfoundation.org/UA/IEC61850-7-4#>\r\n PREFIX rdslike: <http://prediktor.com/RDS-like-typelib/#>\r\n PREFIX opcua: <http://opcfoundation.org/UA/#>\r\n SELECT ?nodea ?nodeb WHERE {\r\n ?nodea rdslike:functionalAspect ?nodeb.}\r\n \"\"\"\r\n res = g.query(q)\r\n results = [tuple(map(str, r)) for r in res]\r\n df_actual = pd.DataFrame(results, columns=list(map(str, res.vars)))\r\n\r\n # df_actual.to_csv(PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/functional_aspect_reference.csv', index=False)\r\n\r\n df_expected = pd.read_csv(\r\n PATH_HERE + '/expected/translate_from_rdslike_vanilla_with_owl/functional_aspect_reference.csv')\r\n\r\n df_actual = df_actual.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True)\r\n df_expected = df_expected.sort_values(by=df_actual.columns.values.tolist()).reset_index(drop=True).fillna('')\r\n 
pd.testing.assert_frame_equal(df_actual, df_expected)\r\n"
] |
[
[
"pandas.read_csv",
"pandas.testing.assert_frame_equal"
]
] |
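Note on the record above: every test sorts both result frames before `pd.testing.assert_frame_equal`, because SPARQL result order is not guaranteed. The same pattern condensed into a reusable helper (the helper name is illustrative, not from the record):

    import pandas as pd

    def assert_frames_equal_unordered(actual: pd.DataFrame, expected: pd.DataFrame) -> None:
        # order-insensitive comparison: sort by all columns, then reset the index
        cols = actual.columns.values.tolist()
        actual = actual.sort_values(by=cols).reset_index(drop=True)
        expected = expected.sort_values(by=cols).reset_index(drop=True)
        pd.testing.assert_frame_equal(actual, expected)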
Vishal-V/Tiny-ImageNet-Challenge
|
[
"14cbfc0785ee3026871ac39c3e9a677168fb8b1b"
] |
[
"spp.py"
] |
[
"from tensorflow.keras import Layer\nimport tensorflow.keras as K\n\n\nclass SpatialPyramidPooling(Layer):\n \"\"\"Spatial pyramid pooling layer for 2D inputs.\n See Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition,\n K. He, X. Zhang, S. Ren, J. Sun\n # Arguments\n pool_list: list of int\n List of pooling regions to use. The length of the list is the number of pooling regions,\n each int in the list is the number of regions in that pool. For example [1,2,4] would be 3\n regions with 1, 2x2 and 4x4 max pools, so 21 outputs per feature map\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if dim_ordering='th'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if dim_ordering='tf'.\n # Output shape\n 2D tensor with shape:\n `(samples, channels * sum([i * i for i in pool_list])`\n \"\"\"\n\n def __init__(self, pool_list, **kwargs):\n\n self.dim_ordering = K.image_dim_ordering()\n assert self.dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'\n\n self.pool_list = pool_list\n\n self.num_outputs_per_channel = sum([i * i for i in pool_list])\n\n super(SpatialPyramidPooling, self).__init__(**kwargs)\n\n def build(self, input_shape):\n if self.dim_ordering == 'th':\n self.nb_channels = input_shape[1]\n elif self.dim_ordering == 'tf':\n self.nb_channels = input_shape[3]\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.nb_channels * self.num_outputs_per_channel)\n\n def get_config(self):\n config = {'pool_list': self.pool_list}\n base_config = super(SpatialPyramidPooling, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def call(self, x, mask=None):\n\n input_shape = K.shape(x)\n\n if self.dim_ordering == 'th':\n num_rows = input_shape[2]\n num_cols = input_shape[3]\n elif self.dim_ordering == 'tf':\n num_rows = input_shape[1]\n num_cols = input_shape[2]\n\n row_length = [K.cast(num_rows, 'float32') / i for i in self.pool_list]\n col_length = [K.cast(num_cols, 'float32') / i for i in self.pool_list]\n\n outputs = []\n\n if self.dim_ordering == 'th':\n for pool_num, num_pool_regions in enumerate(self.pool_list):\n for jy in range(num_pool_regions):\n for ix in range(num_pool_regions):\n x1 = ix * col_length[pool_num]\n x2 = ix * col_length[pool_num] + col_length[pool_num]\n y1 = jy * row_length[pool_num]\n y2 = jy * row_length[pool_num] + row_length[pool_num]\n\n x1 = K.cast(K.round(x1), 'int32')\n x2 = K.cast(K.round(x2), 'int32')\n y1 = K.cast(K.round(y1), 'int32')\n y2 = K.cast(K.round(y2), 'int32')\n new_shape = [input_shape[0], input_shape[1],\n y2 - y1, x2 - x1]\n x_crop = x[:, :, y1:y2, x1:x2]\n xm = K.reshape(x_crop, new_shape)\n pooled_val = K.max(xm, axis=(2, 3))\n outputs.append(pooled_val)\n\n elif self.dim_ordering == 'tf':\n for pool_num, num_pool_regions in enumerate(self.pool_list):\n for jy in range(num_pool_regions):\n for ix in range(num_pool_regions):\n x1 = ix * col_length[pool_num]\n x2 = ix * col_length[pool_num] + col_length[pool_num]\n y1 = jy * row_length[pool_num]\n y2 = jy * row_length[pool_num] + row_length[pool_num]\n\n x1 = K.cast(K.round(x1), 'int32')\n x2 = K.cast(K.round(x2), 'int32')\n y1 = K.cast(K.round(y1), 'int32')\n y2 = K.cast(K.round(y2), 'int32')\n\n new_shape = [input_shape[0], y2 - y1,\n x2 - x1, input_shape[3]]\n\n x_crop = x[:, y1:y2, x1:x2, :]\n xm = K.reshape(x_crop, new_shape)\n pooled_val = K.max(xm, axis=(1, 2))\n outputs.append(pooled_val)\n\n if self.dim_ordering == 'th':\n outputs = 
K.concatenate(outputs)\n elif self.dim_ordering == 'tf':\n outputs = K.concatenate(outputs)\n\n return outputs"
] |
[
[
"tensorflow.keras.image_dim_ordering",
"tensorflow.keras.max",
"tensorflow.keras.cast",
"tensorflow.keras.shape",
"tensorflow.keras.concatenate",
"tensorflow.keras.reshape",
"tensorflow.keras.round"
]
] |
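Note on the record above: the `SpatialPyramidPooling` docstring states the output width is channels * sum(i * i for i in pool_list). A one-liner confirming the [1, 2, 4] example from that docstring (the channel count is illustrative):

    pool_list = [1, 2, 4]
    channels = 64  # illustrative
    print(sum(i * i for i in pool_list))             # 21 pooled regions per feature map
    print(channels * sum(i * i for i in pool_list))  # 1344 output features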
xiaohan2012/lst
|
[
"793944d1dd8235adbe2f651270ab12e46ff8f6f7"
] |
[
"tree_util.py"
] |
[
"import matplotlib\n# Force matplotlib to not use any Xwindows backend.\nmatplotlib.use('Agg')\n\nimport networkx as nx\nfrom subprocess import check_output\n\nfrom dag_util import get_roots\nfrom test_util import make_path\n\n\ndef to_bracket_notation(tree):\n def aux(node):\n nbrs = sorted(tree.neighbors(node))\n if len(nbrs) == 0:\n return '{%s}' % node\n else:\n return '{%s%s}' % (\n node,\n ''.join([aux(n) for n in nbrs])\n )\n if tree.number_of_nodes() == 0:\n return '{}'\n else:\n assert nx.is_arborescence(tree), tree.nodes()\n return aux(get_roots(tree)[0])\n \n\nJAR_PATH = make_path('external/APTED-0.1.1.jar')\n\n\ndef salzburg_ted(tree1, tree2):\n \"\"\"\n tree edit distance\n\n From [Source](tree-edit-distance.dbresearch.uni-salzburg.at/#download)\n \"\"\"\n # print('##### 1 ######')\n # print(to_bracket_notation(tree1))\n # print('##### 2 ######')\n # print(to_bracket_notation(tree2))\n output = check_output('java -jar {} --trees {} {}'.format(\n JAR_PATH,\n to_bracket_notation(tree1),\n to_bracket_notation(tree2)\n ).split())\n \n try:\n return float(output)\n except ValueError:\n print(output)\n raise\n\n\ndef tree_similarity_ratio(ted, t1, t2):\n \"\"\"\n Return the similarity ratio from 0 to 1 between two trees given their edit distance\n \n `ratio` idea from [DiffLib](https://fossies.org/dox/Python-3.5.1/difflib_8py_source.html)\n \"\"\"\n # print(ted)\n # import networkx as nx\n # empty_tree = nx.DiGraph()\n # print('#nodes',\n # t1.number_of_nodes(),\n # t2.number_of_nodes())\n # print('ted against empty_tree',\n # salzburg_ted(t1, empty_tree),\n # salzburg_ted(t2, empty_tree))\n\n return 1 - 2 * ted/(t1.number_of_nodes() + t2.number_of_nodes())\n \n\ndef tree_density(tree, X, edge_weight='c'):\n cost = sum(tree[s][t][edge_weight]\n for s, t in tree.edges_iter())\n try:\n return float(cost) / len(set(tree.nodes()).intersection(X))\n except ZeroDivisionError:\n return float('inf')\n\n\ndef draw_pred_tree_against_true_tree(pred_tree, true_tree, meta_graph,\n draw_which='together',\n output_path_suffix=''):\n \"\"\"\n\n Draw predicted event against the true event\n while using the meta graph as the background\n\n doesn't draw the entire meta_graph, just nx.compose(pred_tree, true_tree)\n \"\"\"\n # some checking\n for n in true_tree.nodes_iter():\n assert meta_graph.has_node(n), n\n for s, t in true_tree.edges_iter():\n assert meta_graph.has_edge(s, t), (s, t,\n (meta_graph.node[s]['sender_id'], meta_graph.node[s]['recipient_ids']),\n (meta_graph.node[t]['sender_id'], meta_graph.node[t]['recipient_ids']),\n meta_graph.node[s]['timestamp'],\n meta_graph.node[t]['timestamp'],\n meta_graph.node[t]['timestamp'] - meta_graph.node[s]['timestamp'])\n for n in pred_tree.nodes_iter():\n assert meta_graph.has_node(n), n\n for s, t in pred_tree.edges_iter():\n assert meta_graph.has_edge(s, t), (s, t)\n \n node_color_types = {'tp': 'green',\n 'fn': 'blue',\n 'fp': 'red',\n 'tn': 'gray'}\n edge_color_types = {'tp': 'green',\n 'fn': 'blue',\n 'fp': 'red',\n 'tn': 'gray'}\n\n def get_style_general(n, true_tree_bool_func, pred_tree_bool_func,\n style_map):\n if isinstance(n, list) or isinstance(n, tuple):\n true_has, pred_has = (true_tree_bool_func(*n),\n pred_tree_bool_func(*n))\n else:\n true_has, pred_has = (true_tree_bool_func(n),\n pred_tree_bool_func(n))\n if true_has and pred_has:\n return style_map['tp']\n elif true_has and not pred_has:\n return style_map['fn']\n elif not true_has and pred_has:\n return style_map['fp']\n else:\n return style_map['tn']\n \n root = 
get_roots(true_tree)[0]\n get_node_color = (lambda n: 'black'\n if n == root\n else\n get_style_general(\n n,\n true_tree.has_node,\n pred_tree.has_node,\n node_color_types)\n )\n get_edge_color = lambda n: get_style_general(n,\n true_tree.has_edge,\n pred_tree.has_edge,\n edge_color_types)\n\n if draw_which == \"together\":\n g = nx.compose(true_tree, pred_tree)\n output_path = 'tmp/tree_inspection/true_event_vs_pred_event{}.png'.format(output_path_suffix)\n else:\n g = true_tree\n output_path = 'tmp/tree_inspection/true_event{}.png'.format(output_path_suffix)\n\n pos = nx.graphviz_layout(g, prog='dot')\n\n nx.draw(g, pos,\n node_color=map(get_node_color, g.nodes_iter()),\n edge_color=map(get_edge_color, g.edges_iter()),\n node_size=200,\n alpha=0.5,\n arrows=False\n )\n\n if False:\n edge_label_func = lambda s, t: '{0:.2f}({1:.2f}, {2:.2f})'.format(\n meta_graph[s][t]['c'],\n meta_graph[s][t]['orig_c'],\n meta_graph[s][t]['recency']\n )\n else:\n edge_label_func = lambda s, t: '{0:.2f}'.format(meta_graph[s][t]['c'])\n\n if True:\n nx.draw_networkx_edge_labels(\n g, pos,\n edge_labels={(s, t): edge_label_func(s, t)\n for s, t in g.edges_iter()},\n alpha=0.5\n )\n\n if True:\n nx.draw_networkx_labels(\n g, pos,\n edge_labels={i: str(i) for i in g.nodes()},\n alpha=0.5\n )\n \n plt.savefig(output_path)\nif __name__ == '__main__':\n import numpy as np\n np.set_printoptions(precision=2, suppress=True)\n import matplotlib.pyplot as plt\n import cPickle as pkl\n\n plt.figure(figsize=(8, 8))\n # pred_path, mg_path = pkl.load(open('.paths.pkl'))\n # true_path = 'data/synthetic_single_tree/events--n_noisy_interactions_fraction=0.0.pkl'\n \n true_path = 'data/synthetic_single_tree/interactions--event_size=20--n_noisy_interactions_fraction=1.0-1.json'\n paths = pkl.load(open('tmp/synthetic_single_tree/paths/fraction=1.0--greedy--U=3.2559987036--dijkstra=False--timespan=44.0----distance_weights={\\\"topics\\\":1.0}--preprune_secs=44.0--self_talking_penalty=0.0----cand_tree_percent=0.1--root_sampling=random-1.pkl'))\n\n pred_tree = pkl.load(open(paths['result']))[0]\n true_tree = pkl.load(open(paths['true_events']))[0]\n \n meta_graph = nx.read_gpickle(paths['meta_graph'])\n\n # print('mg.c:', [meta_graph[s][t]['c'] for s, t in true_tree.edges_iter()])\n # print('t.c:', [true_tree[s][t]['c'] for s, t in true_tree.edges_iter()])\n # print 'true_tree.cost', sum(meta_graph[s][t]['c'] for s, t in true_tree.edges_iter())\n # print 'pred_tree.cost', sum(meta_graph[s][t]['c'] for s, t in pred_tree.edges_iter())\n for s, t in true_tree.edges_iter():\n print(s, t, meta_graph[s][t])\n\n\n output_path_suffix = ''\n\n if True:\n draw_which = 'together'\n else:\n draw_which = 'true_tree'\n\n draw_pred_tree_against_true_tree(pred_tree, true_tree, meta_graph,\n draw_which=draw_which,\n output_path_suffix=output_path_suffix)\n\n"
] |
[
[
"matplotlib.use",
"numpy.set_printoptions",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure"
]
] |
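Note on the record above: `tree_similarity_ratio` maps a tree edit distance to a score with the DiffLib-style formula 1 - 2 * ted / (|T1| + |T2|). A minimal sketch of that formula on plain node counts, with no networkx dependency:

    def tree_similarity_ratio(ted: float, n1: int, n2: int) -> float:
        # 1 - 2 * ted / (|T1| + |T2|); identical trees (ted == 0) score 1.0
        return 1.0 - 2.0 * ted / (n1 + n2)

    print(tree_similarity_ratio(ted=0, n1=10, n2=10))  # 1.0
    print(tree_similarity_ratio(ted=5, n1=10, n2=10))  # 0.5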
luizperes/training-grammar-guru
|
[
"62bd0112a41c2833ae1b14c4b39d50c2c0778c25"
] |
[
"neural-network/train_and_evaluate.py"
] |
[
"# Based on https://csil-git1.cs.surrey.sfu.ca/lperesde/nlpclass-1777-pixel/blob/master/evaluator/tmosharr/deep_learning.py\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom keras.utils import np_utils\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.pipeline import Pipeline\n\n\ndef create_model():\n model = Sequential()\n model.add(Dense(16, input_dim=31, activation='relu')) # Hidden layer.\n model.add(Dense(3, activation='softmax' )) # Output layer.\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n# Seed for RNG. Doesn't seem to be used anywhere.\nseed = 7\nnp.random.seed(seed)\n\n# print('Extracting data from files..')\ndftrain = pd.read_csv('feat_train.csv')\ndftest = pd.read_csv('feat_test.csv')\ndataset_train = dftrain.values\ndataset_test = dftest.values\n\n# print('Preparing data..')\n# Split into input and target data sets for training.\nX_train = dataset_train[:, 0:31].astype(float)\nY_train = dataset_train[:, 31].astype(int)\n# Prepare test data.\nX_test = dataset_test[:, :].astype(float)\n\n# Map training targets to labels.\ncat_y= []\nfor y in Y_train:\n if y==1:\n cat_y.append('better')\n elif y==0:\n cat_y.append('same')\n else:\n cat_y.append('worse')\n\n# Transform labels into a normalized encoding.\nencoder = LabelEncoder()\nencoder.fit(cat_y)\nencoded_Y = encoder.transform(cat_y)\ndummy_y = np_utils.to_categorical(encoded_Y)\n\n# Create and train model.\n# print('Creating base model..')\nmodel = create_model()\n\n# print('Training model..')\nmodel.fit(X_train, dummy_y, epochs=50, batch_size= 100, verbose=False)\n\n# Evaluate the model.\n# print('Evaluating model..')\npredictions = model.predict(X_test)\nfor prediction in predictions:\n # Initialize best index and score.\n best_index = -1\n best_score = -1\n\n # Look for the actual best index and score in the current prediction.\n for index in range(len(prediction)):\n if prediction[index] > best_score:\n best_score = prediction[index]\n best_index = index\n\n # Print the best index.\n if best_index == 0:\n print(1)\n elif best_index == 1:\n print(0)\n else:\n print(-1)\n\n# Record model weights in .h5 file.\n# https://stackoverflow.com/questions/47266383/save-and-load-weights-in-keras\n# print('Saving model..')\nmodel.save_weights('nn_weights.h5')\n# print('Done! Bye!')\n"
] |
[
[
"sklearn.preprocessing.LabelEncoder",
"pandas.read_csv",
"numpy.random.seed"
]
] |
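Note on the record above: the manual best-index search in its prediction loop is equivalent to `np.argmax` over each softmax row, followed by the index-to-label mapping the record prints (index 0 -> 1, index 1 -> 0, otherwise -1). A condensed sketch of that decoding step (the sample probabilities are illustrative):

    import numpy as np

    index_to_label = {0: 1, 1: 0, 2: -1}  # better / same / worse, as printed in the record

    def decode(predictions: np.ndarray) -> list:
        return [index_to_label[i] for i in np.argmax(predictions, axis=1)]

    print(decode(np.array([[0.7, 0.2, 0.1], [0.1, 0.2, 0.7]])))  # [1, -1]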
srivama/Python
|
[
"809d4c077c179feb077f09a3cd2501f9724366a2"
] |
[
"other/game_of_life/game_o_life.py"
] |
[
"'''Conway's Game Of Life, Author Anurag Kumar(mailto:[email protected]) \n\nRequirements:\n - numpy\n - random\n - time\n - matplotlib\n\nPython:\n - 3.5\n\nUsage:\n - $python3 game_o_life <canvas_size:int>\n\nGame-Of-Life Rules:\n \n 1.\n Any live cell with fewer than two live neighbours\n dies, as if caused by under-population.\n 2.\n Any live cell with two or three live neighbours lives\n on to the next generation.\n 3.\n Any live cell with more than three live neighbours\n dies, as if by over-population.\n 4.\n Any dead cell with exactly three live neighbours be-\n comes a live cell, as if by reproduction.\n '''\nimport numpy as np\nimport random\nimport time\nimport sys\nfrom matplotlib import pyplot as plt\nimport matplotlib.animation as animation\nfrom matplotlib.colors import ListedColormap\n\nusage_doc = 'Usage of script: script_nama <size_of_canvas:int>'\n\nchoice = [0]*100 + [1]*10\nrandom.shuffle(choice)\n\n\ndef create_canvas(size):\n canvas = [[False for i in range(size)] for j in range(size)]\n return canvas\n\n\ndef seed(canvas):\n for i, row in enumerate(canvas):\n for j, _ in enumerate(row):\n canvas[i][j] = bool(random.getrandbits(1))\n\n\ndef run(canvas):\n ''' This function runs the rules of game through all points, and changes their status accordingly.(in the same canvas)\n @Args:\n --\n canvas : canvas of population to run the rules on.\n\n @returns:\n --\n None\n '''\n canvas = np.array(canvas)\n next_gen_canvas = np.array(create_canvas(canvas.shape[0]))\n for r, row in enumerate(canvas):\n for c, pt in enumerate(row):\n # print(r-1,r+2,c-1,c+2)\n next_gen_canvas[r][c] = __judge_point(pt, canvas[r-1:r+2, c-1:c+2])\n\n canvas = next_gen_canvas\n del next_gen_canvas # cleaning memory as we move on.\n return canvas.tolist()\n\n\ndef __judge_point(pt, neighbours):\n dead = 0\n alive = 0\n # finding dead or alive neighbours count.\n for i in neighbours:\n for status in i:\n if status:\n alive += 1\n else:\n dead += 1\n\n # handling duplicate entry for focus pt.\n if pt:\n alive -= 1\n else:\n dead -= 1\n\n # running the rules of game here.\n state = pt\n if pt:\n if alive < 2:\n state = False\n elif alive == 2 or alive == 3:\n state = True\n elif alive > 3:\n state = False\n else:\n if alive == 3:\n state = True\n\n return state\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n raise Exception(usage_doc)\n\n canvas_size = int(sys.argv[1])\n # main working structure of this module.\n c = create_canvas(canvas_size)\n seed(c)\n fig, ax = plt.subplots()\n fig.show()\n cmap = ListedColormap(['w', 'k'])\n try:\n while True:\n c = run(c)\n ax.matshow(c, cmap=cmap)\n fig.canvas.draw()\n ax.cla()\n except KeyboardInterrupt:\n # do nothing.\n pass\n"
] |
[
[
"numpy.array",
"matplotlib.colors.ListedColormap",
"matplotlib.pyplot.subplots"
]
] |
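Note on the record above: `__judge_point` encodes the four Game-of-Life rules listed in the module docstring. The same decision reduces to a two-line predicate on the live-neighbour count (a sketch, not the record's exact code):

    def next_state(alive: bool, live_neighbours: int) -> bool:
        # survival with 2 or 3 live neighbours, birth with exactly 3, death otherwise
        return live_neighbours == 3 or (alive and live_neighbours == 2)

    print(next_state(True, 1), next_state(True, 2), next_state(False, 3))  # False True True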
brucew2099/Machine-Learning-for-Time-Series-Forecasting
|
[
"53d6b9cf8dcc3fb9c4ec22675143e9815d72a72e"
] |
[
"Notebooks/common/utils.py"
] |
[
"import os\nimport re\nimport sys\nimport zipfile\nfrom collections import UserDict\n\nimport numpy as np\nimport pandas as pd\nimport requests\n\n\n# This function unzips the GEFCom2014 data zip file and extracts the 'extended'\n# load forecasting competition data. Data is saved in energy.csv\ndef extract_data(data_dir):\n GEFCom_dir = os.path.join(data_dir, \"GEFCom2014\", \"GEFCom2014 Data\")\n\n GEFCom_zipfile = os.path.join(data_dir, \"GEFCom2014.zip\")\n if not os.path.exists(GEFCom_zipfile):\n sys.exit(\n \"Download GEFCom2014.zip from https://mlftsfwp.blob.core.windows.net/mlftsfwp/GEFCom2014.zip and save it to the '{}' directory.\".format(\n data_dir\n )\n )\n\n # unzip root directory\n zip_ref = zipfile.ZipFile(GEFCom_zipfile, \"r\")\n zip_ref.extractall(os.path.join(data_dir, \"GEFCom2014\"))\n zip_ref.close()\n\n # extract the extended competition data\n zip_ref = zipfile.ZipFile(os.path.join(GEFCom_dir, \"GEFCom2014-E_V2.zip\"), \"r\")\n zip_ref.extractall(os.path.join(data_dir, \"GEFCom2014-E\"))\n zip_ref.close()\n\n # load the data from Excel file\n data = pd.read_excel(\n os.path.join(data_dir, \"GEFCom2014-E\", \"GEFCom2014-E.xlsx\"), parse_dates=[\"Date\"]\n )\n\n # create timestamp variable from Date and Hour\n data[\"timestamp\"] = data[\"Date\"].add(pd.to_timedelta(data.Hour - 1, unit=\"h\"))\n data = data[[\"timestamp\", \"load\", \"T\"]]\n data = data.rename(columns={\"T\": \"temp\"})\n\n # remove time period with no load data\n data = data[data.timestamp >= \"2012-01-01\"]\n\n # save to csv\n data.to_csv(os.path.join(data_dir, \"energy.csv\"), index=False)\n\n\ndef download_file(url):\n local_filename = re.search(\"^[^?]+\", url.split(\"/\")[-1]).group()\n # NOTE the stream=True parameter below\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n with open(local_filename, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=8192):\n f.write(chunk)\n return local_filename\n\n\ndef load_data(data_dir):\n \"\"\"Load the GEFCom 2014 energy load data\"\"\"\n\n energy = pd.read_csv(\n os.path.join(data_dir, \"energy.csv\"), parse_dates=[\"timestamp\"]\n )\n\n # Reindex the dataframe such that the dataframe has a record for every time point\n # between the minimum and maximum timestamp in the time series. This helps to\n # identify missing time periods in the data (there are none in this dataset).\n\n energy.index = energy[\"timestamp\"]\n energy = energy.reindex(\n pd.date_range(min(energy[\"timestamp\"]), max(energy[\"timestamp\"]), freq=\"H\")\n )\n energy = energy.drop(\"timestamp\", axis=1)\n\n return energy\n\n\ndef mape(predictions, actuals):\n \"\"\"Mean absolute percentage error\"\"\"\n return ((predictions - actuals).abs() / actuals).mean()\n\n\ndef create_evaluation_df(predictions, test_inputs, H, scaler):\n \"\"\"Create a data frame for easy evaluation\"\"\"\n eval_df = pd.DataFrame(\n predictions, columns=[\"t+\" + str(t) for t in range(1, H + 1)]\n )\n eval_df[\"timestamp\"] = test_inputs.dataframe.index\n eval_df = pd.melt(\n eval_df, id_vars=\"timestamp\", value_name=\"prediction\", var_name=\"h\"\n )\n eval_df[\"actual\"] = np.transpose(test_inputs[\"target\"]).ravel()\n eval_df[[\"prediction\", \"actual\"]] = scaler.inverse_transform(\n eval_df[[\"prediction\", \"actual\"]]\n )\n return eval_df\n\n\nclass TimeSeriesTensor(UserDict):\n \"\"\"A dictionary of tensors for input into the RNN model.\n\n Use this class to:\n 1. 
Shift the values of the time series to create a Pandas dataframe containing all the data\n for a single training example\n 2. Discard any samples with missing values\n 3. Transform this Pandas dataframe into a numpy array of shape\n (samples, time steps, features) for input into Keras\n\n The class takes the following parameters:\n - **dataset**: original time series\n - **target** name of the target column\n - **H**: the forecast horizon\n - **tensor_structures**: a dictionary discribing the tensor structure of the form\n { 'tensor_name' : (range(max_backward_shift, max_forward_shift), [feature, feature, ...] ) }\n if features are non-sequential and should not be shifted, use the form\n { 'tensor_name' : (None, [feature, feature, ...])}\n - **freq**: time series frequency (default 'H' - hourly)\n - **drop_incomplete**: (Boolean) whether to drop incomplete samples (default True)\n \"\"\"\n\n def __init__(\n self, dataset, target, H, tensor_structure, freq=\"H\", drop_incomplete=True\n ):\n self.dataset = dataset\n self.target = target\n self.tensor_structure = tensor_structure\n self.tensor_names = list(tensor_structure.keys())\n\n self.dataframe = self._shift_data(H, freq, drop_incomplete)\n self.data = self._df2tensors(self.dataframe)\n\n def _shift_data(self, H, freq, drop_incomplete):\n\n # Use the tensor_structures definitions to shift the features in the original dataset.\n # The result is a Pandas dataframe with multi-index columns in the hierarchy\n # tensor - the name of the input tensor\n # feature - the input feature to be shifted\n # time step - the time step for the RNN in which the data is input. These labels\n # are centred on time t. the forecast creation time\n df = self.dataset.copy()\n\n idx_tuples = []\n for t in range(1, H + 1):\n df[\"t+\" + str(t)] = df[self.target].shift(t * -1, freq=freq)\n idx_tuples.append((\"target\", \"y\", \"t+\" + str(t)))\n\n for name, structure in self.tensor_structure.items():\n rng = structure[0]\n dataset_cols = structure[1]\n\n for col in dataset_cols:\n\n # do not shift non-sequential 'static' features\n if rng is None:\n df[\"context_\" + col] = df[col]\n idx_tuples.append((name, col, \"static\"))\n\n else:\n for t in rng:\n sign = \"+\" if t > 0 else \"\"\n shift = str(t) if t != 0 else \"\"\n period = \"t\" + sign + shift\n shifted_col = name + \"_\" + col + \"_\" + period\n df[shifted_col] = df[col].shift(t * -1, freq=freq)\n idx_tuples.append((name, col, period))\n\n df = df.drop(self.dataset.columns, axis=1)\n idx = pd.MultiIndex.from_tuples(\n idx_tuples, names=[\"tensor\", \"feature\", \"time step\"]\n )\n df.columns = idx\n\n if drop_incomplete:\n df = df.dropna(how=\"any\")\n\n return df\n\n def _df2tensors(self, dataframe):\n\n # Transform the shifted Pandas dataframe into the multidimensional numpy arrays. 
These\n # arrays can be used to input into the keras model and can be accessed by tensor name.\n # For example, for a TimeSeriesTensor object named \"model_inputs\" and a tensor named\n # \"target\", the input tensor can be acccessed with model_inputs['target']\n\n inputs = {}\n y = dataframe[\"target\"]\n y = y.to_numpy()\n inputs[\"target\"] = y\n\n for name, structure in self.tensor_structure.items():\n rng = structure[0]\n cols = structure[1]\n tensor = dataframe[name][cols].to_numpy()\n if rng is None:\n tensor = tensor.reshape(tensor.shape[0], len(cols))\n else:\n tensor = tensor.reshape(tensor.shape[0], len(cols), len(rng))\n tensor = np.transpose(tensor, axes=[0, 2, 1])\n inputs[name] = tensor\n\n return inputs\n\n def subset_data(self, new_dataframe):\n\n # Use this function to recreate the input tensors if the shifted dataframe\n # has been filtered.\n\n self.dataframe = new_dataframe\n self.data = self._df2tensors(self.dataframe)\n"
] |
[
[
"pandas.to_timedelta",
"pandas.MultiIndex.from_tuples",
"pandas.melt",
"numpy.transpose"
]
] |
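Note on the record above: `TimeSeriesTensor._shift_data` builds the supervised frame by shifting the target column once per horizon step and dropping incomplete rows. A minimal pandas illustration of that shifting idea on a toy hourly series (the column names and values are illustrative, not from the GEFCom data):

    import pandas as pd

    ts = pd.DataFrame(
        {"load": range(6)},
        index=pd.date_range("2012-01-01", periods=6, freq="H"),
    )
    H = 2
    for t in range(1, H + 1):
        # shift the index by -t hours so the row at time T holds the value observed at T + t
        ts[f"t+{t}"] = ts["load"].shift(-t, freq="H")
    print(ts.dropna(how="any"))  # last H rows are dropped as incomplete samples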
ashwani227/humanBodyFallDetection
|
[
"41e33f0c820d4a2b72c2998c6095d234ff698ab2"
] |
[
"Fall detection.py"
] |
[
"import cv2\r\nimport numpy as np\r\nimport math\r\nimport winsound\r\nimport time\r\ncap = cv2.VideoCapture(\"C:\\\\Users\\\\singl\\\\Downloads\\\\MM803\\\\Video DataSet\\\\Office\\\\Video (3).avi\")\r\n#cap = cv2.VideoCapture(\"C:\\\\Users\\\\singl\\\\Downloads\\\\MM803\\\\Video DataSet\\\\Home_02\\\\video (41).avi\")\r\n#cap = cv2.VideoCapture(\"C:\\\\Users\\\\singl\\\\Downloads\\\\MM803\\\\Video DataSet\\\\Lecture room\\\\video (9).avi\")\r\n\r\n\r\n\r\n\r\ncount = 0\r\ncount1 =0\r\nslope=0\r\nslope1 = 100\r\nminArea = 120*100\r\nradianToDegree=57.324\r\nminimumLengthOfLine=150.0\r\nminAngle=18\r\nmaxAngle=72\r\nlist_falls=[]\r\ncount_fall=0\r\nfirstFrame= None\r\n\r\ntime.sleep(1)\r\n\r\n#Function definition for frame Conversion\r\ndef convertFrame(frame):\r\n r = 750.0 / frame.shape[1]\r\n dim = (750, int(frame.shape[0] * r))\r\n frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\r\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\r\n gray = cv2.GaussianBlur(gray, (31,31),0)\r\n\r\n return frame,gray\r\n\r\nwhile True:\r\n\r\n ret,frame= cap.read()\r\n if frame is None:\r\n break\r\n frame,gray = convertFrame(frame);\r\n\r\n #comparison Frame\r\n if firstFrame is None:\r\n time.sleep(1.0)\r\n _,frame= cap.read()\r\n frame,gray=convertFrame(frame)\r\n firstFrame = gray\r\n continue\r\n\r\n #Frame difference between current and comparison frame\r\n frameDelta= cv2.absdiff(firstFrame,gray)\r\n #Thresholding\r\n thresh1 = cv2.threshold(frameDelta,20,255,cv2.THRESH_BINARY)[1]\r\n #Dilation of Pixels\r\n thresh = cv2.dilate(thresh1,None,iterations = 15)\r\n\r\n\r\n\r\n #Finding the Region of Interest with changes\r\n _,contour,_ = cv2.findContours(thresh.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n for con in contour:\r\n\r\n if len(con)>=5 and cv2.contourArea(con)>minArea:\r\n ellipse = cv2.fitEllipse(con)\r\n cv2.ellipse(frame,ellipse,(255,255,0),5)\r\n\r\n #Co-ordinates of extreme points\r\n extTop = tuple(con[con[:, :, 1].argmin()][0])\r\n extBot = tuple(con[con[:, :, 1].argmax()][0])\r\n extLeft = tuple(con[con[:, :, 0].argmin()][0])\r\n extRight = tuple(con[con[:, :, 0].argmax()][0])\r\n\r\n line1 = math.sqrt((extTop[0]-extBot[0])*(extTop[0]-extBot[0])+(extTop[1]-extBot[1])*(extTop[1]-extBot[1]))\r\n midPoint = [extTop[0]-int((extTop[0]-extBot[0])/2),extTop[1]-int((extTop[1]-extBot[1])/2)]\r\n if line1>minimumLengthOfLine:\r\n #cv2.line(frame,(extBot[0],extBot[1]),(extTop[0],extTop[1]), (255, 0, 0), 5)\r\n if (extTop[0]!=extBot[0]):\r\n slope = abs(extTop[1]-extBot[1])/(extTop[0]-extBot[0])\r\n\r\n else:\r\n #cv2.line(frame, (extLeft[0], extLeft[1]), (extRight[0], extRight[1]), (255, 0, 0), 5)\r\n if (extRight[0] != extLeft[0]):\r\n slope = abs(extRight[1]-extLeft[1])/(extRight[0]-extLeft[0])\r\n #print(slope)\r\n\r\n #cv2.line(frame, (midPoint[0], midPoint[1]), (midPoint[0] + 1, midPoint[1] + 100), (255, 255, 255), 5)\r\n #angle in Radians with perpendicular\r\n originalAngleP = np.arctan((slope1 - slope) / (1 + slope1 * slope))\r\n #angle with Horizontal\r\n originalAngleH = np.arctan(slope)\r\n #Angle in degrees\r\n originalAngleH = originalAngleH*radianToDegree\r\n originalAngleP=originalAngleP*radianToDegree\r\n #print(originalAngleP)\r\n if (abs(originalAngleP) > minAngle and abs(originalAngleH) < maxAngle and abs(originalAngleP)+abs(originalAngleH)>89 and abs(originalAngleP)+abs(originalAngleH)<91):\r\n count += 1\r\n if (count > 18):\r\n count_fall+=1\r\n #print(\"Fall detected\")\r\n list_falls.append((time.time()))\r\n if(count_fall>1):\r\n 
if(list_falls[len(list_falls)-1]-list_falls[len(list_falls)-2]<.5):\r\n #print (list_falls[len(list_falls)-1]-list_falls[len(list_falls)-2])\r\n print (\"Fall detected\")\r\n else:\r\n continue\r\n\r\n count = 0\r\n\r\n cv2.imshow('Frame', frame)\r\n #cv2.imshow('gray',gray)\r\n #cv2.imshow('Thresh',thresh)\r\n #cv2.imshow('FirstFrame',firstFrame)\r\n k = cv2.waitKey(30) & 0xff\r\n if k == 27:\r\n break\r\n#print (list_falls)\r\ncap.release()\r\ncv2.waitKey(1)\r\ncv2.destroyAllWindows()\r\n\r\n\r\n\r\n"
] |
[
[
"numpy.arctan"
]
] |
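The `apis` entry for this row, `numpy.arctan`, is what the fall-detection script uses to turn the slope of the line joining the contour extremes into angles with the horizontal and with a near-vertical reference line. A minimal sketch of that angle test follows, with an illustrative slope value and numpy's own degree conversion in place of the script's hard-coded factor (the script uses 57.324, while 180/pi is approximately 57.2958):

import numpy as np

# Slope of the detected person's major axis (illustrative value).
slope = 0.3
# Slope of the near-vertical reference line, as in the script.
slope_vertical = 100

# Angle with the vertical reference and angle with the horizontal, in degrees.
angle_perpendicular = np.degrees(np.arctan((slope_vertical - slope) / (1 + slope_vertical * slope)))
angle_horizontal = np.degrees(np.arctan(slope))

# A lying posture gives a small angle with the horizontal and a large angle
# with the vertical; the script also requires the two to sum to roughly 90.
is_fall_candidate = (
    abs(angle_perpendicular) > 18
    and abs(angle_horizontal) < 72
    and 89 < abs(angle_perpendicular) + abs(angle_horizontal) < 91
)
print(angle_perpendicular, angle_horizontal, is_fall_candidate)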
ARBUCHELI/EXERCISE-VPU-AND-THE-DEVCLOUD
|
[
"4e8063ff6710d48a2406d6325f03c9ef8241ab5f"
] |
[
"Exercise_VPU_and_the_DevCloud.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Exercise: VPU and the DevCloud\n# \n# Now that we've walked through the process of requesting a CPU with a VPU (Intel® NCS2) on Intel's DevCloud and loading a model on the Intel® NCS2, you will have the opportunity to do this yourself with the addition of running inference on an image using both a CPU and IGPU.\n# \n# In this exercise, you will do the following:\n# 1. Write a Python script to load a model and run inference 100 times on a device on Intel's DevCloud.\n# * Calculate the time it takes to load the model.\n# * Calculate the time it takes to run inference 100 times.\n# 2. Write a shell script to submit a job to Intel's DevCloud.\n# 3. Submit a job using `qsub` on an **IEI Tank-870** edge node, run `liveQStat` to view the status of your submitted jobs, then retrieve and view the results from your job.\n# * One job using `CPU` as the device.\n# * One job using `GPU` as the device.\n# * One job using `VPU` as the device.\n# 4. Plot and compare the results using bar graphs with `matplotlib` for the following metrics:\n# * Model Loading Time\n# * Inference Time\n# * Frames Per Second (FPS)\n# \n# Click the **Exercise Overview** button below for a demonstration.\n\n# <span class=\"graffiti-highlight graffiti-id_8egtpc8-id_0itthyg\"><i></i><button>Exercise Overview</button></span>\n\n# #### IMPORTANT: Set up paths so we can run Dev Cloud utilities\n# You *must* run this every time you enter a Workspace session.\n\n# In[27]:\n\n\nget_ipython().run_line_magic('env', 'PATH=/opt/conda/bin:/opt/spark-2.4.3-bin-hadoop2.7/bin:/opt/conda/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/intel_devcloud_support')\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('/opt/intel_devcloud_support'))\nsys.path.insert(0, os.path.abspath('/opt/intel'))\n\n\n# ## The Model\n# \n# We will be using the `vehicle-license-plate-detection-barrier-0106` model for this exercise.\n# \n# Remember to use the appropriate model precisions for each device:\n# \n# * CPU - `FP32`\n# * IGPU - `FP16`\n# * VPU - `FP16`\n# \n# The model has already been downloaded for you in the `/data/models/intel` directory on Intel's DevCloud.\n# \n# We will be running inference on an image of a car. The path to the image is `/data/resources/car.png`.\n\n# # Step 1: Creating a Python Script\n# \n# The first step is to create a Python script that you can use to load the model and perform inference. We'll use the `%%writefile` magic to create a Python file called `inference_on_device.py`. In the next cell, you will need to complete the `TODO` items for this Python script.\n# \n# `TODO` items:\n# \n# 1. Load the model\n# \n# 2. Get the name of the input node\n# \n# 3. Prepare the model for inference (create an input dictionary)\n# \n# 4. 
Run inference 100 times in a loop\n# \n# If you get stuck, you can click on the **Show Solution** button below for a walkthrough with the solution code.\n\n# In[28]:\n\n\nget_ipython().run_cell_magic('writefile', 'inference_on_device.py', '\\nimport time\\nimport numpy as np\\nimport cv2\\nfrom openvino.inference_engine import IENetwork\\nfrom openvino.inference_engine import IECore\\nimport argparse\\n\\ndef main(args):\\n model=args.model_path\\n model_weights=model+\\'.bin\\'\\n model_structure=model+\\'.xml\\'\\n \\n start=time.time()\\n \\n # TODO: Load the model\\n model=IENetwork(model_structure, model_weights)\\n\\n core = IECore()\\n net = core.load_network(network=model, device_name=args.device, num_requests=1)\\n \\n load_time=time.time()-start\\n print(f\"Time taken to load model = {load_time} seconds\")\\n \\n # Get the name of the input node\\n input_name=next(iter(model.inputs))\\n # Reading and Preprocessing Image\\n input_img=cv2.imread(\\'/data/resources/car.png\\')\\n input_img=cv2.resize(input_img, (300,300), interpolation = cv2.INTER_AREA)\\n input_img=np.moveaxis(input_img, -1, 0)\\n\\n # TODO: Prepare the model for inference (create input dict etc.)\\n input_dict={input_name:input_img}\\n start=time.time()\\n for _ in range(100):\\n # TODO: Run Inference in a Loop\\n net.infer(input_dict)\\n inference_time=time.time()-start\\n fps=100/inference_time\\n print(f\"Time Taken to run 100 inference is = {inference_time} seconds\")\\n \\n with open(f\"/output/{args.path}.txt\", \"w\") as f:\\n f.write(str(load_time)+\\'\\\\n\\')\\n f.write(str(inference_time)+\\'\\\\n\\')\\n f.write(str(fps)+\\'\\\\n\\')\\n\\nif __name__==\\'__main__\\':\\n parser=argparse.ArgumentParser()\\n parser.add_argument(\\'--model_path\\', required=True)\\n parser.add_argument(\\'--device\\', default=None)\\n parser.add_argument(\\'--path\\', default=None)\\n \\n args=parser.parse_args() \\n main(args)')\n\n\n# <span class=\"graffiti-highlight graffiti-id_l7v4f6u-id_oeichcd\"><i></i><button>Show Solution</button></span>\n\n# ## Step 2: Creating a Job Submission Script\n# \n# To submit a job to the DevCloud, you'll need to create a shell script. Similar to the Python script above, we'll use the `%%writefile` magic command to create a shell script called `inference_model_job.sh`. In the next cell, you will need to complete the `TODO` items for this shell script.\n# \n# `TODO` items:\n# 1. Create three variables:\n# * `DEVICE` - Assign the value as the first argument passed into the shell script.\n# * `MODELPATH` - Assign the value as the second argument passed into the shell script.\n# * `SAVEPATH` - Assign the value as the third argument passed into the shell script.\n# 2. 
Call the Python script using the three variable values as the command line argument\n# \n# If you get stuck, you can click on the **Show Solution** button below for a walkthrough with the solution code.\n\n# In[29]:\n\n\nget_ipython().run_cell_magic('writefile', 'inference_model_job.sh', '#!/bin/bash\\n\\nexec 1>/output/stdout.log 2>/output/stderr.log\\n\\nmkdir -p /output\\n\\n# TODO: Create DEVICE variable\\n# TODO: Create MODELPATH variable\\n# TODO: Create SAVEPATH variable\\nDEVICE=$1\\nMODELPATH=$2\\nSAVEPATH=$3\\n\\n# TODO: Call the Python script\\npython3 inference_on_device.py --model_path ${MODELPATH} --device ${DEVICE} --path ${SAVEPATH}\\n\\ncd /output\\n\\ntar zcvf output.tgz * # compresses all files in the current directory (output)')\n\n\n# <span class=\"graffiti-highlight graffiti-id_muqucn2-id_g84tg5a\"><i></i><button>Show Solution</button></span>\n\n# ## Step 3: Submitting a Job to Intel's DevCloud\n# \n# In the next three sub-steps, you will write your `!qsub` commands to submit your jobs to Intel's DevCloud to load your model and run inference on the **IEI Tank-870** edge node with an **Intel Core i5** CPU and an **Intel Neural Compute Stick 2** VPU.\n# \n# Your `!qsub` command should take the following flags and arguments:\n# 1. The first argument should be the shell script filename\n# 2. `-d` flag - This argument should be `.`\n# 3. `-l` flag - This argument should request an edge node with an **IEI Tank-870**. The default quantity is 1, so the **1** after `nodes` is optional. \n# * **Intel Core i5 6500TE** for your `CPU`.\n# * **Intel HD Graphics 530** for your `GPU`.\n# * **Intel Neural Compute Stick 2** for your `VPU`.\n# \n# To get the queue labels for these devices, you can go to [this link](https://devcloud.intel.com/edge/get_started/devcloud/)\n# \n# 4. `-F` flag - This argument should contain the three values to assign to the variables of the shell script:\n# * **DEVICE** - Device type for the job: `CPU`,`GPU` or `MYRIAD`.\n# * **MODELPATH** - Full path to the model for the job. As a reminder, the model is located in `/data/models/intel`.\n# * **SAVEPATH** - Name of the file you want to save the performance metrics as. These should be named as the following:\n# - `cpu_stats` for the `CPU` job\n# - `vpu_stats` for the `VPU` job\n# - `gpu_stats` for the `GPU` job\n# \n# **Note**: There is an optional flag, `-N`, you may see in a few exercises. This is an argument that only works on Intel's DevCloud that allows you to name your job submission. This argument doesn't work in Udacity's workspace integration with Intel's DevCloud.\n\n# ## Step 3a: Running on the NCS2\n# \n# In the cell below, write the qsub command that will submit your job to the VPU (NCS2).\n# \n# If you get stuck, you can click on the **Show Solution** button below for a walkthrough with the solution code.\n\n# In[30]:\n\n\nvpu_job_id_core = get_ipython().getoutput('qsub inference_model_job.sh -d . -l nodes=tank-870:i5-6500te:intel-ncs2 -F \"MYRIAD /data/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106 vpu_stats\" -N store_core ')\nprint(vpu_job_id_core[0])\n\n\n# <span class=\"graffiti-highlight graffiti-id_g3v8ffh-id_bvjbx8j\"><i></i><button>Show Solution</button></span>\n\n# ### Check Job Status\n# \n# To check on the job that was submitted, use `liveQStat` to check the status of the job. 
The cell is locked until this finishes polling 10 times or you can interrupt the kernel to stop it by pressing the stop button at the top: \n# \n# Column `S` shows the state of your running jobs.\n# \n# For example:\n# - If `JOB ID`is in Q state, it is in the queue waiting for available resources.\n# - If `JOB ID` is in R state, it is running.\n\n# In[31]:\n\n\nimport liveQStat\nliveQStat.liveQStat()\n\n\n# ###### Get Results\n# \n# Run the next cell to retrieve your job's results.\n\n# In[32]:\n\n\nimport get_results\n\nget_results.getResults(vpu_job_id_core[0], filename=\"output.tgz\", blocking=True)\n\n\n# ###### Unpack your output files and view stdout.log\n\n# In[33]:\n\n\nget_ipython().system('tar zxf output.tgz')\n\n\n# In[34]:\n\n\nget_ipython().system('cat stdout.log')\n\n\n# ###### View stderr.log\n# This can be used for debugging\n\n# In[35]:\n\n\nget_ipython().system('cat stderr.log')\n\n\n# ## Step 3b: Running on the CPU\n# \n# In the cell below, write the qsub command that will submit your job to the CPU.\n# \n# If you get stuck, you can click on the **Show Solution** button below for a walkthrough with the solution code.\n\n# In[36]:\n\n\ncpu_job_id_core = get_ipython().getoutput('qsub inference_model_job.sh -d . -l nodes=tank-870:i5-6500te -F \"CPU /data/models/intel/vehicle-license-plate-detection-barrier-0106/FP32/vehicle-license-plate-detection-barrier-0106 cpu_stats\" -N store_core ')\nprint(cpu_job_id_core[0])\n\n\n# <span class=\"graffiti-highlight graffiti-id_e6ujds3-id_ar4zvdk\"><i></i><button>Show Solution</button></span>\n\n# ### Check Job Status\n# \n# To check on the job that was submitted, use `liveQStat` to check the status of the job. The cell is locked until this finishes polling 10 times or you can interrupt the kernel to stop it by pressing the stop button at the top: \n# \n# Column `S` shows the state of your running jobs.\n# \n# For example:\n# - If `JOB ID`is in Q state, it is in the queue waiting for available resources.\n# - If `JOB ID` is in R state, it is running.\n\n# In[37]:\n\n\nimport liveQStat\nliveQStat.liveQStat()\n\n\n# ###### Get Results\n# \n# Run the next cell to retrieve your job's results.\n\n# In[38]:\n\n\nimport get_results\n\nget_results.getResults(cpu_job_id_core[0], filename=\"output.tgz\", blocking=True)\n\n\n# ###### Unpack your output files and view stdout.log\n\n# In[39]:\n\n\nget_ipython().system('tar zxf output.tgz')\n\n\n# In[40]:\n\n\nget_ipython().system('cat stdout.log')\n\n\n# ###### View stderr.log\n# This can be used for debugging\n\n# In[41]:\n\n\nget_ipython().system('cat stderr.log')\n\n\n# ## Step 3c: Running on the GPU\n# \n# In the cell below, write the qsub command that will submit your job to the GPU.\n# \n# If you get stuck, you can click on the **Show Solution** button below for a walkthrough with the solution code.\n\n# In[42]:\n\n\ngpu_job_id_core = get_ipython().getoutput('qsub inference_model_job.sh -d . -l nodes=tank-870:i5-6500te:intel-hd-530 -F \"GPU /data/models/intel/vehicle-license-plate-detection-barrier-0106/FP16/vehicle-license-plate-detection-barrier-0106 gpu_stats\" -N store_core ')\nprint(gpu_job_id_core[0])\n\n\n# <span class=\"graffiti-highlight graffiti-id_i3ywb2p-id_wq5meiq\"><i></i><button>Show Solution</button></span>\n\n# ### Check Job Status\n# \n# To check on the job that was submitted, use `liveQStat` to check the status of the job. 
The cell is locked until this finishes polling 10 times or you can interrupt the kernel to stop it by pressing the stop button at the top: \n# \n# Column `S` shows the state of your running jobs.\n# \n# For example:\n# - If `JOB ID`is in Q state, it is in the queue waiting for available resources.\n# - If `JOB ID` is in R state, it is running.\n\n# In[43]:\n\n\nimport liveQStat\nliveQStat.liveQStat()\n\n\n# ###### Get Results\n# \n# Run the next cell to retrieve your job's results.\n\n# In[44]:\n\n\nimport get_results\n\nget_results.getResults(gpu_job_id_core[0], filename=\"output.tgz\", blocking=True)\n\n\n# ###### Unpack your output files and view stdout.log\n\n# In[45]:\n\n\nget_ipython().system('tar zxf output.tgz')\n\n\n# In[46]:\n\n\nget_ipython().system('cat stdout.log')\n\n\n# ###### View stderr.log\n# This can be used for debugging\n\n# In[47]:\n\n\nget_ipython().system('cat stderr.log')\n\n\n# ## Step 4: Plot and Compare Results\n# \n# Run the cell below to plot and compare the results.\n\n# In[48]:\n\n\nimport matplotlib.pyplot as plt\n\n\n# In[26]:\n\n\ndef plot(labels, data, title, label):\n fig = plt.figure()\n ax = fig.add_axes([0,0,1,1])\n ax.set_ylabel(label)\n ax.set_title(title)\n ax.bar(labels, data)\n \ndef read_files(paths, labels):\n load_time=[]\n inference_time=[]\n fps=[]\n \n for path in paths:\n if os.path.isfile(path):\n f=open(path, 'r')\n load_time.append(float(f.readline()))\n inference_time.append(float(f.readline()))\n fps.append(float(f.readline()))\n\n plot(labels, load_time, 'Model Load Time', 'seconds')\n plot(labels, inference_time, 'Inference Time', 'seconds')\n plot(labels, fps, 'Frames per Second', 'Frames')\n\npaths=['vpu_stats.txt', 'gpu_stats.txt', 'cpu_stats.txt']\nread_files(paths, ['VPU', 'GPU', 'CPU'])\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"matplotlib.pyplot.figure"
]
] |
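The API listed for this row, `matplotlib.pyplot.figure`, appears in the notebook's `plot()` helper, which draws one bar chart per metric (model load time, inference time, FPS) for the three devices. A self-contained sketch of that helper, with illustrative numbers standing in for the stats files retrieved from the DevCloud jobs:

import matplotlib.pyplot as plt

def plot(labels, data, title, ylabel):
    # One bar chart per metric, mirroring the notebook's helper.
    fig = plt.figure()
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    ax.bar(labels, data)
    return fig

# Illustrative numbers only; the real values come from vpu_stats.txt,
# gpu_stats.txt and cpu_stats.txt produced by the qsub jobs.
devices = ["VPU", "GPU", "CPU"]
load_time_s = [2.9, 33.1, 1.2]
plot(devices, load_time_s, "Model Load Time", "seconds")
plt.show()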
webbery/lovechat
|
[
"51b6fff53957c03b36f8e61d5a3593cccda2ae2c"
] |
[
"deploy/dp/dp.py"
] |
[
"import numpy as np\nnp.set_printoptions(suppress=True)\nimport hanlp\n\nclass SyntacticParser():\n def __init__(self):\n print('begin syntatic')\n self.tokenizer = hanlp.load('PKU_NAME_MERGED_SIX_MONTHS_CONVSEG')\n print('begin syntactic_parser')\n self.syntactic_parser = hanlp.load(hanlp.pretrained.dep.CTB7_BIAFFINE_DEP_ZH)\n print('begin tagger')\n self.tagger = hanlp.load(hanlp.pretrained.pos.CTB5_POS_RNN_FASTTEXT_ZH)\n print('finish syntatic')\n\n def parse(self,sentences):\n #if isinstance(sentences,list)==False: return None\n print(sentences)\n token = self.tokenizer(sentences)\n print(token)\n tags = self.tagger(token)\n print(tags)\n pairs = []\n #if isinstance(sentences[0],list)==True:\n # pass\n #else:\n for idx in range(len(tags)):\n pairs.append((token[idx],tags[idx]))\n print(pairs)\n return self.syntactic_parser(pairs)\n\nparser = SyntacticParser()"
] |
[
[
"numpy.set_printoptions"
]
] |
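The `numpy.set_printoptions(suppress=True)` call at the top of dp.py only changes how arrays are printed: it switches off scientific notation, which keeps the module's debug `print` output readable. A minimal sketch of the effect:

import numpy as np

scores = np.array([1.5e-07, 3.2e-05, 0.91])

# Default formatting uses scientific notation for very small values,
# e.g. [1.5e-07 3.2e-05 9.1e-01].
print(scores)

# suppress=True prints plain fixed-point numbers instead,
# e.g. [0.       0.000032 0.91    ].
np.set_printoptions(suppress=True)
print(scores)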
dan-r95/ChessVision
|
[
"736fff64f5743d9070bec7c817b7381a76944d57"
] |
[
"board_old.py"
] |
[
"import cv2\nimport numpy as np\nfrom utils import convertTo8U\nfrom math import *\n\nimg = cv2.imread('img1.jpg',0)\nim = cv2.resize(img, (int(0.2*img.shape[1]), int(0.2*img.shape[0])))\n\n# Otsu's thresholding - doesn't work\n# ret,thresh = cv2.threshold(im,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n# cv2.imshow('otsu', thresh)\n\n''' Edge Detection '''\n# TODO: tune the minVal and maxVal parameters\nedges = cv2.Canny(im, 100, 300)\n# cv2.imshow('edges', edges)\n\n''' Corner Detection '''\n# Harris corner detection\ndst = cv2.cornerHarris(im,5,5,0.01)\n# np.savetxt('test.txt', dst)\nf = np.vectorize(convertTo8U)\ndst = f(dst)\n# np.savetxt('dst.txt', dst)\n# img_grey = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)\n\n# dst = cv2.convertScaleAbs(dst)\ncv2.imshow('scaleabs', dst)\n\nret,thresh = cv2.threshold(dst,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\ncv2.imshow('corners', thresh)\n\n# lines = cv2.HoughLines(thresh, 1, np.pi/180, 10)\n# top_lines = lines[:18]\n# print(top_lines.shape)\n\n# for i in xrange(len(top_lines)):\n# \tline = top_lines[i][0]\n# \trho = line[0]\n# \ttheta = line[1]\n# \tprint(theta)\n# \ta, b = cos(theta), sin(theta)\n# \tx0, y0 = a*rho, b*rho\n# \tx1 = int(x0 + 1000*(-b))\n# \ty1 = int(y0 + 1000*(a))\n# \tx2 = int(x0 - 1000*(-b))\n# \ty2 = int(y0 - 1000*(a))\n# \tpt1 = (x1, y1)\n# \tpt2 = (x2, y2)\n# \tcv2.line(thresh, pt1, pt2, (255, 0, 0))\n\n# cv2.imshow('lines', thresh)\n\n''' Hough Transform '''\n\n# Identify chessboard lines with Hough transform\n# TODO: tune the threshold parameter (fourth parameter)\n# lines = cv2.HoughLines(edges, 1, np.pi/180, 250)\n# top_lines = lines[:18]\n# print(top_lines.shape)\n\n# for i in xrange(len(top_lines)):\n# \tline = top_lines[i][0]\n# \trho = line[0]\n# \ttheta = line[1]\n# \tprint(theta)\n# \ta, b = cos(theta), sin(theta)\n# \tx0, y0 = a*rho, b*rho\n# \tx1 = int(x0 + 1000*(-b))\n# \ty1 = int(y0 + 1000*(a))\n# \tx2 = int(x0 - 1000*(-b))\n# \ty2 = int(y0 - 1000*(a))\n# \tpt1 = (x1, y1)\n# \tpt2 = (x2, y2)\n# \tcv2.line(im, pt1, pt2, (255, 0, 0))\n\n# cv2.imshow('lines', im)\n# cv2.waitKey(0)\n\n# Use histogram of line orientations to identify two sets that represent\n# edges going along ranks and files\n\n\n# Remove extraneous lines from the sets\n\n# Find one square\n\n# Calculate homography\n\n# Extropolate to other squares and update homography accordingly\n\ncv2.waitKey(0)"
] |
[
[
"numpy.vectorize"
]
] |
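In board_old.py, `numpy.vectorize` wraps the scalar helper `convertTo8U` from the repository's `utils` module so it can be applied element-wise to the Harris corner response before Otsu thresholding. The sketch below follows the same pattern with a hypothetical stand-in for `convertTo8U` (the real implementation is not shown in this row), clipping and scaling each response value into the 8-bit range:

import numpy as np

def convert_to_8u(value, max_value=1.0):
    # Hypothetical stand-in: map a float response to 0..255.
    return np.uint8(max(0.0, min(value, max_value)) / max_value * 255)

# Fake Harris-style response map (illustrative values).
response = np.array([[0.02, -0.10], [0.75, 1.30]])

vectorized = np.vectorize(convert_to_8u)
print(vectorized(response))
# [[  5   0]
#  [191 255]]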
dogfooter-master/dogfooter
|
[
"e1e39375703fe3019af7976f97c44cf2cb7ca0fa"
] |
[
"dogfootermacro_main.py"
] |
[
"\nimport threading\nimport time\nimport collections\nimport sys\nimport os\nimport likeyoubot_logger\nimport likeyoubot_win\nimport signal\nfrom PIL import ImageGrab\nimport cv2\nimport numpy as np\n\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWebEngineWidgets import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\n\n\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\nform_class = uic.loadUiType('mainwindow.ui')[0]\ndogfootermacro_title = 'DogFooterMacro Thresholder'\nglobal dogfootermacro_logger\n\nclass PlayThread(QThread):\n\tdef __init__(self, ui):\n\t\tQThread.__init__(self)\n\t\tself.ui = ui\n\t\tself.hwnd = self.ui.hwnd_dic[self.ui.search_comboBox.currentText()]\n\t\tself.win = self.ui.win\n\n\tdef __del__(self):\n\t\tself.wait()\n\n\tdef run(self):\n\n\t\t(anchor_x, anchor_y, end_x, end_y) = self.win.get_window_location(self.hwnd)\n\t\tadj_x, adj_y = self.win.get_player_adjust(self.hwnd)\n\t\twhile(True):\n\t\t\t# img = ImageGrab.grab(bbox=(anchor_x - adj_x, anchor_y - adj_y, end_x, end_y))\n\t\t\timg = self.win.get_window_screenshot(self.hwnd, 2)\n\t\t\t# img = ImageGrab.grab(bbox=(100,10,400,780)) #bbox specifies specific region (bbox= x,y,width,height)\n\t\t\timg_np = np.array(img)\n\t\t\t# img_np = cv2.resize(img_np, (width, height), interpolation = cv2.INTER_AREA)\n\t\t\t\n\t\t\tr = int(self.ui.lower_r_slider.value())\n\t\t\tg = int(self.ui.lower_g_slider.value())\n\t\t\tb = int(self.ui.lower_b_slider.value())\n\n\t\t\t# lowerBound = np.array((r, g, b), dtype=np.uint8, ndmin=1)\n\t\t\t# upperBound = np.array((255, 255, 255), dtype=np.uint8, ndmin=1)\n\t\t\tlowerBound = (r, g, b)\n\t\t\t\n\t\t\tr = int(self.ui.upper_r_slider.value())\n\t\t\tg = int(self.ui.upper_g_slider.value())\n\t\t\tb = int(self.ui.upper_b_slider.value())\n\n\t\t\tupperBound = (r, g, b)\n\n\t\t\tanchor_x = int(self.ui.view_anchor_x_spinBox.value())\n\t\t\tanchor_y = int(self.ui.view_anchor_y_spinBox.value())\n\n\t\t\twidth = int(self.ui.view_width_spinBox.value())\n\t\t\theight = int(self.ui.view_height_spinBox.value())\n\n\t\t\tif width < anchor_x + 120:\n\t\t\t\twidth = anchor_x + 120\n\n\t\t\tif height < anchor_y + 120:\n\t\t\t\theight = anchor_y + 120\n\n\n\n\t\t\timg_np = cv2.inRange(img_np, lowerBound, upperBound)\n\t\t\timg_np = img_np[anchor_y:height, anchor_x:width]\n\t\t\t# tgt = img_np.copy()\n\t\t\t# for row in range(img.height):\n\t\t\t# \tfor col in range(img.width):\n\t\t\t# \t\timg_np[row][col][0] = 255 - img_np[row][col][0] \n\t\t\t# \t\timg_np[row][col][1] = 255 - img_np[row][col][1] \n\t\t\t# \t\timg_np[row][col][2] = 255 - img_np[row][col][2] \n\n\t\t\t# frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)\n\t\t\ttitle = \"Press ESC or Q \" + str(self.hwnd)\n\n\t\t\tcv2.imshow(title, img_np)\n\t\t\twait_key = cv2.waitKey(25)\n\n\t\t\tif wait_key & 0xFF == ord('q'):\n\t\t\t\tbreak\n\t\t\telif wait_key == 27:\n\t\t\t\tbreak\n\n\t\t\tif cv2.getWindowProperty(title, 0) == -1:\n\t\t\t\tbreak\n\n\t\tcv2.destroyAllWindows()\t\n\n\nclass MainWindow(QMainWindow, form_class):\n\tdef 
__init__(self):\n\t\tsuper().__init__()\n\t\tself.setupUi(self)\n\n\t\tself.setWindowTitle(dogfootermacro_title)\n\t\tself.search_button.clicked.connect(self.callback_search_button_clicked)\n\t\tself.search_comboBox.currentIndexChanged.connect(self.callback_search_comboBox_currentIndexChanged)\n\t\tself.play_button.clicked.connect(self.callback_play_button_clicked)\n\n\t\tself.upper_r_slider.valueChanged.connect(self.callback_upper_r_slider_changed)\n\t\tself.upper_r_spinBox.valueChanged.connect(self.callback_upper_r_spinBox_changed)\n\t\tself.upper_g_slider.valueChanged.connect(self.callback_upper_g_slider_changed)\n\t\tself.upper_g_spinBox.valueChanged.connect(self.callback_upper_g_spinBox_changed)\n\t\tself.upper_b_slider.valueChanged.connect(self.callback_upper_b_slider_changed)\n\t\tself.upper_b_spinBox.valueChanged.connect(self.callback_upper_b_spinBox_changed)\n\n\n\t\tself.lower_r_slider.valueChanged.connect(self.callback_lower_r_slider_changed)\n\t\tself.lower_r_spinBox.valueChanged.connect(self.callback_lower_r_spinBox_changed)\n\t\tself.lower_g_slider.valueChanged.connect(self.callback_lower_g_slider_changed)\n\t\tself.lower_g_spinBox.valueChanged.connect(self.callback_lower_g_spinBox_changed)\n\t\tself.lower_b_slider.valueChanged.connect(self.callback_lower_b_slider_changed)\n\t\tself.lower_b_spinBox.valueChanged.connect(self.callback_lower_b_spinBox_changed)\n\n\n\t\tself.win = likeyoubot_win.LYBWin(dogfootermacro_title)\n\t\tself.hwnd_dic = {}\n\n\tdef callback_search_button_clicked(self):\n\t\tself.win.find_window_wildcard('')\n\t\tfor each_hwnd in self.win.handle_list:\n\t\t\tdogfootermacro_logger.debug(str(each_hwnd) + ' ' + self.win.get_title(each_hwnd))\n\t\t\tif each_hwnd in self.win.parent_handle_dic:\n\t\t\t\ttitle = self.win.get_title(self.win.parent_handle_dic[each_hwnd])\n\t\t\telse:\n\t\t\t\ttitle = self.win.get_title(each_hwnd)\n\n\t\t\tself.search_comboBox.addItem(title)\n\t\t\tself.hwnd_dic[title] = each_hwnd\n\n\t\tdogfootermacro_logger.debug(self.search_comboBox.count())\n\t\tif self.search_comboBox.count() > 0:\n\t\t\tself.play_button.setEnabled(True)\n\t\telse:\n\t\t\tself.play_button.setEnabled(False)\n\n\tdef callback_search_comboBox_currentIndexChanged(self):\n\t\tdogfootermacro_logger.debug('callback_search_comboBox_currentIndexChanged called()')\n\n\tdef callback_play_button_clicked(self):\n\t\tdogfootermacro_logger.debug('callback_play_button_clicked called()')\t\n\t\tself.play_thread = PlayThread(self)\n\t\tself.play_thread.start()\n\n\tdef callback_upper_r_slider_changed(self):\n\t\tself.upper_r_spinBox.setValue(self.upper_r_slider.value())\n\n\tdef callback_upper_r_spinBox_changed(self):\n\t\tself.upper_r_slider.setValue(self.upper_r_spinBox.value())\n\n\tdef callback_upper_g_slider_changed(self):\n\t\tself.upper_g_spinBox.setValue(self.upper_g_slider.value())\n\n\tdef callback_upper_g_spinBox_changed(self):\n\t\tself.upper_g_slider.setValue(self.upper_g_spinBox.value())\n\n\tdef callback_upper_b_slider_changed(self):\n\t\tself.upper_b_spinBox.setValue(self.upper_b_slider.value())\n\n\tdef callback_upper_b_spinBox_changed(self):\n\t\tself.upper_b_slider.setValue(self.upper_b_spinBox.value())\n\n\tdef callback_lower_r_slider_changed(self):\n\t\tself.lower_r_spinBox.setValue(self.lower_r_slider.value())\n\n\tdef callback_lower_r_spinBox_changed(self):\n\t\tself.lower_r_slider.setValue(self.lower_r_spinBox.value())\n\n\tdef callback_lower_g_slider_changed(self):\n\t\tself.lower_g_spinBox.setValue(self.lower_g_slider.value())\n\n\tdef 
callback_lower_g_spinBox_changed(self):\n\t\tself.lower_g_slider.setValue(self.lower_g_spinBox.value())\n\n\tdef callback_lower_b_slider_changed(self):\n\t\tself.lower_b_spinBox.setValue(self.lower_b_slider.value())\n\n\tdef callback_lower_b_spinBox_changed(self):\n\t\tself.lower_b_slider.setValue(self.lower_b_spinBox.value())\t\nif __name__ == '__main__':\n\ttry:\n\t\tdogfootermacro_logger = likeyoubot_logger.LYBLogger.getLogger()\n\t\tapp = QApplication(sys.argv)\n\t\tmainWindow = MainWindow()\n\t\tmainWindow.show()\n\t\tapp.exec_()\n\texcept:\n\t\tprint('create logger fail: ' +str(sys.exc_info()[0]) + '(' +str(sys.exc_info()[1]) + ')')\n\t\tsys.exit(1) \n"
] |
[
[
"numpy.array"
]
] |
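The thresholder's `PlayThread.run()` uses `numpy.array` to turn the captured window image into an array that `cv2.inRange` can mask between the lower and upper RGB bounds taken from the sliders. A minimal sketch of that conversion and masking step, using a small synthetic PIL image in place of the window screenshot and fixed bounds in place of the slider values:

import cv2
import numpy as np
from PIL import Image

# Synthetic 2x2 RGB image standing in for the window screenshot.
img = Image.new("RGB", (2, 2), color=(200, 120, 40))
img_np = np.array(img)          # shape (2, 2, 3), dtype uint8

lower_bound = (150, 100, 0)     # values the UI sliders would provide
upper_bound = (255, 255, 255)

# Pixels inside [lower, upper] on every channel become 255 in the mask, others 0.
mask = cv2.inRange(img_np, lower_bound, upper_bound)
print(mask)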
coutouly/nenupy
|
[
"76cf9f6a6a93e9eed16f8450e3cfe385440a212e",
"76cf9f6a6a93e9eed16f8450e3cfe385440a212e"
] |
[
"nenupy/observation/sqldatabase.py",
"nenupy/astro/sky.py"
] |
[
"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n ************\n SQL Database\n ************\n\n Query obs containing 19 antennas in database:\n\n SELECT * \n FROM observation o \n inner join analogbeam a\n on o.id = a.observation_id\n where a.nAntennas = 19;\n\n Query obs containing MA 55 in database:\n\n select * \n from observation o\n inner join analogbeam a\n on o.id = a.observation_id\n inner join mini_array_association aa\n on a.id = aa.analog_beam_id\n inner join miniarray ma\n on ma.id = aa.mini_array_id\n where ma.name = 55;\n\n from nenupy.observation import ParsetDataBase\n from nenupy.observation import Parset\n from sqlalchemy import create_engine\n import os\n\n os.remove('/Users/aloh/Desktop/ma_base.db')\n db = ParsetDataBase(dataBaseName='/Users/aloh/Desktop/ma_base.db')#, engine=create_engine('mysql:///'))\n parset = Parset('/Users/aloh/Desktop/es11-2021-06-04-crab.parset')\n parset.addToDatabase(data_base=db)\n parset2 = Parset('/Users/aloh/Desktop/parset/test_alan.parset')\n parset2.addToDatabase(data_base=db)\n\"\"\"\n\n\n__author__ = 'Alan Loh'\n__copyright__ = 'Copyright 2020, nenupy'\n__credits__ = ['Alan Loh']\n__maintainer__ = 'Alan'\n__email__ = '[email protected]'\n__status__ = 'Production'\n__all__ = [\n 'SchedulingTable',\n 'AnalogBeamTable',\n 'DigitalBeamTable',\n 'ParsetDataBase'\n]\n\n\nimport numpy as np\nfrom os.path import abspath, isfile, basename, dirname\nfrom astropy.time import Time, TimeDelta\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord, AltAz, ICRS, solar_system_ephemeris, get_body\n\nfrom sqlalchemy.ext.declarative import DeferredReflection, declarative_base\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.inspection import inspect\nfrom sqlalchemy import (\n Column,\n ForeignKey,\n Integer,\n BigInteger,\n String,\n Float,\n Boolean,\n DateTime,\n create_engine,\n)\nfrom sqlalchemy.orm import Session, sessionmaker, relationship\n\nfrom nenupy.instru import sb2freq\nfrom nenupy import nenufar_position\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nBase = declarative_base()\n\n# ============================================================= #\n# ------------------------- Constants ------------------------- #\n# ============================================================= #\nMINI_ARRAYS = np.concatenate(\n (np.arange(96, dtype=int), np.arange(100, 107, dtype=int))\n)\n\nANTENNAS = np.arange(1, 20, dtype=int)\n\nSUB_BANDS = np.arange(512, dtype=int)\n\nRECEIVERS = np.array(['undysputed', 'xst', 'nickel', 'seti', 'radiogaga', 'codalema'])\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ---------------------- SchedulingTable ---------------------- #\n# ============================================================= #\n# class NenufarUserTable(DeferredReflection, Base):\n# \"\"\"\n# Fake class for NenuFAR User Table\n# \"\"\"\n\n# __tablename__ = 'nenufar_users'\n\n\nclass SchedulingTable(Base):\n \"\"\"\n \"\"\"\n\n __tablename__ = 'scheduling'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(255), nullable=False)\n fileName = Column(String(255), nullable=False)\n path = Column(String(255), nullable=True)\n comments = Column(String(255), nullable=True)\n checkTime = Column(DateTime, nullable=True)\n startTime = Column(DateTime, nullable=False)\n endTime = Column(DateTime, nullable=False)\n abortTime = Column(DateTime, 
nullable=True)\n state = Column(String(30), nullable=False)\n status = Column(String(30), nullable=False, default=\"unknown\")\n other_error = Column(String(150), nullable=True)\n type = Column(String(30), nullable=False, default=\"unknown\")\n topic = Column(String(255), nullable=False, default=\"debug\")\n tags = Column(String(255), nullable=True)\n submitTime = Column(DateTime, nullable=False, default=Time.now().datetime)\n token = Column(String(255), nullable=True)\n username = Column(String(255), nullable=False, default=\"testobs\")\n checker_username = Column(String(255), nullable=True)\n\n # scheduling_id = Column(BigInteger, ForeignKey('scheduling.id', ondelete=\"CASCADE\"))\n # scheduling = relationship(SchedulingTable, cascade=\"all, delete\")\n # obs_name = Column(String(40), nullable=False)\n # contact_name = Column(String(255), nullable=False)\n # contact_email = Column(String(255), nullable=False)\n # key_project_code = Column(String(4), nullable=False)\n # key_project_name = Column(String(100), nullable=False)\n # start_time = Column(DateTime, nullable=False)\n # stop_time = Column(DateTime, nullable=False)\n # parset_file = Column(String(300), nullable=False)\n receivers = relationship(\"ReceiverAssociation\", back_populates='scheduling', cascade=\"all, delete, delete-orphan\")\n nickel_subbands = relationship(\"SubBandNickelAssociation\", back_populates='scheduling', cascade=\"all, delete, delete-orphan\")\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ----------------------- ReceiverTable ----------------------- #\n# ============================================================= #\nclass ReceiverAssociation(Base):\n \"\"\" \"\"\"\n __tablename__ = \"receiver_association\"\n\n scheduling_id = Column(BigInteger, ForeignKey(\"scheduling.id\", ondelete=\"CASCADE\"), primary_key=True)\n receiver_id = Column(ForeignKey(\"receivers.id\", ondelete=\"CASCADE\"), primary_key=True)\n \n receiver = relationship(\"ReceiverTable\", back_populates=\"schedulings\", cascade=\"all, delete\")\n scheduling = relationship(\"SchedulingTable\", back_populates=\"receivers\", cascade=\"all, delete\")\n\n\nclass ReceiverTable(Base):\n \"\"\" \"\"\"\n __tablename__ = 'receivers'\n\n id = Column(Integer, primary_key=True)\n schedulings = relationship(\"ReceiverAssociation\", back_populates='receiver', cascade=\"all, delete, delete-orphan\")\n name = Column(String(20), nullable=False)\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ---------------------- MiniArrayTable ----------------------- #\n# ============================================================= #\nclass MiniArrayAssociation(Base):\n \"\"\"\n \"\"\"\n __tablename__ = 'mini_array_association'\n\n analog_beam_id = Column(ForeignKey(\"analogbeam.id\", ondelete=\"CASCADE\"), primary_key=True)\n mini_array_id = Column(ForeignKey(\"miniarray.id\", ondelete=\"CASCADE\"), primary_key=True)\n antenna_id = Column(ForeignKey(\"antenna.id\", ondelete=\"CASCADE\"), primary_key=True)\n \n mini_array = relationship(\"MiniArrayTable\", back_populates=\"analog_beams\", cascade=\"all, delete\")\n analog_beam = relationship(\"AnalogBeamTable\", back_populates=\"mini_arrays\", cascade=\"all, delete\")\n antenna = relationship(\"AntennaTable\", 
back_populates=\"mini_arrays\", cascade=\"all, delete\")\n\nclass MiniArrayTable(Base):\n \"\"\"\n \"\"\"\n __tablename__ = 'miniarray'\n\n id = Column(Integer, primary_key=True)\n analog_beams = relationship(\"MiniArrayAssociation\", back_populates='mini_array', cascade=\"all, delete, delete-orphan\")\n name = Column(String(3), nullable=False)\n # antennas = relationship(\"_AntennaAssociation\", back_populates='mini_array')\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ----------------------- AntennaTable ------------------------ #\n# ============================================================= #\n# class _AntennaAssociation(Base):\n# \"\"\"\n# \"\"\"\n# __tablename__ = 'antenna_association'\n\n# mini_array_id = Column(ForeignKey(\"miniarray.id\"), primary_key=True)\n# antenna_id = Column(ForeignKey(\"antenna.id\"), primary_key=True)\n# antenna = relationship(\"_AntennaTable\", back_populates=\"mini_arrays\")\n# mini_array = relationship(\"_MiniArrayTable\", back_populates=\"antennas\")\n\n\nclass AntennaTable(Base):\n \"\"\"\n \"\"\"\n __tablename__ = 'antenna'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(2), nullable=False)\n mini_arrays = relationship(\"MiniArrayAssociation\", back_populates='antenna', cascade=\"all, delete, delete-orphan\")\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ----------------------- SubBandTable ------------------------ #\n# ============================================================= #\nclass SubBandAssociation(Base):\n \"\"\"\n \"\"\"\n __tablename__ = 'subband_association'\n\n digital_beam_id = Column(ForeignKey(\"digitalbeam.id\", ondelete=\"CASCADE\"), primary_key=True)\n subband_id = Column(ForeignKey(\"subband.id\", ondelete=\"CASCADE\"), primary_key=True)\n \n # extra_data = Column(String(50))\n subband = relationship(\"SubBandTable\", back_populates=\"digital_beams\", cascade=\"all, delete\")\n digital_beam = relationship(\"DigitalBeamTable\", back_populates=\"subbands\", cascade=\"all, delete\")\n\n\nclass SubBandNickelAssociation(Base):\n \"\"\"\n \"\"\"\n __tablename__ = 'subband_nickel_association'\n\n scheduling_id = Column(BigInteger, ForeignKey(\"scheduling.id\", ondelete=\"CASCADE\"), primary_key=True)\n subband_id = Column(ForeignKey(\"subband.id\", ondelete=\"CASCADE\"), primary_key=True)\n subband = relationship(\"SubBandTable\", back_populates=\"scheduling\", cascade=\"all, delete\")\n scheduling = relationship(\"SchedulingTable\", back_populates=\"nickel_subbands\", cascade=\"all, delete\")\n\n\nclass SubBandTable(Base):\n \"\"\"\n \"\"\"\n __tablename__ = 'subband'\n\n id = Column(Integer, primary_key=True)\n digital_beams = relationship(\"SubBandAssociation\", back_populates='subband', cascade=\"all, delete, delete-orphan\")\n scheduling = relationship(\"SubBandNickelAssociation\", back_populates='subband', cascade=\"all, delete, delete-orphan\")\n index = Column(String(3), nullable=False)\n frequency_mhz = Column(Float, nullable=False)\n\n\n# class SubBandNickelTable(Base):\n# \"\"\"\n# \"\"\"\n# __tablename__ = 'subband_nickel'\n\n# id = Column(Integer, primary_key=True)\n# scheduling = relationship(\"SubBandNickelAssociation\", back_populates='subband', cascade=\"all, delete, 
delete-orphan\")\n# index = Column(String(3), nullable=False)\n# frequency_mhz = Column(Float, nullable=False)\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ---------------------- AnalogBeamTable ---------------------- #\n# ============================================================= #\nclass AnalogBeamTable(Base):\n \"\"\"\n \"\"\"\n\n __tablename__ = 'analogbeam'\n\n id = Column(Integer, primary_key=True)\n scheduling_id = Column(BigInteger, ForeignKey('scheduling.id', ondelete=\"CASCADE\"))\n scheduling = relationship(SchedulingTable, cascade=\"all, delete\")\n\n ra_j2000 = Column(Float, nullable=True)\n dec_j2000 = Column(Float, nullable=True)\n observed_coord_type = Column(String(50), nullable=False)\n observed_pointing_type = Column(String(50), nullable=False)\n start_time = Column(DateTime, nullable=False)\n stop_time = Column(DateTime, nullable=False)\n # nMiniArrays = Column(Integer, nullable=False)\n # miniArrays = Column(String(500), nullable=False)\n mini_arrays = relationship(\"MiniArrayAssociation\", back_populates='analog_beam')\n # nAntennas = Column(Integer, nullable=False)\n #antennas = Column(String(200), nullable=False)\n beam_squint_freq_mhz = Column(Float, nullable=False)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n # @property\n # def _miniArrays(self):\n # return list(map(int, self.miniArrays.split(',')))\n # @_miniArrays.setter\n # def _miniArrays(self, m):\n # if not isinstance(m, (list, np.ndarray)):\n # raise TypeError(\n # 'miniarrays should be a list-like object'\n # )\n # self.miniArrays = ','.join([str(mi) for mi in m])\n\n\n # @property\n # def _antennas(self):\n # return list(map(int, self.antennas.split(',')))\n # @_antennas.setter\n # def _antennas(self, a):\n # if not isinstance(a, (list, np.ndarray)):\n # raise TypeError(\n # 'antennas should be a list-like object'\n # )\n # self.antennas = ','.join([str(ai) for ai in a])\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# --------------------- DigitalBeamTable ---------------------- #\n# ============================================================= #\nclass DigitalBeamTable(Base):\n \"\"\"\n \"\"\"\n\n __tablename__ = 'digitalbeam'\n\n id = Column(Integer, primary_key=True)\n anabeam_id = Column(Integer, ForeignKey('analogbeam.id', ondelete=\"CASCADE\"))\n anabeam = relationship(AnalogBeamTable, cascade=\"all, delete\")\n\n ra_j2000 = Column(Float, nullable=True)\n dec_j2000 = Column(Float, nullable=True)\n observed_coord_type = Column(String(50), nullable=False)\n observed_pointing_type = Column(String(50), nullable=False)\n start_time = Column(DateTime, nullable=False)\n stop_time = Column(DateTime, nullable=False)\n # subBands = Column(String(500), nullable=False)\n subbands = relationship(\"SubBandAssociation\", back_populates='digital_beam', cascade=\"all, delete, delete-orphan\")\n freq_min_mhz = Column(Float, nullable=False)\n freq_max_mhz = Column(Float, nullable=False)\n processing = Column(String(255), nullable=True)\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n # @property\n # def _subBands(self):\n # 
return list(map(int, self.subBands.split(',')))\n # @_subBands.setter\n # def _subBands(self, s):\n # if not isinstance(s, (list, np.ndarray)):\n # raise TypeError(\n # 'subBands should be a list-like object'\n # )\n # self.subBands = ','.join([str(si) for si in s])\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ----------------------- ParsetDataBase ---------------------- #\n# ============================================================= #\nclass DuplicateParsetEntry(Exception):\n pass\n\nclass UserNameNotFound(Exception):\n pass\n\nclass ParsetDataBase(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, database_name, engine=None, session=None):\n self.name = database_name\n self.engine = engine\n self.session = session\n\n log.info(f\"Session started on {self.engine.url}.\")\n\n self.parset = None\n self.current_scheduling = None\n self.anaid = {}\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def parset(self):\n return self._parset\n @parset.setter\n def parset(self, p):\n # if p is not None:\n # # Check if an entry already exists\n # if inspect(self.engine).has_table(\"scheduling\"):\n # entry_exists = self.session.query(SchedulingTable).filter_by(fileName=p).first() is not None\n # if entry_exists:\n # log.info(f\"Parset {p} already in {self.name}. Skipping it.\")\n # raise DuplicateParsetEntry(f\"Duplicated parset {p}.\")\n \n if p is not None:\n parset_entry = self.session.query(SchedulingTable).filter_by(fileName=basename(p)).first()\n if parset_entry is not None:\n if inspect(self.engine).has_table(\"analogbeam\"):\n scheduling_id = parset_entry.id\n entry = self.session.query(AnalogBeamTable).filter_by(scheduling_id=scheduling_id).first()\n if entry is not None:\n log.info(f\"Parset {basename(p)} already in {self.name}. Skipping it.\")\n raise DuplicateParsetEntry(f\"Duplicated parset {basename(p)}.\")\n self._parset = p\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n def new(cls, database_name='my_base.db', engine=None):\n \"\"\" \"\"\"\n if engine is None:\n # Create a default engine\n engine = create_engine(\n 'sqlite:///' + database_name,\n pool_pre_ping=True\n )\n Base.metadata.create_all(engine)\n DBSession = sessionmaker(bind=engine)\n session = DBSession()\n return cls(database_name=database_name, engine=engine, session=session)\n\n\n @classmethod\n def from_existing_database(cls, engine):\n \"\"\" \"\"\"\n Base = automap_base()\n Base.prepare(engine, reflect=True)\n session = Session(engine)\n name = engine.url.database\n return cls(database_name=name, engine=engine, session=session)\n\n\n def create_configuration_tables(self):\n \"\"\" Creates the tables 'mini_arrays', 'antennas', 'sub-bands' and 'receivers'. 
\"\"\"\n\n existing_tables = inspect(self.engine).get_table_names()\n\n if (\"miniarray\" in existing_tables) and (self.session.query(MiniArrayTable).first() is not None):\n log.warning(\"'miniarrays' table already exists.\")\n else:\n # Initialize the Mini-Array Table (96 core + 6 remote Mini-Arrays)\n log.debug(\"Generating the 'miniarrays' table.\")\n self.session.add_all([\n MiniArrayTable(name=str(miniarray_name))\n for miniarray_name in MINI_ARRAYS\n ])\n\n if (\"antenna\" in existing_tables) and (self.session.query(AntennaTable).first() is not None):\n log.warning(\"'antenna' table already exists.\")\n else:\n # Initialize the Antenna Table\n log.debug(\"Generating the 'antennas' table.\")\n self.session.add_all([\n AntennaTable(name=str(antenna_name))\n for antenna_name in ANTENNAS\n ])\n\n if (\"subband\" in existing_tables) and (self.session.query(SubBandTable).first() is not None):\n log.warning(\"'subband' table already exists.\")\n else:\n # Initialize the SubBand Table\n log.debug(\"Generating the 'sub-bands' table.\")\n self.session.add_all([\n SubBandTable(index=str(subband), frequency_mhz=sb2freq(subband)[0].value)\n for subband in SUB_BANDS\n ])\n\n # if (\"subband_nickel\" in existing_tables) and (self.session.query(SubBandNickelTable).first() is not None):\n # log.warning(\"'subband_nickel' table already exists.\")\n # else:\n # # Initialize the SubBand Table\n # log.debug(\"Generating the 'subband_nickel' table.\")\n # self.session.add_all([\n # SubBandNickelTable(index=str(subband), frequency_mhz=sb2freq(subband)[0].value)\n # for subband in SUB_BANDS\n # ])\n\n if (\"receivers\" in existing_tables) and (self.session.query(ReceiverTable).first() is not None):\n log.warning(\"'receivers' table already exists.\")\n else:\n # Initialize the Receiver Table\n log.debug(\"Generating the 'receivers' table.\")\n self.session.add_all([\n ReceiverTable(name=receiver)\n for receiver in RECEIVERS\n ])\n\n # Commit the changes\n self.session.commit()\n log.info(\"Tables 'mini-arrays', 'antennas', 'sub-bands' and 'receivers' ready.\")\n\n\n def create_association_tables(self):\n \"\"\" \"\"\"\n\n\n def done(self):\n \"\"\"\n \"\"\"\n self.engine.dispose()\n \n\n def delete_row(self, scheduling_id):\n \"\"\" \"\"\"\n self.session.query(SchedulingTable).filter_by(id=scheduling_id).delete()\n self.session.commit()\n\n\n def add_row(self, parset_property, desc):\n \"\"\"\n \"\"\"\n pProp = parset_property\n\n if desc.lower() == 'observation':\n new_row, is_new = self._create_scheduling_row(parset_property)\n\n # Keep track of current scheduling row\n self.current_scheduling = new_row\n\n elif desc.lower() == 'anabeam':\n new_row, is_new = self._create_analog_beam_row(parset_property)\n\n # Keep track of analog beam rows\n self.anaid[pProp['anaIdx']] = new_row\n\n elif desc.lower() == 'digibeam':\n new_row, is_new = self._create_digital_beam_row(parset_property)\n\n else:\n raise ValueError(\n 'desc should be observation/anabeam/digibeam'\n )\n\n if is_new:\n self.session.add(new_row)\n self.session.commit()\n\n\n # --------------------------------------------------------- #\n # ----------------------- Internal ------------------------ #\n @staticmethod\n def _normalize_beam_pointing(parset_property) -> dict:\n \"\"\" Returns a RA, Dec whatever the pointing type is. 
\"\"\"\n\n # Sort out the beam start and stop times\n duration = TimeDelta(parset_property['duration'] , format='sec')\n start_time = parset_property['startTime']\n stop_time = (parset_property['startTime'] + duration)\n\n # Deal with coordinates and pointing types\n direction_type = parset_property['directionType'].lower()\n if direction_type == \"j2000\":\n # Nothing else to do\n log.debug(f\"'{direction_type}' beam direction type.\")\n decal_ra = float(parset_property.get(\"decal_ra\", 0.0))*u.deg\n decal_dec = float(parset_property.get(\"decal_dec\", 0.0))*u.deg\n right_ascension = (parset_property['angle1'].to(u.deg) + decal_ra).value\n declination = (parset_property['angle2'].to(u.deg) + decal_dec).value\n\n elif direction_type == \"azelgeo\":\n # This is a transit observation, compute the mean RA/Dec\n log.debug(f\"'{direction_type}' beam direction type, taking the mean RA/Dec.\")\n # Convert AltAz to RA/Dec\n radec = SkyCoord(\n parset_property['angle1'] + float(parset_property.get(\"decal_az\", 0.0))*u.deg,\n parset_property['angle2'] + float(parset_property.get(\"decal_el\", 0.0))*u.deg,\n frame=AltAz(\n obstime=start_time + duration/2.,\n location=nenufar_position\n )\n ).transform_to(ICRS)\n right_ascension = radec.ra.deg\n declination = radec.dec.deg\n \n elif direction_type == \"natif\":\n # This is a test observation, unable to parse the RA/Dec\n log.debug(f\"'{direction_type}' beam direction type, RA/Dec fields will be empty.\")\n right_ascension = None\n declination = None\n\n else:\n # Dealing with a Solar System source\n log.debug(f\"'{direction_type}' beam direction type, taking the mean RA/Dec.\")\n with solar_system_ephemeris.set('builtin'):\n source = get_body(\n body=direction_type,\n time=start_time + duration/2.,\n location=nenufar_position\n )\n radec = source.transform_to(ICRS)\n decal_ra = float(parset_property.get(\"decal_ra\", 0.0))*u.deg\n decal_dec = float(parset_property.get(\"decal_dec\", 0.0))*u.deg\n right_ascension = radec.ra.deg + decal_ra.value\n declination = radec.dec.deg + decal_dec.value\n\n return {\n \"ra\": right_ascension,\n \"dec\": declination,\n \"start_time\": start_time.datetime,\n \"stop_time\": stop_time.datetime\n }\n\n\n def _create_scheduling_row(self, parset_property):\n \"\"\" \"\"\"\n # Link to receivers\n receivers_on = parset_property.get(\"hd_receivers\", [])\n receivers_on += parset_property.get(\"nri_receivers\", [])\n if parset_property.get(\"xst_userfile\", False):\n # Add the xst option, which is not a proper receiver\n receivers_on.append(\"xst\")\n if not np.all(np.isin(receivers_on, RECEIVERS)):\n log.warning(f\"One of the receiver listed ({receivers_on}) does not belong to the predefined list ({RECEIVERS}).\")\n\n receivers = self.session.query(ReceiverTable).filter(ReceiverTable.name.in_(receivers_on)).all()\n\n # scheduling_row = SchedulingTable(\n # name=parset_property['name'],\n # contact_name=parset_property['contactName'],\n # contact_email=parset_property['contactEmail'],\n # key_project_code=parset_property['topic'].split(' ', 1)[0],\n # key_project_name=parset_property['topic'].split(' ', 1)[1],\n # start_time=parset_property['startTime'].datetime,\n # stop_time=parset_property['stopTime'].datetime,\n # receivers=[ReceiverAssociation(receiver=receiver) for receiver in receivers],\n # parset_file=self.parset\n # )\n\n scheduling_row = self.session.query(SchedulingTable).filter_by(fileName=basename(self.parset)).first()\n if scheduling_row is None:\n username = \"testobs\" if 
parset_property[\"contactName\"]==\"\" else parset_property[\"contactName\"]\n\n # Check if 'username' exists\n if inspect(self.engine).has_table(\"nenufar_users\"):\n class NenufarUserTable(DeferredReflection, Base):\n \"\"\"\n Fake class for NenuFAR User Table\n \"\"\"\n __tablename__ = 'nenufar_users'\n\n DeferredReflection.prepare(self.engine)\n username_entry = self.session.query(NenufarUserTable).filter_by(username=username).first()\n if username_entry is None:\n log.warning(f\"Username '{username}' not found in 'nenufar_users' table, skipping it.\")\n raise UserNameNotFound(f\"'{username}'\")\n\n # Sort out the topic\n topic = parset_property.get(\"topic\", \"ES00 DEBUG\")\n if topic.lower().strip() == 'maintenance':\n topic = \"MAINTENANCE\"\n else:\n # We have something like \"ES00 DEBUG\"\n topic = topic.split(\" \", 1)[1]\n\n # Create the new row\n scheduling_row = SchedulingTable(\n name=parset_property['name'],\n fileName=basename(self.parset),\n path=dirname(self.parset),\n startTime=parset_property[\"startTime\"].datetime,\n endTime=parset_property[\"stopTime\"].datetime,\n state=\"default_value\",\n topic=topic,\n username=username,\n receivers=[ReceiverAssociation(receiver=receiver) for receiver in receivers]\n )\n is_new = True\n log.debug(f\"Row of table 'scheduling' created for '{scheduling_row.name}'.\")\n else:\n # Only add the receiver association, the row already exists in scheduling table\n [ReceiverAssociation(receiver=receiver, scheduling=scheduling_row) for receiver in receivers]\n is_new = False\n log.debug(f\"Row of table 'scheduling' updated for '{scheduling_row.name}'.\")\n \n if \"nickel\" in receivers_on:\n log.debug(\"Adding the association to NICKEL subbands.\")\n nickel_subbands = self.session.query(SubBandTable).filter(SubBandTable.index.in_(parset_property.get(\"nri_subbandList\", []))).all()\n [SubBandNickelAssociation(subband=sb, scheduling=scheduling_row) for sb in nickel_subbands]\n\n return scheduling_row, is_new\n\n\n def _create_analog_beam_row(self, parset_property):\n \"\"\" \"\"\"\n\n log.debug(f\"Treating 'analogbeam' (index {parset_property['anaIdx']})...\")\n\n pointing = self._normalize_beam_pointing(parset_property)\n \n # Link to Antenna\n antennas = self.session.query(AntennaTable).filter(AntennaTable.name.in_(parset_property['antList'])).all()\n #antennas_assoc = [_AntennaAssociation(antenna=ant) for ant in antennas]\n # Link to Mini-Arrays\n miniarrays = self.session.query(MiniArrayTable).filter(MiniArrayTable.name.in_(parset_property['maList'])).all()\n #for ma in miniarrays:\n # ma.antennas = antennas_assoc\n\n analog_beam_row = AnalogBeamTable(\n ra_j2000 = pointing[\"ra\"],\n dec_j2000 = pointing[\"dec\"],\n observed_coord_type = parset_property['directionType'],\n observed_pointing_type = 'TRANSIT' if parset_property['directionType'] == 'AZELGEO' else 'TRACKING',\n start_time = pointing[\"start_time\"],\n stop_time = pointing[\"stop_time\"],\n # nMiniArrays = len(pProp['maList']),\n # _miniArrays = pProp['maList'],\n mini_arrays = [MiniArrayAssociation(mini_array=ma, antenna=ant) for ma in miniarrays for ant in antennas],\n # nAntennas = len(pProp['antList']),\n #_antennas = pProp['antList'],\n beam_squint_freq_mhz = parset_property['optFrq'] if parset_property.get(\"beamSquint\", False) else 0,\n scheduling = self.current_scheduling\n )\n\n log.debug(f\"Row of table 'analogbeam' (index {parset_property['anaIdx']}) created for '{self.current_scheduling.name}'.\")\n\n return analog_beam_row, True\n\n\n def 
_create_digital_beam_row(self, parset_property):\n \"\"\" \"\"\"\n\n log.debug(f\"Treating 'digitalbeam' (index {parset_property['digiIdx']})...\")\n\n pointing = self._normalize_beam_pointing(parset_property)\n \n # Link to Sub-Bands\n subbands = self.session.query(SubBandTable).filter(SubBandTable.index.in_(parset_property['subbandList'])).all()\n\n digital_beam_row = DigitalBeamTable(\n ra_j2000 = pointing[\"ra\"],\n dec_j2000 = pointing[\"dec\"],\n observed_coord_type = parset_property[\"directionType\"],\n observed_pointing_type = 'TRANSIT' if parset_property[\"directionType\"] == 'AZELGEO' else 'TRACKING',\n start_time = pointing[\"start_time\"],\n stop_time = pointing[\"stop_time\"],\n # _subBands = pProp['subbandList'],\n subbands = [SubBandAssociation(subband=sb) for sb in subbands],\n freq_min_mhz = sb2freq( max(min(parset_property['subbandList']), 0) )[0].value, \n freq_max_mhz = sb2freq( min(max(parset_property['subbandList']), 511) )[0].value,\n anabeam = self.anaid[parset_property['noBeam']],\n processing = parset_property[\"toDo\"]\n )\n\n log.debug(f\"Row of table 'digitalbeam' (index {parset_property['digiIdx']}) created for '{self.current_scheduling.name}'.\")\n\n return digital_beam_row, True\n# ============================================================= #\n# ============================================================= #\n\n",
"#! /usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\n ***\n Sky\n ***\n\"\"\"\n\n\n__author__ = \"Alan Loh\"\n__copyright__ = \"Copyright 2021, nenupy\"\n__credits__ = [\"Alan Loh\"]\n__maintainer__ = \"Alan\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n__all__ = [\n \"SkySliceBase\",\n \"SkySlice\",\n \"HpxSkySlice\",\n \"Sky\",\n \"HpxSky\"\n]\n\n\nimport numpy as np\nimport copy\nfrom typing import Union\nimport logging\nlog = logging.getLogger(__name__)\n\nfrom astropy.coordinates import SkyCoord, EarthLocation, ICRS, AltAz\nfrom astropy.time import Time\nimport astropy.units as u\nfrom astropy.visualization.wcsaxes.frame import EllipticalFrame\nfrom astropy.wcs import WCS\nfrom reproject import reproject_from_healpix\nimport dask.array as da\nfrom dask.diagnostics import ProgressBar\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import patheffects\nfrom matplotlib.colorbar import ColorbarBase\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom matplotlib.cm import get_cmap\nfrom matplotlib.ticker import LinearLocator\nfrom matplotlib.colors import Normalize\n\ntry:\n import healpy.pixelfunc as hpx\nexcept ImportError:\n log.warning(\"Unable to load 'healpy', some functionalities may not be working.\")\n hpx = None\n\nfrom nenupy import nenufar_position, DummyCtMgr\nfrom nenupy.astro.astro_tools import AstroObject\n\n\n# ============================================================= #\n# ----------------------- SkySliceBase ------------------------ #\n# ============================================================= #\nclass SkySliceBase(AstroObject):\n \"\"\" \"\"\"\n\n def __init__(self,\n coordinates: SkyCoord,\n frequency: u.Quantity,\n time: Time,\n polarization: Union[str, float, int],\n value: np.ndarray,\n observer: EarthLocation = nenufar_position\n ):\n self.coordinates = coordinates\n self.time = time\n self.frequency = frequency\n self.polarization = polarization\n self.observer = observer\n self.value = value\n\n\n @property\n def visible_sky(self):\n \"\"\" \"\"\"\n altaz = self.horizontal_coordinates\n return altaz.alt.deg > 0\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n def plot(self, **kwargs):\n r\"\"\" Display the selected content of the :attr:`~nenupy.astro.sky.Sky.value`\n attribute belonging to a :class:`~nenupy.astro.sky.Sky` instance as\n a celestial map in equatorial coordinates.\n\n This method is available on a :class:`~nenupy.astro.sky.SkySlice` instance,\n resulting from a selection upon a :class:`~nenupy.astro.sky.Sky` instance\n (using the indexing operator).\n\n Several parameters, listed below, can be tuned to adapt the plot\n to the user requirements:\n\n .. rubric:: Data display keywords\n\n :param center:\n Coordinates of the celestial map to be displayed\n at the center of the image.\n Default is ``(RA=0deg, Dec=0deg)``.\n :type center:\n :class:`~astropy.coordinates.SkyCoord`\n :param radius:\n Angular radius from the center of the image above\n which the plot should be cropped.\n Default is ``None`` (i.e., full sky image).\n :type radius:\n :class:`~astropy.units.Quantity`\n :param resolution:\n Set the pixel resolution. 
The upper threshold is 0.775 deg,\n any value above that does not affect the figure appearence.\n Default is ``astropy.units.Quantity(1, unit=\"deg\")``.\n :type resolution:\n :class:`~astropy.units.Quantity`\n :param only_visible:\n If set to ``True`` only the sky above the horizon is displayed.\n Setting this parameter to ``False`` does not really make sense\n for :class:`~nenupy.astro.sky.Sky` instances representing antenna\n response for instance.\n Default is ``True``.\n :type only_visible:\n `bool`\n :param decibel:\n If set to ``True``, the data values are displayed at the decibel scale,\n i.e., :math:`10 \\log( \\rm{data} )`. \n Default is ``False``.\n :type decibel:\n `bool`\n\n .. rubric:: Overplot keywords\n\n :param scatter:\n Add a scatter plot (as defined in `matplotlib.pyplot.scatter`).\n Expected syntax is ``(<SkyCoord>, <marker_size>, <color>)``.\n Default is ``None`` (i.e., no scatter overplot).\n :type scatter:\n `tuple`\n :param text:\n Add a text overlay (as defined in `matplotlib.pyplot.text`).\n Expected syntax is ``(<SkyCoord>, <[text]>, <color>)``.\n Default is ``None`` (i.e., no text overplot).\n :type text:\n `tuple`\n :param contour:\n Add a contour plot (as defined in `matplotlib.pyplot.contour`).\n Expected syntax is ``(<numpy.ndarray>, <[levels]>, <colormap>)``.\n Default is ``None`` (i.e., no contour overplot).\n :type contour:\n `tuple`\n\n .. rubric:: Plotting layout keywords\n \n :param altaz_overlay:\n If set to ``True``, the horizontal coordinates grid is overplotted\n in addition to the equatorial one.\n Default is ``False``.\n :type altaz_overlay:\n `bool`\n :param cmap:\n Color map applied while representing the data (see \n `Matplotlib colormaps <https://matplotlib.org/stable/gallery/color/colormap_reference.html>`_).\n Default is ``\"YlGnBu_r\"``.\n :type cmap:\n `str`\n :param show_colorbar:\n Show or not the color bar.\n Default is ``True``.\n :type show_colorbar:\n `bool`\n :param colorbar_label:\n Set the label of the color bar.\n Default is ``\"\"``.\n :type colorbar_label:\n `str`\n :param figname:\n Name of the file (absolute or relative path) to save the figure.\n If set to ``\"return\"``, the method returns the `tuple` ``(fig, ax)``\n (as defined by `matplotlib <https://matplotlib.org/>`_).\n Default is ``None`` (i.e., only show the figure).\n :type figname:\n `str`\n :param figsize:\n Set the figure size.\n Default is ``(15, 10)``.\n :type figsize:\n `tuple`\n :param ticks_color:\n Set the color of the equatorial grid and the Right Ascension ticks.\n Default is ``\"0.9\"`` (grey).\n :type ticks_color:\n `str`\n :param title:\n Set the figure title.\n Default is ``\"<time>, <frequency>\"``.\n :type title:\n `str`\n \n \"\"\"\n # Parsing the keyword arguments\n resolution = kwargs.get(\"resolution\", 1*u.deg)\n figname = kwargs.get(\"figname\", None)\n cmap = kwargs.get(\"cmap\", \"YlGnBu_r\")\n figsize = kwargs.get(\"figsize\", (15, 10))\n center = kwargs.get(\"center\", SkyCoord(0*u.deg, 0*u.deg))\n radius = kwargs.get(\"radius\", None)\n ticks_color = kwargs.get(\"ticks_color\", \"0.9\")\n colorbar_label = kwargs.get(\"colorbar_label\", \"\")\n title = kwargs.get(\"title\", f\"{self.time.isot.split('.')[0]}, {self.frequency:.2f}\")\n visible_sky = kwargs.get(\"only_visible\", True)\n decibel = kwargs.get(\"decibel\", False)\n altaz_overlay = kwargs.get(\"altaz_overlay\", False)\n\n # Initialize figure\n wcs, shape = self._compute_wcs(\n center=center,\n resolution=getattr(self, \"resolution\", resolution),\n radius=radius\n )\n fig = 
plt.figure(figsize=figsize)\n ax = plt.subplot(\n projection=wcs,\n frame_class=EllipticalFrame\n )\n\n # Get the data projected on fullsky\n data = self._fullsky_projection(\n wcs=wcs,\n shape=shape,\n display_visible_sky=visible_sky\n )\n\n # Scale the data in decibel\n if decibel:\n data = 10 * np.log10(data)\n\n vmin = kwargs.get(\"vmin\", np.nanmin(data))\n vmax = kwargs.get(\"vmax\", np.nanmax(data))\n\n # Plot the data\n im = ax.imshow(\n data,\n origin=\"lower\",\n interpolation=\"quadric\",\n cmap=cmap,\n vmin=vmin,\n vmax=vmax\n )\n\n # Define ax ticks\n ax.coords.grid(color=ticks_color, alpha=0.5)\n path_effects=[patheffects.withStroke(linewidth=3, foreground='black')]\n\n ra_axis = ax.coords[0]\n dec_axis = ax.coords[1]\n ra_axis.set_ticks_visible(False)\n ra_axis.set_ticklabel_visible(True)\n ra_axis.set_ticklabel(color=ticks_color, exclude_overlapping=True, path_effects=path_effects)\n ra_axis.set_axislabel(\"RA\", color=ticks_color, path_effects=path_effects)\n ra_axis.set_major_formatter(\"d\")\n \n ra_axis.set_ticks(number=12)\n dec_axis.set_ticks_visible(False)\n dec_axis.set_ticklabel_visible(True)\n dec_axis.set_axislabel(\"Dec\", minpad=2)\n dec_axis.set_major_formatter(\"d\")\n dec_axis.set_ticks(number=10)\n\n if altaz_overlay:\n frame = AltAz(obstime=self.time, location=self.observer)\n overlay = ax.get_coords_overlay(frame)\n overlay.grid(color=\"tab:orange\", alpha=0.5)\n az_axis = overlay[0]\n alt_axis = overlay[1]\n az_axis.set_axislabel(\"Azimuth\", color=ticks_color, path_effects=path_effects)\n az_axis.set_ticks_visible(False)\n az_axis.set_ticklabel_visible(True)\n az_axis.set_ticklabel(color=ticks_color, path_effects=path_effects)\n az_axis.set_major_formatter(\"d\")\n az_axis.set_ticks(number=12)\n alt_axis.set_axislabel(\"Elevation\")\n alt_axis.set_ticks_visible(False)\n alt_axis.set_ticklabel_visible(True)\n alt_axis.set_major_formatter(\"d\")\n alt_axis.set_ticks(number=10)\n\n # Add NSEW points\n nesw_labels = np.array([\"N\", \"E\", \"S\", \"W\"])\n nesw = SkyCoord(\n np.array([0, 90, 180, 270]),\n np.array([0, 0, 0, 0]),\n unit=\"deg\",\n frame=frame\n ).transform_to(ICRS)\n for label, coord in zip(nesw_labels, nesw):\n ax.text(\n x=coord.ra.deg,\n y=coord.dec.deg,\n s=label,\n color=\"tab:orange\",\n transform=ax.get_transform(\"world\"),\n path_effects=path_effects,\n verticalalignment=\"center\",\n horizontalalignment=\"center\",\n clip_on=True\n )\n\n # Colorbar\n if kwargs.get(\"show_colorbar\", True):\n cax = inset_axes(\n ax,\n width='3%',\n height='100%',\n loc='lower left',\n bbox_to_anchor=(1.05, 0., 1, 1),\n bbox_transform=ax.transAxes,\n borderpad=0,\n )\n cb = ColorbarBase(\n cax,\n cmap=get_cmap(name=cmap),\n orientation='vertical',\n norm=Normalize(\n vmin=vmin,\n vmax=vmax\n ),\n ticks=LinearLocator()\n )\n cb.solids.set_edgecolor(\"face\")\n cb.set_label(colorbar_label)\n cb.formatter.set_powerlimits((0, 0))\n\n # Overplot\n # if kwargs.get(\"circle\", None) is not None:\n # from matplotlib.patches import Circle\n # frame = AltAz(obstime=self.time, location=self.observer)\n # c = Circle(\n # (0, 75),\n # 20,\n # edgecolor='yellow',\n # linewidth=5,\n # facecolor='none',\n # #transform=ax.get_transform('world')\n # #transform=ax.get_transform('fk5')\n # transform=ax.get_transform(frame)\n # )\n # ax.add_patch(c)\n if kwargs.get(\"moc\", None) is not None:\n # In order fo that to work; I had to comment #axis_viewport.set(ax, wcs)\n # from add_patches_to_mpl_axe() in mocpy/moc/plot/fill.py\n # OR re-set the limits (done here)\n try:\n 
frame = AltAz(obstime=self.time, location=self.observer)\n xlimits = ax.get_xlim()\n ylimits = ax.get_ylim()\n mocs = kwargs[\"moc\"] if isinstance(kwargs[\"moc\"], list) else [kwargs[\"moc\"]]\n for moc, color in zip(mocs, [\"tab:red\", \"tab:green\"]):\n moc.fill(\n ax=ax,\n wcs=wcs,\n alpha=0.5,\n fill=True,\n color=color,\n linewidth=0,\n )\n ax.set_xlim(xlimits)\n ax.set_ylim(ylimits)\n except AttributeError:\n log.warning(\"A 'MOC' object, generated from mocpy is expected.\")\n raise\n\n if kwargs.get(\"altaz_moc\", None) is not None:\n xlimits = ax.get_xlim()\n ylimits = ax.get_ylim()\n altaz = self.horizontal_coordinates\n mask = kwargs[\"altaz_moc\"].contains(altaz.az, altaz.alt)\n ax.scatter(\n x=self.coordinates[mask].ra.deg,\n y=self.coordinates[mask].dec.deg,\n s=0.1,#[marker_size]*coords.size,\n facecolor=\"red\",\n edgecolor=None,\n alpha=0.5,\n transform=ax.get_transform(\"world\")\n )\n ax.set_xlim(xlimits)\n ax.set_ylim(ylimits)\n\n if kwargs.get(\"scatter\", None) is not None:\n parameters = kwargs[\"scatter\"]\n if len(parameters) != 3:\n raise ValueError(\n \"'scatter' syntax should be: (<SkyCoord>, <size>, <color>)\"\n )\n coords = parameters[0]\n if coords.isscalar:\n coords = coords.reshape((1,))\n marker_size = parameters[1]\n marker_color = parameters[2]\n ax.scatter(\n x=coords.ra.deg,\n y=coords.dec.deg,\n s=[marker_size]*coords.size,\n color=marker_color,\n transform=ax.get_transform(\"world\")\n )\n\n if kwargs.get(\"text\", None) is not None:\n parameters = kwargs[\"text\"]\n if len(parameters) != 3:\n raise ValueError(\n \"'text' syntax should be: (<SkyCoord>, <[text]>, <color>)\"\n )\n coords = parameters[0]\n if coords.isscalar:\n coords = coords.reshape((1,))\n text = parameters[1]\n text_color = parameters[2]\n\n for i in range(coords.size):\n ax.text(\n x=coords[i].ra.deg,\n y=coords[i].dec.deg,\n s=text[i],\n color=text_color,\n transform=ax.get_transform(\"world\"),\n clip_on=True\n )\n\n if kwargs.get(\"contour\", None) is not None:\n parameters = kwargs[\"contour\"]\n data = parameters[0]\n if len(parameters) != 3:\n raise ValueError(\n \"'contour' syntax should be: (<numpy.ndarray>, <[levels]>, <colormap>)\"\n )\n contour, _ = reproject_from_healpix(\n (data, ICRS()),\n wcs,\n nested=False,\n shape_out=shape#(ndec, nra)\n )\n ax.contour(\n contour,\n levels=parameters[1],\n cmap=parameters[2],\n )\n\n # Other\n im.set_clip_path(ax.coords.frame.patch)\n ax.set_title(title, pad=20)\n\n # Save or show\n if figname is None:\n plt.show()\n elif figname.lower() == \"return\":\n return fig, ax\n else:\n fig.savefig(\n figname,\n dpi=300,\n transparent=True,\n bbox_inches='tight'\n )\n plt.close('all')\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ------------------------- SkySlice -------------------------- #\n# ============================================================= #\nclass SkySlice(SkySliceBase):\n \"\"\" \"\"\"\n\n def __init__(self,\n coordinates: SkyCoord,\n frequency: u.Quantity,\n time: Time,\n polarization: Union[str, float, int],\n value: np.ndarray,\n observer: EarthLocation = nenufar_position\n ):\n super().__init__(\n coordinates=coordinates,\n time=time,\n frequency=frequency,\n polarization=polarization,\n observer=observer,\n value=value\n )\n\n\n # --------------------------------------------------------- #\n # ----------------------- Internal ------------------------ #\n 
@staticmethod\n def _compute_wcs(center: SkyCoord, resolution: u.Quantity, radius: u.Quantity):\n \"\"\" \"\"\"\n dangle = 0.675\n scale = int(dangle/resolution.to(u.deg).value)\n #scale = int(resolution.to(u.deg).value/dangle)\n scale = 1 if scale <= 1 else scale\n ra_dim = 480*scale\n dec_dim = 240*scale\n if radius is not None:\n resol = dangle/scale\n ra_dim = int(2 * radius.to(u.deg).value / resol)\n dec_dim = ra_dim\n #raauto = False\n wcs = WCS(naxis=2)\n wcs.wcs.crpix = [ra_dim/2 + 0.5, dec_dim/2 + 0.5]\n wcs.wcs.cdelt = np.array([-dangle/scale, dangle/scale])\n wcs.wcs.crval = [center.ra.deg, center.dec.deg]\n wcs.wcs.ctype = ['RA---AIT', 'DEC--AIT']\n\n return wcs, (ra_dim, dec_dim)\n\n\n def _fullsky_projection(self, wcs: WCS, shape: tuple, display_visible_sky: bool):\n \"\"\" \"\"\"\n x, y = wcs.world_to_pixel(self.coordinates)\n\n data = np.zeros(shape, dtype=np.float64)\n data[:, :] = np.nan\n weights = np.zeros(shape, dtype=int)\n\n x_int = np.floor(x).astype(int)\n x_in_image = (x_int >= 0) & (x_int < shape[0])\n y_int = np.floor(y).astype(int)\n y_in_image = (y_int >= 0) & (y_int < shape[1])\n in_image_mask = x_in_image & y_in_image\n x_int = x_int[in_image_mask]\n y_int = y_int[in_image_mask]\n\n values = copy.deepcopy(self.value)\n if display_visible_sky:\n values[~self.visible_sky] = np.nan\n values = values[in_image_mask]\n\n if isinstance(values, da.Array):\n with ProgressBar() if log.getEffectiveLevel() <= logging.INFO else DummyCtMgr():\n values = values.compute()\n\n data[(x_int, y_int)] = 0.\n np.add.at(weights, (x_int, y_int), 1)\n weights[weights<0.5] = 1.\n np.add.at(data, (x_int, y_int), values)\n data[(x_int, y_int)] /= weights[(x_int, y_int)]\n\n return data.T\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ------------------------ HpxSkySlice ------------------------ #\n# ============================================================= #\nclass HpxSkySlice(SkySliceBase):\n \"\"\" \"\"\"\n\n def __init__(self,\n coordinates: SkyCoord,\n frequency: u.Quantity,\n time: Time,\n polarization: Union[str, float, int],\n value: np.ndarray,\n observer: EarthLocation = nenufar_position\n ):\n super().__init__(\n coordinates=coordinates,\n time=time,\n frequency=frequency,\n polarization=polarization,\n observer=observer,\n value=value\n )\n\n\n # --------------------------------------------------------- #\n # ----------------------- Internal ------------------------ #\n @staticmethod\n def _compute_wcs(center: SkyCoord, resolution: u.Quantity, radius: u.Quantity = None):\n \"\"\" \"\"\"\n dangle = 0.675\n scale = int(dangle/resolution.to(u.deg).value)\n scale = 1 if scale <= 1 else scale\n ra_dim = 480*scale\n dec_dim = 240*scale\n if radius is not None:\n resol = dangle/scale\n ra_dim = int(2 * radius.to(u.deg).value / resol)\n dec_dim = ra_dim\n #raauto = False\n wcs = WCS(naxis=2)\n wcs.wcs.crpix = [ra_dim/2 + 0.5, dec_dim/2 + 0.5]\n wcs.wcs.cdelt = np.array([-dangle/scale, dangle/scale])\n wcs.wcs.crval = [center.ra.deg, center.dec.deg]\n wcs.wcs.ctype = ['RA---AIT', 'DEC--AIT']\n\n return wcs, (dec_dim, ra_dim)\n\n\n def _fullsky_projection(self, wcs: WCS, shape: tuple, display_visible_sky: bool):\n \"\"\" \"\"\"\n values = copy.deepcopy(self.value)\n if display_visible_sky:\n values[~self.visible_sky] = np.nan\n \n if isinstance(values, da.Array):\n with ProgressBar() if log.getEffectiveLevel() <= 
logging.INFO else DummyCtMgr():\n values = values.compute()\n\n with np.errstate(invalid='ignore'):\n # Ignore the invalid value in bilinear_interpolation (astropy-healpix)\n array, _ = reproject_from_healpix(\n (values, ICRS()),\n wcs,\n nested=False,\n shape_out=shape\n )\n return array\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# ---------------------------- Sky ---------------------------- #\n# ============================================================= #\nclass Sky(AstroObject):\n \"\"\" \"\"\"\n\n def __init__(self,\n coordinates: SkyCoord,\n time: Time = Time.now(),\n frequency: u.Quantity = 50*u.MHz,\n polarization: np.ndarray = np.array([0]),\n value: Union[float, np.ndarray] = 0.,\n observer: EarthLocation = nenufar_position\n ):\n self.coordinates = coordinates\n self.time = time\n self.frequency = frequency\n self.polarization = polarization\n self.observer = observer\n self.value = value\n \n\n def __str__(self):\n text = (\n f\"{self.__class__} instance\\n\"\n f\"value: {self.shape}\\n\"\n f\"\\t* time: {self.time.shape}\\n\"\n f\"\\t* frequency: {self.frequency.shape}\\n\"\n f\"\\t* polarization: {self.polarization.shape}\\n\"\n f\"\\t* coordinates: {self.coordinates.shape}\\n\"\n )\n return text\n\n\n def __truediv__(self, other):\n if isinstance(other, Sky):\n self.value /= other.value\n else:\n self.value /= other\n return self\n \n\n def __mul__(self, other):\n new_sky = copy.copy(self)\n if isinstance(other, Sky):\n new_sky.value *= other.value\n else:\n new_sky.value *= other\n return new_sky\n\n\n def __getitem__(self, n):\n \"\"\" \"\"\"\n val = self.value[n]\n if val.ndim != 1:\n raise IndexError(\n \"<class 'HpxSky'>: wrong index selection on <arg \"\n f\"'value'> of shape {self.value.shape} (time, \"\n \"frequency, healpix_cells). 
A 1D array is \"\n \"expected as a result of the selection.\"\n )\n return SkySlice(\n coordinates=self.coordinates,\n value=val,\n time=self.time[n[0]],\n frequency=self.frequency[n[1]],\n polarization=self.polarization[n[2]],\n observer=self.observer\n )\n\n\n # --------------------------------------------------------- #\n # --------------------- Getter/Setter --------------------- #\n @property\n def value(self):\n \"\"\" \"\"\"\n return self._value\n @value.setter\n def value(self, v):\n expected_shape = (\n self.time.size,\n self.frequency.size,\n self.polarization.size,\n self.coordinates.size\n )\n\n if np.isscalar(v):\n v *= np.ones(expected_shape)\n else:\n if v.shape != expected_shape:\n raise ValueError(f\"Shape incorrect, expected {expected_shape}, got {v.shape}.\")\n \n if v.dtype < np.float64:\n v = v.astype(np.float64)\n\n self._value = v\n\n \n @property\n def time(self):\n \"\"\" \"\"\"\n return self._time\n @time.setter\n def time(self, t):\n if t.isscalar:\n t = t.reshape((1,))\n self._time = t\n\n\n @property\n def frequency(self):\n \"\"\" \"\"\"\n return self._frequency\n @frequency.setter\n def frequency(self, f):\n if f.isscalar:\n f = f.reshape((1,))\n self._frequency = f\n\n\n @property\n def polarization(self):\n \"\"\" \"\"\"\n return self._polarization\n @polarization.setter\n def polarization(self, p):\n if np.ndim(p) == 0:\n p = np.array([p])\n self._polarization = p\n\n\n @property\n def shape(self):\n \"\"\" \"\"\"\n return self.value.shape\n # @property\n # def visible_sky(self):\n # \"\"\" \"\"\"\n # altaz = self.horizontal_coordinates\n # return altaz.alt.deg > 0\n\n\n @property\n def visible_mask(self):\n \"\"\" \"\"\"\n mask = self.horizontal_coordinates.alt.deg > 0\n mask = np.expand_dims(mask, (1, 2)) # add frequency and polarization\n mask = np.repeat(mask, self.frequency.size, axis=1)\n mask = np.repeat(mask, self.polarization.size, axis=2)\n return mask\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n def compute_lmn(self, phase_center: SkyCoord, coordinate_mask: np.ndarray = None):\n r\"\"\" (l, m, n) image domain coordinates computed from \n HEALPix equatorial coordinates (in Right-Ascension\n :math:`\\alpha` and Declination :math:`\\delta`, see\n :attr:`~nenupy.astro.sky.Sky.coordinates`) with\n respect to the ``phase_center`` (of equatorial \n coordinates :math:`\\alpha_0`, :math:`\\delta_0`).\n\n .. 
math::\n \\cases{\n l = \\cos(\\delta) \\sin( \\Delta \\alpha)\\\\\n m = \\sin(\\delta) \\cos(\\delta_0) - \\cos(\\delta) \\sin(\\delta_0) \\cos(\\Delta \\alpha)\\\\\n n = \\sqrt{ 1 - l^2 - m^2 }\n }\n\n where :math:`\\Delta \\alpha = \\alpha - \\alpha_0`.\n\n :param phase_center:\n Image phase center.\n :type phase_center:\n :class:`~astropy.coordinates.SkyCoord`\n :param coordinate_mask:\n Mask applied to coordinates before computing (l,m,n) values.\n :type coordinate_mask:\n :class:`~numpy.ndarray`\n\n :returns: (l, m, n)\n :rtype: `tuple` of 3 :class:`~numpy.ndarray`\n \"\"\"\n ra = self.coordinates[coordinate_mask].ra.rad\n dec = self.coordinates[coordinate_mask].dec.rad\n ra_0 = phase_center.ra.rad\n dec_0 = phase_center.dec.rad\n ra_delta = ra - ra_0\n # ra_delta = ra_0 - ra\n l = np.cos(dec)*np.sin(ra_delta)\n m = np.sin(dec)*np.cos(dec_0) -\\\n np.cos(dec)*np.sin(dec_0)*np.cos(ra_delta)\n n = np.sqrt(1 - l**2 - m**2)\n return l, m, n\n\n\n # --------------------------------------------------------- #\n # ----------------------- Internal ------------------------ #\n\n# ============================================================= #\n# ============================================================= #\n\n\n# ============================================================= #\n# -------------------------- HpxSky --------------------------- #\n# ============================================================= #\nclass HpxSky(Sky):\n \"\"\" \"\"\"\n\n def __init__(self,\n resolution: u.Quantity = 1*u.deg,\n time: Time = Time.now(),\n frequency: u.Quantity = 50*u.MHz,\n polarization: np.ndarray = np.array([0]),\n value: Union[float, np.ndarray] = 0.,\n observer: EarthLocation = nenufar_position\n ):\n\n if hpx is None:\n log.error(\n f\"Unable to create an instance of {self.__qualname__} since 'healpy' does not work.\"\n )\n\n self.nside, self.resolution = self._resol2nside(resolution=resolution)\n\n # Construct the Healpix coordinates map\n ra, dec = hpx.pix2ang(\n nside=self.nside,\n ipix=np.arange(\n hpx.nside2npix(self.nside),\n dtype=np.int64\n ),\n lonlat=True,\n nest=False\n )\n\n super().__init__(\n coordinates=SkyCoord(ra, dec, unit=\"deg\"),\n time=time,\n frequency=frequency,\n polarization=polarization,\n value=value,\n observer=observer\n )\n\n\n def __getitem__(self, n):\n \"\"\" \"\"\"\n val = self.value[n]\n if val.ndim != 1:\n raise IndexError(\n \"<class 'HpxSky'>: wrong index selection on <arg \"\n f\"'value'> of shape {self.value.shape} (time, \"\n \"frequency, healpix_cells). A 1D array is \"\n \"expected as a result of the selection.\"\n )\n return HpxSkySlice(\n coordinates=self.coordinates,\n value=val,\n time=self.time[n[0]],\n frequency=self.frequency[n[1]],\n polarization=self.polarization[n[2]],\n observer=self.observer\n )\n\n\n # --------------------------------------------------------- #\n # ------------------------ Methods ------------------------ #\n @classmethod\n def shaped_like(cls, other):\n \"\"\" \"\"\"\n if not isinstance(other, HpxSky):\n raise TypeError(\n f\"{cls.__class__} instance expected.\"\n )\n return cls(\n resolution=other.resolution,\n time=other.time,\n frequency=other.frequency,\n polarization=other.polarization,\n observer=other.observer\n )\n\n\n # --------------------------------------------------------- #\n # ----------------------- Internal ------------------------ #\n @staticmethod\n def _resol2nside(resolution: u.Quantity):\n \"\"\" Returns the HEALPix nside and effective resolution. 
\"\"\"\n \n # Get all nsides for all HEALPix oders\n healpix_nsides = hpx.order2nside(np.arange(30))\n\n # Convert them into angular resolutions\n available_resolutions = hpx.nside2resol(\n healpix_nsides,\n arcmin=True\n )*u.arcmin\n\n # Find the index of the closest matching HEALPix resolution\n order_index = np.argmin(\n np.abs(available_resolutions - resolution)\n )\n\n # Retrieve the corresponding nside and reoslution\n nside = healpix_nsides[order_index]\n effective_resolution = available_resolutions[order_index]\n\n return nside, effective_resolution\n# ============================================================= #\n# ============================================================= #\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.isin"
],
[
"numpy.nanmax",
"numpy.expand_dims",
"numpy.sqrt",
"numpy.nanmin",
"numpy.arange",
"numpy.sin",
"matplotlib.ticker.LinearLocator",
"matplotlib.pyplot.subplot",
"matplotlib.patheffects.withStroke",
"matplotlib.pyplot.close",
"numpy.repeat",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.ndim",
"numpy.log10",
"numpy.floor",
"numpy.errstate",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.add.at",
"numpy.abs",
"numpy.cos",
"matplotlib.colors.Normalize",
"numpy.ones",
"numpy.isscalar",
"matplotlib.cm.get_cmap"
]
] |
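As a minimal sketch (not part of the dataset row above) of how the `HpxSky` / `HpxSkySlice` code listed in that row might be exercised: this assumes `nenupy` and its optional dependencies (`healpy`, `reproject`, `matplotlib`) are installed and that the import path matches the `nenupy.astro.sky` references in the file's own docstrings; all concrete values below are arbitrary.

```python
# Hypothetical usage sketch for the HpxSky / HpxSkySlice code shown above.
# Assumes nenupy (with healpy and reproject) is installed; values are arbitrary.
import numpy as np
import astropy.units as u
from astropy.time import Time
from nenupy.astro.sky import HpxSky

sky = HpxSky(
    resolution=1 * u.deg,             # mapped to the closest HEALPix nside by _resol2nside
    time=Time("2021-01-01 12:00:00"),
    frequency=50 * u.MHz,
    polarization=np.array([0]),
    value=1.0,                        # scalar broadcast to (time, freq, pol, n_healpix_cells)
)
sky.value = np.random.random(sky.value.shape)   # replace the flat map with random values

# Indexing (time, frequency, polarization) returns an HpxSkySlice; plot() reprojects the
# HEALPix values onto an all-sky Aitoff map. figname="return" hands back (fig, ax).
fig, ax = sky[0, 0, 0].plot(decibel=False, figname="return")
```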
dimaclimate/geocat-comp
|
[
"dcb55e22d69d96762b683652cf83f6b9ef4fcc38"
] |
[
"src/geocat/comp/eofunc.py"
] |
[
"import warnings\nfrom typing import Iterable\n\nimport numpy as np\nimport xarray as xr\nfrom eofs.xarray import Eof\n\n\ndef _generate_eofs_solver(data, time_dim=0, weights=None, center=True, ddof=1):\n \"\"\"Convenience function to be used in both `eofunc_eofs` and `eofunc_pcs`\n functions.\"\"\"\n\n # ''' Start of boilerplate\n if not isinstance(data, xr.DataArray):\n\n data = np.asarray(data)\n\n if (time_dim >= data.ndim) or (time_dim < -data.ndim):\n raise ValueError(\"ERROR eofunc_efs: `time_dim` out of bound.\")\n\n # Transpose data if time_dim is not 0 (i.e. the first/left-most dimension)\n dims_to_transpose = np.arange(data.ndim).tolist()\n dims_to_transpose.insert(\n 0, dims_to_transpose.pop(dims_to_transpose.index(time_dim)))\n data = np.transpose(data, axes=dims_to_transpose)\n\n dims = [f\"dim_{i}\" for i in range(data.ndim)]\n dims[0] = 'time'\n\n data = xr.DataArray(\n data,\n dims=dims,\n )\n\n solver = Eof(data, weights=weights, center=center, ddof=ddof)\n\n return data, solver\n\n\ndef eofunc_eofs(data,\n neofs=1,\n time_dim=0,\n eofscaling=0,\n weights=None,\n center=True,\n ddof=1,\n vfscaled=False,\n meta=False):\n \"\"\"Computes empirical orthogonal functions (EOFs, aka: Principal Component\n Analysis).\n\n Note: `eofunc_eofs` allows to perform the EOF analysis that was previously done via the NCL function `eofunc`.\n However, there are a few changes to the NCL flow such as : (1) Only `np.nan` is supported as missing value,\n (2) EOFs are computed only from covariance matrix and there is no support for computation from correlation matrix,\n (3) percentage of non-missing points that must exist at any single point is no longer an input.\n\n This implementation uses `eofs` package (https://anaconda.org/conda-forge/eofs), which is built upon the\n following study: Dawson, Andrew, \"eofs: A library for EOF analysis of meteorological, oceanographic, and\n climate data,\" Journal of Open Research Software, vol. 4, no. 1, 2016. Further information about this\n package can be found at: https://ajdawson.github.io/eofs/latest/index.html#\n\n This implementation provides a few conveniences to the user on top of `eofs` package that are described below\n in the Parameters section.\n\n Parameters\n ----------\n data : (:class:`xarray.DataArray` or :class:`numpy.ndarray` or :class:`list`)\n Should contain numbers or `np.nan` for missing value representation. It must be at least a 2-dimensional array.\n\n When input data is of type `xarray.DataArray`, `eofs.xarray` interface assumes the left-most dimension\n (i.e. `dim_0`) is the `time` dimension. In this case, that dimension should have the name \"time\".\n\n When input data is of type `numpy.ndarray` or `list`, this function still assumes the leftmost dimension\n to be the number of observations or `time` dimension: however, in this case, user is allowed to input otherwise.\n If the input do not have its leftmost dimension as the `time` or number of observations, then the user should\n specify with `time_dim=x` to define which dimension must be treated as time or number of observations\n\n neofs : (:class:`int`, Optional)\n A scalar integer that specifies the number of empirical orthogonal functions (i.e. eigenvalues and\n eigenvectors) to be returned. This is usually less than or equal to the minimum number of observations or\n number of variables. Defaults to 1.\n\n time_dim : (:class:`int`, Optional)\n An integer defining the time dimension if it is not the leftmost dimension. 
When input data is of type\n `xarray.DataArray`, this is ignored (assuming `xarray.DataArray` has its leftmost dimension with the exact\n name 'time'). It must be between ``0`` and ``data.ndim - 1`` or it could be ``-1`` indicating the last\n dimension. Defaults to 0.\n\n Note: The `time_dim` argument allows to perform the EOF analysis that was previously done via the NCL\n function `eofunc_n`.\n\n eofscaling : (:class:`int`, Optional)\n (From `eofs` package): Sets the scaling of the EOFs. The following values are accepted:\n\n - 0 : Un-scaled EOFs (default).\n - 1 : EOFs are divided by the square-root of their eigenvalues.\n - 2 : EOFs are multiplied by the square-root of their eigenvalues.\n\n weights : (:class:`array_like`, Optional)\n (From `eofs` package): An array of weights whose shape is compatible with those of the input array dataset.\n The weights can have the same shape as dataset or a shape compatible with an array broadcast (i.e., the shape\n of the weights can can match the rightmost parts of the shape of the input array dataset). If the input array\n dataset does not require weighting then the value None may be used. Defaults to None (no weighting).\n\n center : (:class:`bool`, Optional)\n (From `eofs` package): If True, the mean along the first axis of dataset (the time-mean) will be removed prior\n to analysis. If False, the mean along the first axis will not be removed. Defaults to True (mean is removed).\n\n The covariance interpretation relies on the input data being anomaly data with a time-mean of 0. Therefore this\n option should usually be set to True. Setting this option to True has the useful side effect of propagating\n missing values along the time dimension, ensuring that a solution can be found even if missing values occur\n in different locations at different times.\n\n ddof : (:class:`int`, Optional)\n (From `eofs` package): ‘Delta degrees of freedom’. The divisor used to normalize the covariance matrix is\n N - ddof where N is the number of samples. Defaults to 1.\n\n vfscaled : (:class:`bool`, Optional)\n (From `eofs` package): If True, scale the errors by the sum of the eigenvalues. This yields typical errors\n with the same scale as the values returned by Eof.varianceFraction. If False then no scaling is done.\n Defaults to False.\n\n meta : (:class:`bool`, Optional)\n If set to True and the input array is an Xarray, the metadata from the input array will be copied to the\n output array. Defaults to False.\n\n Returns\n -------\n A multi-dimensional array containing EOFs. The returned array will be of the same size as data with the\n leftmost dimension removed and an additional dimension of the size `neofs` added.\n\n The return variable will have associated with it the following attributes:\n\n eigenvalues:\n A one-dimensional array of size `neofs` that contains the eigenvalues associated with each EOF.\n\n northTest:\n (From `eofs` package): Typical errors for eigenvalues.\n\n The method of North et al. (1982) is used to compute the typical error for each eigenvalue. It is\n assumed that the number of times in the input data set is the same as the number of independent\n realizations. 
If this assumption is not valid then the result may be inappropriate.\n\n Note: The `northTest` attribute allows to perform the error analysis that was previously done via the NCL\n function `eofunc_north`.\n\n totalAnomalyVariance:\n (From `eofs` package): Total variance associated with the field of anomalies (the sum of the eigenvalues).\n\n varianceFraction:\n (From `eofs` package): Fractional EOF mode variances.\n\n The fraction of the total variance explained by each EOF mode, values between 0 and 1 inclusive..\n \"\"\"\n\n data, solver = _generate_eofs_solver(data,\n time_dim=time_dim,\n weights=weights,\n center=center,\n ddof=ddof)\n\n # Checking number of EOFs\n if neofs <= 0:\n raise ValueError(\n \"ERROR eofunc_eofs: num_eofs must be a positive non-zero integer value.\"\n )\n\n eofs = solver.eofs(neofs=neofs, eofscaling=eofscaling)\n\n # Populate attributes for output\n attrs = {}\n\n if meta:\n attrs = data.attrs\n\n attrs['eigenvalues'] = solver.eigenvalues(neigs=neofs)\n attrs['northTest'] = solver.northTest(neigs=neofs, vfscaled=vfscaled)\n attrs['totalAnomalyVariance'] = solver.totalAnomalyVariance()\n attrs['varianceFraction'] = solver.varianceFraction(neigs=neofs)\n\n if meta:\n dims = [\"eof\"\n ] + [data.dims[i] for i in range(data.ndim) if i != time_dim]\n coords = {\n k: v for (k, v) in data.coords.items() if k != data.dims[time_dim]\n }\n else:\n dims = [\"eof\"] + [f\"dim_{i}\" for i in range(data.ndim) if i != time_dim]\n coords = {}\n\n return xr.DataArray(eofs, attrs=attrs, dims=dims, coords=coords)\n\n\ndef eofunc_pcs(data,\n npcs=1,\n time_dim=0,\n pcscaling=0,\n weights=None,\n center=True,\n ddof=1,\n meta=False):\n \"\"\"Computes the principal components (time projection) in the empirical\n orthogonal function analysis.\n\n Note: `eofunc_pcs` allows to perform the analysis that was previously done via the NCL function `eofunc_ts`.\n However, there are a few changes to the NCL flow such as : (1) Only `np.nan` is supported as missing value,\n (2) EOFs are computed only from covariance matrix and there is no support for computation from correlation matrix,\n (3) percentage of non-missing points that must exist at any single point is no longer an input.\n\n This implementation uses `eofs` package (https://anaconda.org/conda-forge/eofs), which is built upon the\n following study: Dawson, Andrew, \"eofs: A library for EOF analysis of meteorological, oceanographic, and\n climate data,\" Journal of Open Research Software, vol. 4, no. 1, 2016. Further information about this\n package can be found at: https://ajdawson.github.io/eofs/latest/index.html#\n\n This implementation provides a few conveniences to the user on top of `eofs` package that are described below\n in the Parameters section.\n\n Parameters\n ----------\n data : :class:`xarray.DataArray` or :class:`numpy.ndarray` or :class:`list`\n Should contain numbers or `np.nan` for missing value representation. It must be at least a 2-dimensional array.\n\n When input data is of type `xarray.DataArray`, `eofs.xarray` interface assumes the left-most dimension\n (i.e. `dim_0`) is the `time` dimension. 
In this case, that dimension should have the name \"time\".\n\n When input data is of type `numpy.ndarray` or `list`, this function still assumes the leftmost dimension\n to be the number of observations or `time` dimension: however, in this case, user is allowed to input otherwise.\n If the input do not have its leftmost dimension as the `time` or number of observations, then the user should\n specify with `time_dim=x` to define which dimension must be treated as time or number of observations\n\n npcs : (:class:`int`, Optional)\n A scalar integer that specifies the number of principal components (i.e. eigenvalues and eigenvectors) to be\n returned. This is usually less than or equal to the minimum number of observations or number of variables.\n Defaults to 1.\n\n time_dim : (:class:`int`, Optional)\n An integer defining the time dimension if it is not the leftmost dimension. When input data is of type\n `xarray.DataArray`, this is ignored (assuming `xarray.DataArray` has its leftmost dimension with the exact\n name 'time'). It must be between ``0`` and ``data.ndim - 1`` or it could be ``-1`` indicating the last\n dimension. Defaults to 0.\n\n Note: The `time_dim` argument allows to perform the EOF analysis that was previously done via the NCL\n function `eofunc_ts_n`.\n\n pcscaling : (:class:`int`, Optional)\n (From `eofs` package): Sets the scaling of the retrieved PCs. The following values are accepted:\n - 0 : Un-scaled PCs (default).\n - 1 : PCs are divided by the square-root of their eigenvalues.\n - 2 : PCs are multiplied by the square-root of their eigenvalues.\n\n weights : (:class:`array_like`, Optional)\n (From `eofs` package): An array of weights whose shape is compatible with those of the input array dataset.\n The weights can have the same shape as dataset or a shape compatible with an array broadcast (i.e., the shape\n of the weights can can match the rightmost parts of the shape of the input array dataset). If the input array\n dataset does not require weighting then the value None may be used. Defaults to None (no weighting).\n\n center : (:class:`bool`, Optional)\n (From `eofs` package): If True, the mean along the first axis of dataset (the time-mean) will be removed prior\n to analysis. If False, the mean along the first axis will not be removed. Defaults to True (mean is removed).\n\n The covariance interpretation relies on the input data being anomaly data with a time-mean of 0. Therefore this\n option should usually be set to True. Setting this option to True has the useful side effect of propagating\n missing values along the time dimension, ensuring that a solution can be found even if missing values occur\n in different locations at different times.\n\n ddof : (:class:`int`, Optional)\n (From `eofs` package): ‘Delta degrees of freedom’. The divisor used to normalize the covariance matrix is\n N - ddof where N is the number of samples. Defaults to 1.\n\n meta : (:class:`bool`, Optional)\n If set to True and the input array is an Xarray, the metadata from the input array will be copied to the\n output array. 
Defaults to False.\n\n Returns\n -------\n \"\"\"\n\n data, solver = _generate_eofs_solver(data,\n time_dim=time_dim,\n weights=weights,\n center=center,\n ddof=ddof)\n\n # Checking number of EOFs\n if npcs <= 0:\n raise ValueError(\n \"ERROR eofunc_pcs: num_pcs must be a positive non-zero integer value.\"\n )\n\n solver = Eof(data, weights=weights, center=center, ddof=ddof)\n\n pcs = solver.pcs(npcs=npcs, pcscaling=pcscaling)\n pcs = pcs.transpose()\n\n # Populate attributes for output\n attrs = {}\n\n if meta:\n attrs = data.attrs\n\n dims = [\"pc\", \"time\"]\n if meta:\n coords = {\"time\": data.coords[data.dims[time_dim]]}\n else:\n coords = {}\n\n return xr.DataArray(pcs, attrs=attrs, dims=dims, coords=coords)\n\n\n# Transparent wrappers for geocat.comp backwards compatibility\n\n\ndef eofunc(data: Iterable, neval, **kwargs) -> xr.DataArray:\n warnings.warn(\n \"eofunc will be deprecated soon in a future version and may not currently generate proper results for some of \"\n \"its arguments including `pcrit`, `jopt=\"\n \"correlation\"\n \"`, and 'missing_value' other than np.nan. The output \"\n \" and its attributes may thus not be as expected, too. Use `eofunc_eofs` instead.\",\n PendingDeprecationWarning)\n\n if not isinstance(data, xr.DataArray) or not isinstance(data, np.ndarray):\n data = np.asarray(data)\n\n time_dim = int(kwargs.get(\"time_dim\", data.ndim - 1))\n meta = bool(kwargs.get(\"meta\"))\n\n return eofunc_eofs(data, neofs=neval, time_dim=time_dim, meta=meta)\n\n\ndef eofunc_ts(data: Iterable, evec, **kwargs) -> xr.DataArray:\n warnings.warn(\n \"eofunc_ts will be deprecated soon in a future version and may not currently generate proper results for \"\n \"some of its arguments including `evec`, `jopt=\"\n \"correlation\"\n \"`, and 'missing_value' other than np.nan. The output \"\n \" and its attributes may thus not be as expected, too. Use `eofunc_pcs` instead.\",\n PendingDeprecationWarning)\n\n if not isinstance(data, xr.DataArray) or not isinstance(data, np.ndarray):\n data = np.asarray(data)\n\n time_dim = int(kwargs.get(\"time_dim\", data.ndim - 1))\n meta = bool(kwargs.get(\"meta\"))\n\n return eofunc_pcs(data, npcs=evec.shape[0], time_dim=time_dim, meta=meta)\n"
] |
[
[
"numpy.asarray",
"numpy.arange",
"numpy.transpose"
]
] |
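A minimal sketch of how the `eofunc_eofs` / `eofunc_pcs` functions in the row above might be called on a plain NumPy array, assuming `geocat-comp` (at this revision) and the `eofs` package are installed; the data are random and only illustrate the expected dimensions.

```python
# Hypothetical usage sketch for the eofunc_eofs / eofunc_pcs functions shown above.
# Assumes geocat-comp and the eofs package are installed; data are random.
import numpy as np
from geocat.comp.eofunc import eofunc_eofs, eofunc_pcs

rng = np.random.default_rng(0)
data = rng.standard_normal((20, 10, 15))   # leftmost axis is treated as "time"

eofs = eofunc_eofs(data, neofs=3)   # dims ("eof", "dim_1", "dim_2"); eigenvalues etc. in attrs
pcs = eofunc_pcs(data, npcs=3)      # dims ("pc", "time")

print(eofs.shape)                            # (3, 10, 15)
print(eofs.attrs["varianceFraction"].values)
print(pcs.shape)                             # (3, 20)
```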
Vottivott/mildnet
|
[
"4787dc756f1abcab4f7cb57ffb8701dcaf27edba"
] |
[
"trainer/image/directory_iterator.py"
] |
[
"import logging\nimport multiprocessing\nimport os\nfrom functools import partial\n\nimport numpy as np\n\nfrom trainer.image.iterator import MildBatchFromFilesMixin, MildIterator\n\n\ndef _count_valid_files_in_directory(directory, white_list_formats, follow_links):\n \"\"\"Count files with extension in `white_list_formats` contained in a directory.\n # Arguments\n directory: absolute path to the directory containing files to be counted\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n # Returns\n the count of files with extension in `white_list_formats` contained in\n the directory.\n \"\"\"\n\n def _recursive_list(subpath):\n return sorted(os.walk(subpath))\n # return sorted(os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])\n\n samples = 0\n for root, _, files in _recursive_list(directory):\n for fname in files:\n is_valid = False\n for extension in white_list_formats:\n if fname.lower().endswith('.' + extension):\n is_valid = True\n break\n if is_valid:\n samples += 1\n return samples\n\n\ndef _list_valid_filenames_in_directory(directory, white_list_formats,\n class_indices, follow_links, triplet_path):\n \"\"\"List paths of files in `subdir` relative from `directory` whose extensions are in `white_list_formats`.\n # Arguments\n directory: absolute path to a directory containing the files to list.\n The directory name is used as class label and must be a key of `class_indices`.\n white_list_formats: set of strings containing allowed extensions for\n the files to be counted.\n class_indices: dictionary mapping a class name to its index.\n # Returns\n classes: a list of class indices\n filenames: the path of valid files in `directory`, relative from\n `directory`'s parent (e.g., if `directory` is \"dataset/class1\",\n the filenames will be [\"class1/file1.jpg\", \"class1/file2.jpg\", ...]).\n \"\"\"\n\n def _recursive_list(subpath):\n f = open(triplet_path)\n f_read = f.read()\n triplets = f_read.split('\\n')\n f.close()\n filenames = []\n for triplet in triplets:\n triplet = triplet.split(',')\n for i in range(len(triplet)): # HANNES FIX\n triplet[i] = triplet[i].replace('\\r','')\n if len(triplet) != 3:\n continue\n filenames += triplet\n to_return_tuple = tuple()\n to_return_tuple = (os.path.abspath(subpath), [], filenames,)\n return [to_return_tuple]\n\n classes = []\n filenames = []\n subdir = os.path.basename(directory)\n basedir = os.path.dirname(directory)\n debug = subdir == \"q\" and False\n if debug:\n print(\"directory: \" + directory)\n print(\"subdir: \" + subdir)\n print(\"basedir: \" + basedir)\n for root, _, files in _recursive_list(directory):\n if debug:\n print(files)\n for i,fname in enumerate(files):\n if debug:\n print(i)\n print(\"- %s\" % fname)\n is_valid = False\n for extension in white_list_formats:\n if fname.lower().endswith('.' 
+ extension):\n is_valid = True\n break\n if is_valid:\n classes.append(class_indices[subdir])\n filenames.append(fname)\n else:\n logging.warning(fname + \" is not valid\")\n if debug:\n print(i)\n print(\"-- %s\" % fname)\n print(\" is not valid!\")\n return [\"q\", \"p\", \"n\"], filenames\n\n\nclass MildDirectoryIterator(MildBatchFromFilesMixin, MildIterator):\n \"\"\"Iterator capable of reading images from a directory on disk.\n # Arguments\n directory: Path to the directory to read images from.\n Each subdirectory in this directory will be\n considered to contain images from one class,\n or alternatively you could specify class subdirectories\n via the `classes` argument.\n image_data_generator: Instance of `ImageDataGenerator`\n to use for random transformations and normalization.\n target_size: tuple of integers, dimensions to resize input images to.\n color_mode: One of `\"rgb\"`, `\"grayscale\"`. Color mode to read images.\n classes: Optional list of strings, names of subdirectories\n containing images from each class (e.g. `[\"dogs\", \"cats\"]`).\n It will be computed automatically if not set.\n class_mode: Mode for yielding the targets:\n `\"binary\"`: binary targets (if there are only two classes),\n `\"categorical\"`: categorical targets,\n `\"sparse\"`: integer targets,\n `\"input\"`: targets are images identical to input images (mainly\n used to work with autoencoders),\n `None`: no targets get yielded (only input images are yielded).\n batch_size: Integer, size of a batch.\n shuffle: Boolean, whether to shuffle the data between epochs.\n seed: Random seed for data shuffling.\n data_format: String, one of `channels_first`, `channels_last`.\n save_to_dir: Optional directory where to save the pictures\n being yielded, in a viewable format. 
This is useful\n for visualizing the random transformations being\n applied, for debugging purposes.\n save_prefix: String prefix to use for saving sample\n images (if `save_to_dir` is set).\n save_format: Format to use for saving sample images\n (if `save_to_dir` is set).\n \"\"\"\n allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}\n\n def __init__(self,\n directory,\n image_data_generator,\n target_size=(256, 256),\n color_mode='rgb',\n class_mode='categorical',\n batch_size=32,\n shuffle=True,\n seed=None,\n data_format='channel_last',\n save_to_dir=None,\n save_prefix='',\n save_format='png',\n follow_links=False,\n subset=None,\n interpolation='nearest',\n dtype='float32'):\n super(MildDirectoryIterator, self).set_processing_attrs(image_data_generator,\n target_size,\n color_mode,\n data_format,\n save_to_dir,\n save_prefix,\n save_format,\n subset,\n interpolation)\n self.directory = directory\n if class_mode not in self.allowed_class_modes:\n raise ValueError('Invalid class_mode:', class_mode,\n '; expected one of \"categorical\", '\n '\"binary\", \"sparse\", \"input\"'\n ' or None.')\n\n self.class_mode = class_mode\n self.dtype = dtype\n # first, count the number of samples and classes\n self.samples = 0\n\n classes = [\"q\", \"p\", \"n\"]\n self.num_class = len(classes)\n self.class_indices = dict(zip(classes, range(len(classes))))\n\n pool = multiprocessing.pool.ThreadPool()\n\n white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'ppm'}\n\n function_partial = partial(_count_valid_files_in_directory,\n white_list_formats=white_list_formats,\n follow_links=follow_links)\n self.samples = sum(pool.map(function_partial,\n (os.path.join(directory, subdir)\n for subdir in classes)))\n\n # second, build an index of the images in the different class subfolders\n results = []\n\n self.filenames = []\n self.classes = np.zeros((batch_size,), dtype='int32')\n i = 0\n for dirpath in (os.path.join(directory, subdir) for subdir in classes):\n results.append(pool.apply_async(_list_valid_filenames_in_directory,\n (dirpath, white_list_formats,\n self.class_indices, follow_links, self.image_data_generator.triplet_path)))\n for res in results:\n classes, filenames = res.get()\n self.filenames += filenames\n i += len(classes)\n pool.close()\n pool.join()\n\n self._filepaths = [\n os.path.join(self.directory, fname) for fname in self.filenames\n ]\n super(MildDirectoryIterator, self).__init__(batch_size,\n shuffle,\n seed,\n self.image_data_generator.triplet_path)\n @property\n def filepaths(self):\n return self._filepaths\n\n @property\n def labels(self):\n return self.classes\n\n @property # mixin needs this property to work\n def sample_weight(self):\n # no sample weights will be returned\n return None"
] |
[
[
"numpy.zeros"
]
] |
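A minimal sketch exercising the two stand-alone helpers from the row above on a throwaway directory; it assumes the repository is on the Python path so `trainer.image.directory_iterator` is importable (per the row's file path), and it infers the triplet-file format (one `query,positive,negative` filename line per triplet) from `_list_valid_filenames_in_directory` itself.

```python
# Hypothetical usage sketch for the helper functions shown above; assumes the repo is on
# the Python path so trainer.image.directory_iterator is importable.
import os
import tempfile
from trainer.image.directory_iterator import (
    _count_valid_files_in_directory,
    _list_valid_filenames_in_directory,
)

root = tempfile.mkdtemp()
q_dir = os.path.join(root, "q")
os.makedirs(q_dir)
for name in ("a.jpg", "b.jpg"):
    open(os.path.join(q_dir, name), "wb").close()

triplet_path = os.path.join(root, "triplets.txt")
with open(triplet_path, "w") as f:
    f.write("a.jpg,b.jpg,a.jpg\n")           # query, positive, negative filenames

print(_count_valid_files_in_directory(q_dir, {"jpg"}, follow_links=False))   # 2
classes, filenames = _list_valid_filenames_in_directory(
    q_dir, {"jpg"}, class_indices={"q": 0, "p": 1, "n": 2},
    follow_links=False, triplet_path=triplet_path)
print(classes, filenames)   # ['q', 'p', 'n'] and the triplet's filenames
```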
gbenznyc/neural-network
|
[
"84a7127cf5107facce0a5504f299e84135aa199f"
] |
[
"perceptron.py"
] |
[
"import numpy as np\n\nclass Perceptron:\n\tdef __init__(self, input_nodes, learning_rate, epochs=50, one_output=False):\n\t\tself.input_nodes = input_nodes\n\t\tself.learning_rate = learning_rate\n\t\tself.epochs = epochs\n\t\tself.one_output = one_output\n\n\t\t#Initialize weights, we add one to the input_nodes\n\t\t#to account for the extra bias node we will be including\n\t\tself.weights = np.random.uniform(low=-0.15, high=0.15, size=(input_nodes + 1,))\n\n\tdef activation_function(self, x):\n\t\t#We will be using a sigmoid function as our activation_function\n\t\treturn 1 / (1 + np.exp(-x + 0.5))\n\n\tdef predict(self, inputs):\n\t\tnumpy_inputs = np.asarray(inputs)\n\n\t\t#Add the bias node to the input layer\n\t\tnumpy_inputs = np.insert(numpy_inputs, 0, 1)\n\n\t\t#Calculate the summation of all of the inputs with the weights\n\t\tweighted_sum = np.dot(numpy_inputs, self.weights)\n\n\t\t#Return the weighted_sum passed through the activation function\n\t\tif self.one_output:\n\t\t\t#If we only have one output node multiply value by 10\n\t\t\tsigmoid_val = self.activation_function(weighted_sum)\n\t\t\tif sigmoid_val <= 0.1:\n\t\t\t\treturn 0, sigmoid_val\n\t\t\telif sigmoid_val <= 0.2:\n\t\t\t\treturn 1, sigmoid_val\n\t\t\telif sigmoid_val <= 0.3:\n\t\t\t\treturn 2, sigmoid_val\n\t\t\telif sigmoid_val <= 0.4:\n\t\t\t\treturn 3, sigmoid_val\n\t\t\telif sigmoid_val <= 0.5:\n\t\t\t\treturn 4, sigmoid_val\n\t\t\telif sigmoid_val <= 0.6:\n\t\t\t\treturn 5, sigmoid_val\n\t\t\telif sigmoid_val <= 0.7:\n\t\t\t\treturn 6, sigmoid_val\n\t\t\telif sigmoid_val <= 0.8:\n\t\t\t\treturn 7, sigmoid_val\n\t\t\telif sigmoid_val <= 0.9:\n\t\t\t\treturn 8, sigmoid_val\n\t\t\telse:\n\t\t\t\treturn 9, sigmoid_val\n\t\telse:\n\t\t\t#If we have 10 output nodes, keep the value between 0 and 1\n\t\t\treturn self.activation_function(weighted_sum)\n\n\n\tdef learn(self, training_data, testing_data=None, output=0, error=0):\n\t\t#Iterate through amount of epochs individually\n\t\tif self.one_output:\n\t\t\tfor i in range(0, self.epochs):\n\t\t\t\tprint(\"Current epoch: \" + str(i))\n\t\t\t\tfor data in training_data:\n\t\t\t\t\t#Predict a result based on some data\n\t\t\t\t\tprediction, output = self.predict(data[\"data\"])\n\n\t\t\t\t\t#Find the difference between the prediction and the actual label\n\t\t\t\t\terror = (data[\"answer\"] - (output * 10.0))/10\n\n\t\t\t\t\t#Update the weight of the bias node\n\t\t\t\t\tself.weights[0] += error * self.learning_rate\n\n\t\t\t\t\t#Update the weight of all other inputs\n\t\t\t\t\tself.weights[1:] += error * self.learning_rate * np.asarray(data[\"data\"])\n\t\t\t\t\t\n\t\t\t\t#Testing each epoch\n\t\t\t\tcorrect = 0\n\t\t\t\tfor data in testing_data:\n\t\t\t\t\tif self.predict(data[\"data\"])[0] == data[\"answer\"]:\n\t\t\t\t\t\tcorrect += 1\n\n\t\t\t\tprint(\"Accuracy: \" + str(correct/len(testing_data)))\n\t\t\n\t\t#If the perceptron is part of a group of others\n\t\telse:\n\t\t\t#Update the weight of the bias node\n\t\t\tself.weights[0] += error * self.learning_rate \n\n\t\t\t#Update the weight of all other inputs\n\t\t\tself.weights[1:] += error * self.learning_rate * np.asarray(training_data[\"data\"]) \n"
] |
[
[
"numpy.dot",
"numpy.asarray",
"numpy.insert",
"numpy.random.uniform",
"numpy.exp"
]
] |
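A minimal smoke test of the `Perceptron` class in the row above, assuming `perceptron.py` is importable from the working directory; the samples are random vectors with made-up digit labels, purely to exercise the single-output training loop.

```python
# Hypothetical usage sketch for the Perceptron class shown above; data are random.
import random
from perceptron import Perceptron

def fake_sample(n_inputs=4):
    return {"data": [random.random() for _ in range(n_inputs)],
            "answer": random.randint(0, 9)}

train = [fake_sample() for _ in range(100)]
test = [fake_sample() for _ in range(20)]

p = Perceptron(input_nodes=4, learning_rate=0.05, epochs=3, one_output=True)
p.learn(train, testing_data=test)        # prints the accuracy after each epoch

digit, sigmoid_val = p.predict(test[0]["data"])
print(digit, sigmoid_val)
```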
JochenZoellner/tf_neiss-1
|
[
"c91019e5bce6d3c7512237eec5ea997fd95304ac"
] |
[
"input_fn/input_fn_2d/data_gen_2dt/data_gen_t2d_util/triangle_2d_helper.py"
] |
[
"import logging\n\nimport numpy as np\n\n\nclass Fcalculator:\n def __init__(self, p1=np.array([0.0, 0.0]), p2=np.array([1.0, 0.0]), p3=np.array([0.0, 1.0]),\n epsilon=np.array(0.0001), no_check=False, complex_phi=False):\n self._p1 = np.array(p1, dtype=np.float128)\n self._p2 = np.array(p2, dtype=np.float128)\n self._p3 = np.array(p3, dtype=np.float128)\n if not no_check: # skip check if valid input is ensured for better performance\n assert np.sum(np.square(np.abs(self._p1 - self._p2))) > (10 * epsilon) ** 2\n assert np.sum(np.square(np.abs(self._p2 - self._p3))) > (10 * epsilon) ** 2\n assert np.sum(np.square(np.abs(self._p3 - self._p1))) > (10 * epsilon) ** 2\n\n self._jacobi_det = np.abs((self._p2[0] - self._p1[0]) * (self._p3[1] - self._p1[1]) -\n (self._p3[0] - self._p1[0]) * (self._p2[1] - self._p1[1]))\n self._epsilon = np.array(epsilon, dtype=np.float128)\n self._complex_phi = complex_phi\n\n @staticmethod\n def _case1(a, b):\n logging.info(\"case1, a!=b, a!=0, b!=0\")\n with np.errstate(divide='ignore', invalid='ignore'): # nan values not used by np.where-condition\n return 1.0 / (a * b * (b - a)) * (b * (np.exp(1.0j * a) - 1.0) - a * (np.exp(1.0j * b) - 1.0))\n\n @staticmethod\n def _case2(b):\n logging.info(\"case2, a!=b, a=0, b!=0\")\n with np.errstate(divide='ignore', invalid='ignore'): # nan values not used by np.where-condition\n return 1.0j / b - 1 / b ** 2 * (np.exp(1.0j * b) - 1.0)\n\n @staticmethod\n def _case3(a):\n logging.info(\"case3, a!=b, b=0, a!=0\")\n with np.errstate(divide='ignore', invalid='ignore'): # nan values not used by np.where-condition\n return 1.0j / a - 1 / a ** 2 * (np.exp(1.0j * a) - 1.0)\n\n @staticmethod\n def _case5(a):\n logging.info(\"case5, a=b, b!=0, a!=0\")\n with np.errstate(divide='ignore', invalid='ignore'): # nan values not used by np.where-condition\n return np.exp(1.0j * a) / (1.0j * a) + (np.exp(1.0j * a) - 1.0) / a ** 2\n\n def set_triangle(self, p1=np.array([0.0, 0.0], dtype=np.float128), p2=np.array([1.0, 0.0], dtype=np.float128),\n p3=np.array([0.0, 1.0], dtype=np.float128), no_check=False):\n self.__init__(p1, p2, p3, self._epsilon, no_check)\n\n def call_on_array(self, phi_array):\n if not self._complex_phi:\n phi_array = np.array(phi_array, dtype=np.float128)\n a_ = np.cos(phi_array, dtype=np.float128)\n b_ = np.sin(phi_array, dtype=np.float128) - 1.0\n else:\n phi_array = np.array(phi_array, dtype=np.complex128)\n a_ = phi_array.real\n b_ = phi_array.imag\n # print(\"a_\", a_)\n # print(\"b_\", b_)\n\n a = a_ * (self._p2[0] - self._p1[0]) + b_ * (self._p2[1] - self._p1[1])\n b = a_ * (self._p3[0] - self._p1[0]) + b_ * (self._p3[1] - self._p1[1])\n c = a_ * self._p1[0] + b_ * self._p1[1]\n\n f_array = np.full_like(phi_array, np.nan, dtype=np.complex256)\n\n a_not_b = np.abs(a - b) > self._epsilon\n a_is_b = np.abs(a - b) <= self._epsilon\n a_not_0 = np.abs(a) - self._epsilon > 0\n b_not_0 = np.abs(b) - self._epsilon > 0\n a_is_0 = np.abs(a) <= self._epsilon\n b_is_0 = np.abs(b) <= self._epsilon\n\n cond1 = np.logical_and(np.logical_and(a_not_b, a_not_0), b_not_0)\n cond2 = np.logical_and(np.logical_and(a_not_b, a_is_0), b_not_0)\n cond3 = np.logical_and(np.logical_and(a_not_b, b_is_0), a_not_0)\n cond4 = np.logical_or(np.logical_or(np.logical_and(a_is_0, a_is_b), np.logical_and(b_is_0, a_is_b)),\n np.logical_and(b_is_0, a_is_0))\n cond5 = np.logical_and(np.logical_and(a_is_b, b_not_0), a_not_0)\n assert (np.logical_xor(cond1, np.logical_xor(cond2, np.logical_xor(cond3, np.logical_xor(cond4,\n cond5))))).all() == True\n\n 
f_array = np.where(cond1, self._case1(a, b), f_array)\n f_array = np.where(cond2, self._case2(b), f_array)\n f_array = np.where(cond3, self._case3(a), f_array)\n f_array = np.where(cond4, 0.5, f_array)\n f_array = np.where(cond5, self._case5(a), f_array)\n\n assert np.isnan(f_array).any() == False\n return self._jacobi_det * np.exp(1.0j * c) * f_array\n\n\ndef multi_triangle_f(phi, p1=np.array([0.0, 0.0], dtype=np.float64), p2=np.array([1.0, 0.0], dtype=np.float64),\n p3=np.array([0.0, 1.0], dtype=np.float64), epsilon=0.001,\n no_check=False):\n \"\"\"slow straight forward version of Fcalculator, for scalar values of phi only\"\"\"\n phi = np.array(phi, dtype=np.float64)\n if not no_check: # skip check if valid input is ensured for better performance\n assert np.sum(np.square(np.abs(p1 - p2))) > (10 * epsilon) ** 2\n assert np.sum(np.square(np.abs(p2 - p3))) > (10 * epsilon) ** 2\n assert np.sum(np.square(np.abs(p3 - p1))) > (10 * epsilon) ** 2\n\n if phi < 0 or phi > np.pi:\n logging.error(\"input phi is out of range; phi: {}\".format(phi))\n return np.nan\n\n jacobi_det = np.abs((p2[0] - p1[0]) * (p3[1] - p1[1]) - (p3[0] - p1[0]) * (p2[1] - p1[1]))\n a_ = np.cos(phi)\n b_ = np.sin(phi) - 1.0\n a = a_ * (p2[0] - p1[0]) + b_ * (p2[1] - p1[1])\n b = a_ * (p3[0] - p1[0]) + b_ * (p3[1] - p1[1])\n c = a_ * p1[0] + b_ * p1[1]\n\n if np.abs(a - b) > epsilon and np.abs(a - epsilon) > 0 and np.abs(b - epsilon) > 0:\n logging.info(\"case1, a!=b, a!=0, b!=0\")\n f_ = 1.0 / (a * b * (b - a)) * (b * (np.exp(1.0j * a) - 1) -\n a * (np.exp(1.0j * b) - 1.0))\n elif np.abs(a - b) > epsilon and np.abs(b - epsilon) > 0:\n logging.info(\"case2, a!=b, a=0, b!=0\")\n f_ = 1.0j / b - 1 / b ** 2 * (np.exp(1.0j * b) - 1.0)\n elif np.abs(a - b) > epsilon and np.abs(a - epsilon) > 0:\n logging.info(\"case3, a!=b, b=0, a!=0\")\n f_ = 1.0j / a - 1 / a ** 2 * (np.exp(1.0j * a) - 1.0)\n elif np.abs(a) <= epsilon and np.abs(b) - epsilon <= 0:\n assert np.abs(a - b) <= epsilon # a and b have same monotonie for phi > pi\n logging.info(\"case4, a=b, a=0, b=0\")\n f_ = 0.5\n elif np.abs(a - b) <= epsilon and np.abs(a - epsilon) > 0 and np.abs(b - epsilon):\n logging.info(\"case5, a=b, b!=0, a!=0\")\n f_ = np.exp(1.0j * a) / (1.0j * a) + (np.exp(1.0j * a) - 1.0) / a ** 2\n else:\n logging.error(\"unexpected values for a and b!; a={}; b={}\".format(a, b))\n return np.nan\n\n return jacobi_det * np.exp(1.0j * c) * f_\n\n\ndef make_scatter_data(points, epsilon=0.002, phi_arr=None, dphi=0.001, complex_phi=False):\n if phi_arr is None:\n phi_arr = np.arange(0, np.pi, dphi)\n fcalc = Fcalculator(p1=points[0], p2=points[1], p3=points[2], epsilon=np.array(epsilon), complex_phi=complex_phi)\n f = fcalc.call_on_array(phi_arr)\n # print(f.real)\n # print(f.imag)\n if not complex_phi:\n one_data = np.stack((phi_arr, f.real, f.imag), axis=0).astype(np.float32)\n else:\n one_data = np.stack((phi_arr.real, phi_arr.imag, f.real, f.imag), axis=0).astype(np.float32)\n\n return one_data\n\n\ndef generate_target(center_of_weight=False, x_sorted=True, min_area=10):\n import random\n while True:\n rnd_triangle = np.reshape([random.uniform(-50.0, 50) for x in range(6)], (3, 2)).astype(np.float32)\n points = rnd_triangle\n a_x = points[0, 0]\n a_y = points[0, 1]\n b_x = points[1, 0]\n b_y = points[1, 1]\n c_x = points[2, 0]\n c_y = points[2, 1]\n\n area = np.abs((a_x * (b_y - c_y) + b_x * (c_y - a_y) + c_x * (a_y - b_y)) / 2.0)\n # area = np.abs(np.dot((rnd_triangle[0] - rnd_triangle[1]), (rnd_triangle[1] - rnd_triangle[2])) / 2.0)\n if area >= 
min_area:\n break\n\n if center_of_weight:\n rnd_triangle[0], rnd_triangle[1], rnd_triangle[2] = cent_triangle(p1=rnd_triangle[0],\n p2=rnd_triangle[1],\n p3=rnd_triangle[2])\n if x_sorted:\n rnd_triangle = rnd_triangle[rnd_triangle[:, 0].argsort()]\n return rnd_triangle.astype(np.float32)\n\n\ndef rotate_triangle(p1, p2, p3, phi):\n p_list = np.zeros((3, 2))\n for index, p in enumerate([p1, p2, p3]):\n p_list[index, 0] = p[0] * np.cos(phi) + p[1] * np.sin(phi)\n p_list[index, 1] = -1.0 * p[0] * np.sin(phi) + p[1] * np.cos(phi)\n return p_list[0], p_list[1], p_list[2]\n\n\ndef cent_triangle(p1, p2, p3):\n x_c = (p1[0] + p2[0] + p3[0]) / 3.0\n y_c = (p1[1] + p2[1] + p3[1]) / 3.0\n p_list = np.zeros((3, 2))\n for index, p in enumerate([p1, p2, p3]):\n p_list[index, 0] = p[0] - x_c\n p_list[index, 1] = p[1] - y_c\n\n return p_list[0], p_list[1], p_list[2]\n\n\ndef translation(p1, p2, p3, delta_x_y):\n p_list = np.zeros((3, 2))\n for index, p in enumerate([p1, p2, p3]):\n p_list[index, 0] = p[0] + delta_x_y[0]\n p_list[index, 1] = p[1] + delta_x_y[1]\n return p_list[0], p_list[1], p_list[2]\n\n\ndef limited_f(phi, p1=np.array([0.0, 0.0]), p2=np.array([1.0, 0.0]), p3=np.array([0.0, 1.0]), epsilon=0.001,\n no_check=False):\n \"\"\"legacy version of case_f\"\"\"\n if not no_check: # skip check if valid input is ensured for better performance\n assert np.sum(np.square(np.abs(p1 - p2))) > (10 * epsilon) ** 2\n assert np.sum(np.square(np.abs(p2 - p3))) > (10 * epsilon) ** 2\n assert np.sum(np.square(np.abs(p3 - p1))) > (10 * epsilon) ** 2\n\n if phi - epsilon < 0:\n logging.warning(\"input phi is smaller zero; phi: {}\".format(phi))\n return np.nan\n elif phi + epsilon > np.pi / 2.0 > phi - epsilon:\n logging.warning(\"input phi to close to pi/2; phi: {}\".format(phi))\n return np.nan\n elif phi - epsilon > np.pi:\n logging.warning(\"input phi greater pi; phi: {}\".format(phi))\n return np.nan\n\n a = np.cos(phi)\n b = np.sin(phi) - 1.0\n\n f = 1.0 / (a * b * (b - a)) * (b * (np.exp(1.0j * a) - 1) - a * (np.exp(1.0j * b) - 1.0))\n\n return f\n\n\ndef case_f(phi, p1=np.array([0.0, 0.0]), p2=np.array([1.0, 0.0]), p3=np.array([0.0, 1.0]), epsilon=0.001,\n no_check=False):\n \"\"\"legacy version of multi_triangle_f\"\"\"\n if not no_check: # skip check if valid input is ensured for better performance\n assert np.sum(np.square(np.abs(p1 - p2))) > (10 * epsilon) ** 2\n assert np.sum(np.square(np.abs(p2 - p3))) > (10 * epsilon) ** 2\n assert np.sum(np.square(np.abs(p3 - p1))) > (10 * epsilon) ** 2\n if phi < 0 or phi > np.pi:\n logging.error(\"input phi is out of range; phi: {}\".format(phi))\n return np.nan\n\n a = np.cos(phi)\n b = np.sin(phi) - 1.0\n\n if np.abs(a - b) > epsilon and np.abs(a - epsilon) > 0 and np.abs(b - epsilon) > 0:\n logging.info(\"case1, a!=b, a!=0, b!=0\")\n f_ = 1.0 / (a * b * (b - a)) * (b * (np.exp(1.0j * a) - 1) -\n a * (np.exp(1.0j * b) - 1.0))\n elif np.abs(a - b) > epsilon and np.abs(b - epsilon) > 0:\n logging.info(\"case2, a!=b, a=0, b!=0\")\n f_ = 1.0j / b - 1 / b ** 2 * (np.exp(1.0j * b) - 1.0)\n elif np.abs(a - b) > epsilon and np.abs(a - epsilon) > 0:\n logging.info(\"case3, a!=b, b=0, a!=0\")\n f_ = 1.0j / a - 1 / a ** 2 * (np.exp(1.0j * a) - 1.0)\n elif np.abs(a) <= epsilon and np.abs(b) - epsilon <= 0:\n assert np.abs(a - b) <= epsilon # a and b have same monotonie for phi > pi\n logging.info(\"case4, a=b, a=0, b=0\")\n f_ = 0.5\n elif np.abs(a - b) <= epsilon and np.abs(a - epsilon) > 0 and np.abs(b - epsilon):\n logging.info(\"case5, a=b, b!=0, a!=0\")\n f_ = 
np.exp(1.0j * a) / (1.0j * a) + (np.exp(1.0j * a) - 1.0) / a ** 2\n else:\n logging.error(\"unexpected values for a and b!; a={}; b={}\".format(a, b))\n return np.nan\n\n return f_\n"
] |
[
[
"numpy.logical_xor",
"numpy.abs",
"numpy.logical_and",
"numpy.isnan",
"numpy.arange",
"numpy.cos",
"numpy.stack",
"numpy.sin",
"numpy.full_like",
"numpy.exp",
"numpy.errstate",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
athiede13/free_speech
|
[
"bde32c2d48724c98f089376876cf9888f67a9f20"
] |
[
"stats/contrast_cluster_perm/cluster_correction.py"
] |
[
"\"\"\"\nApply cluster correction for independent-samples T-test based on spatial proximity and cluster size.\n\nInspired by MNE tutorial.\n\nCreated on Fri Feb 22 13:21:40 2019\n@author: Anja Thiede <[email protected]>\n\"\"\"\n\nimport numpy as np\nfrom scipy import stats\nfrom scipy.io import loadmat\nimport matplotlib.pyplot as plt\nimport os\n\nfrom permutation_cluster_test_AT import _permutation_cluster_test_AT\n\nprint(__doc__)\n\n#%% file paths\n\nconn = '/media/cbru/SMEDY/scripts_speech_rest/stats/mantel/connectivity.npy'\nresults_dir = '/media/cbru/SMEDY/results/dys_con_contrast/2020_02_redo_subject_perm/'\nread_dir = '/media/cbru/SMEDY/DATA/group_fake_iscs/'\n\n#%% read connectivity\n\nprint('Read connectivity.')\nconnectivity = np.load(conn)\n\nconnectivity_sparse = connectivity[()]\n\n#%% cluster correction\n\n# for each permutation:\n# 1. Compute the test statistic for each voxel individually.\n# 2. Threshold the test statistic values.\n# 3. Cluster voxels that exceed this threshold (with the same sign) based on adjacency.\n# 4. Retain the size of the largest cluster (measured, e.g., by a simple voxel count,\n# or by the sum of voxel t-values within the cluster) to build the null distribution.\n\n# define conditions\ncons = '_1' # '_1' listening to speech\nfreqs = {'5.000000e-01-4Hz', '4-8Hz', '8-12Hz', '12-25Hz', '25-45Hz', '55-90Hz'}\n\nif cons == '_1':\n window = '_613'\nelif cons == '_2':\n window = '_579'\nelse:\n print('Check condition!')\n\nfor freq in freqs:\n if os.path.isfile(read_dir + 'fake_t_vals_' + freq + window + cons + '.mat'):\n print(cons + ' ' + freq)\n # read in fake and actual T-test results\n fake_values = loadmat(read_dir + 'fake_t_vals_' + freq + \n window + cons + '.mat')['fake_t_vals']\n real_values = loadmat(read_dir + 'real_t_vals_' + freq + window + cons +\n '.mat')['real_t_vals']\n \n # get threshold\n threshold = loadmat(read_dir + 'tthreshold_uncorrected_' + freq +\n window + cons + '.mat')['tthreshold_uncorrected']\n print(threshold)\n \n # reshape fake_values to (n_observations, n_times, n_vertices)\n fake_values = fake_values[:, :, np.newaxis]\n fake_values = fake_values.reshape((5000, 1, 20484))\n \n # reshape real_values\n real_values = real_values[:, :, np.newaxis]\n real_values = real_values.reshape((1, 1, 20484))\n \n # search for clusters (only once)\n# max_clu_lens, clusters = _permutation_cluster_test_AT(fake_values,\n# threshold=threshold[0][0],\n# n_permutations=5000,\n# tail=0,\n# connectivity=connectivity_sparse,\n# n_jobs=4, seed=10,\n# max_step=1, t_power=1,\n# out_type='indices',\n# exclude=None,\n# step_down_p=0,\n# check_disjoint=False,\n# buffer_size=1000)\n# \n# np.save(results_dir + 'max_clu_lens_' + freq + window + cons, max_clu_lens)\n \n max_clu_lens = np.load(results_dir + 'max_clu_lens_' + freq + window + cons + '.npy')\n \n # null distribution\n plt.figure(0)\n plt.hist(max_clu_lens)\n kde = stats.gaussian_kde(max_clu_lens)\n x = np.linspace(max_clu_lens.min(), max_clu_lens.max(), 100)\n p = kde(x)\n # cutoff for a cluster size that is significant\n plt.figure(1)\n plt.plot(x, p)\n plt.hlines(0.095, 0, 14) # visualization of cutoff\n # take maximum across all freq bands\n cutoff = np.max(max_clu_lens)\n print(['cutoff length is ', cutoff])\n \n max_clu_lens2, clusters = _permutation_cluster_test_AT(real_values,\n threshold=threshold[0][0],\n n_permutations=1,\n tail=0,\n connectivity=connectivity_sparse,\n n_jobs=4, seed=10,\n max_step=1,\n t_power=1,\n out_type='indices',\n exclude=None,\n step_down_p=0,\n 
check_disjoint=False,\n buffer_size=1000)\n \n # length of all initial clusters\n clu_lens = np.zeros(len(clusters))\n for j in range(0, len(clusters)):\n clu_lens[j] = len(clusters[j][0])\n \n # hists\n plt.figure(1)\n plt.hist(max_clu_lens)\n plt.hist(clu_lens)\n \n # out in format required by MNE cluster function (for visualization)\n t_out = real_values.reshape(1, 20484)\n clusters_new = clusters\n for c, l, i in zip(clusters, clu_lens, range(0, len(clusters))):\n clusters_new[i] = np.zeros(np.int(l), dtype='int'), c[0]\n \n clu = t_out, clusters_new\n np.save(results_dir + 'clu_' + freq + window + cons, clu)\n \n # see how many clusters exceed the threshold (i.e. survive the correction)\n ids = np.where(clu_lens > cutoff)[0]\n clu_sig = clusters[0:len(ids)]\n for i in range(0, len(ids)):\n clu_sig[i] = clusters[ids[i]]\n \n sig_clu_lens = np.zeros(len(clu_sig))\n for j in range(0, len(clu_sig)):\n sig_clu_lens[j] = len(clu_sig[j][0])\n else: print('No uncorrected p-vals < 0.05 for ' + freq)\n"
] |
[
[
"matplotlib.pyplot.hlines",
"scipy.io.loadmat",
"numpy.save",
"matplotlib.pyplot.plot",
"numpy.max",
"scipy.stats.gaussian_kde",
"numpy.int",
"numpy.load",
"numpy.where",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.figure"
]
] |
osu-anticheat/wtc-lzma-compressor
|
[
"bc67eeea6928f505b65e90b2884067c23671bf9d"
] |
[
"wtc/wtc.py"
] |
[
"import lzma\r\nimport struct\r\n\r\nimport numpy as np\r\n\r\ndef unsorted_diff_pack_16_8(int16s):\r\n \"\"\"\r\n Packs the differential of the input to bytes in little endian order.\r\n\r\n Args:\r\n List ints16s: The list of shorts to differentially compress.\r\n\r\n Returns:\r\n The differential data as a list of bytes.\r\n \"\"\"\r\n\r\n escape = -2 ** 7\r\n small = 2 ** 7 - 1\r\n start = int16s[0]\r\n diff = np.diff(int16s)\r\n packed = []\r\n\r\n def pack(word):\r\n if abs(word) <= small:\r\n packed.append(word)\r\n else:\r\n packed.append(escape)\r\n packed.append(word & 0xFF)\r\n word = word >> 8\r\n packed.append(word)\r\n\r\n pack(start)\r\n for word in diff:\r\n pack(word)\r\n\r\n return np.int8(packed)\r\n\r\ndef unsorted_diff_unpack_8_16(ints8):\r\n \"\"\"\r\n Unpacks the differential data in little endian order to words.\r\n\r\n Args:\r\n List ints8s: The list of bytes to decompress.\r\n\r\n Returns:\r\n The decompressed shorts.\r\n \"\"\"\r\n\r\n escape = -2 ** 7\r\n decoded = []\r\n\r\n i = 0\r\n while i < len(ints8):\r\n byte = ints8[i]\r\n\r\n if byte == escape:\r\n i += 1\r\n word = ints8[i] & 0xFF\r\n i += 1\r\n word += ints8[i] << 8\r\n decoded.append(word)\r\n else:\r\n decoded.append(byte)\r\n\r\n i += 1\r\n\r\n decompressed = np.cumsum(decoded)\r\n\r\n return np.int16(decompressed)\r\n\r\ndef pack_32_8(ints32):\r\n \"\"\"\r\n Packs the input to bytes in little endian order.\r\n\r\n Args:\r\n List ints32s: The list of shorts to compress.\r\n\r\n Returns:\r\n The data as a list of bytes.\r\n \"\"\"\r\n\r\n escape = -2 ** 7\r\n small = 2 ** 7 - 1\r\n packed = []\r\n\r\n for dword in ints32:\r\n if abs(dword) <= small:\r\n packed.append(dword)\r\n else:\r\n packed.append(escape)\r\n packed.append(dword & 0xFF)\r\n dword = dword >> 8\r\n packed.append(dword & 0xFF)\r\n dword = dword >> 8\r\n packed.append(dword & 0xFF)\r\n dword = dword >> 8\r\n packed.append(dword)\r\n\r\n return np.int8(packed)\r\n\r\ndef unpack_8_32(ints8):\r\n \"\"\"\r\n Unpacks the data in little endian order.\r\n\r\n Args:\r\n List ints8s: The list of bytes to decompress.\r\n\r\n Returns:\r\n The decompressed integers.\r\n \"\"\"\r\n\r\n escape = -2 ** 7\r\n unpacked = []\r\n\r\n i = 0\r\n while i < len(ints8):\r\n byte = ints8[i]\r\n\r\n if byte == escape:\r\n i += 1\r\n dword = ints8[i] & 0xFF\r\n i += 1\r\n dword += (ints8[i] << 8) & 0xFF00\r\n i += 1\r\n dword += (ints8[i] << 16) & 0xFF0000\r\n i += 1\r\n dword += ints8[i] << 24\r\n unpacked.append(dword)\r\n else:\r\n unpacked.append(byte)\r\n\r\n i += 1\r\n\r\n return np.int32(unpacked)\r\n\r\ndef compress(lzma_stream):\r\n \"\"\"\r\n Packs replay into a more compact format.\r\n\r\n Args:\r\n String lzma_stream: An lzma stream from a replay.\r\n\r\n Returns:\r\n An lzma compressed bytestring\r\n \"\"\"\r\n\r\n #separate the lzma stream to apply different compression for each datatype\r\n xs, ys, zs, ws = separate(lzma_stream)\r\n\r\n xs = unsorted_diff_pack_16_8(xs)\r\n ys = unsorted_diff_pack_16_8(ys)\r\n\r\n ws = pack_32_8(ws)\r\n zs = np.int8(zs)\r\n #store all data as arrays of bytes with their lenght stored in the first 4 bytes\r\n def pack_bytes(bs):\r\n return struct.pack(f'<I{len(bs)}b', len(bs), *bs)\r\n\r\n buf = b''.join([pack_bytes(bs) for bs in (xs, ys, zs, ws)])\r\n\r\n return lzma.compress(buf, format=2)\r\n\r\ndef decompress(compressed_lzma, decompressed_lzma=False):\r\n \"\"\"\r\n Decompresses a separated and compressed lzma into an lzma stream.\r\n\r\n Args:\r\n String compressed_lzma: A separated and compressed 
representation of replay data.\r\n Boolean decompressed_lzma: Whether to return decompressed and decoded lzma, after decompressing from wtc.\r\n\r\n Returns:\r\n An lzma compressed bytestring, identical to the (decoded) string returned by the get_replay api endpoint.\r\n \"\"\"\r\n\r\n data = lzma.decompress(compressed_lzma)\r\n\r\n def unpack_bytes(data):\r\n size, = struct.unpack('<I', data[:4])\r\n data = data[4:]\r\n bs = struct.unpack(f'<{size}b', data[:size])\r\n data = data[size:]\r\n\r\n return bs, data\r\n\r\n xs, data = unpack_bytes(data)\r\n ys, data = unpack_bytes(data)\r\n zs, data = unpack_bytes(data)\r\n ws, data = unpack_bytes(data)\r\n\r\n xs = unsorted_diff_unpack_8_16(xs)\r\n ys = unsorted_diff_unpack_8_16(ys)\r\n\r\n ws = unpack_8_32(ws)\r\n ret = combine(xs, ys, zs, ws)\r\n if decompressed_lzma:\r\n return ret\r\n # format 1 is FORMAT_XZ, an implementation of lzma2, the most recent lzma\r\n # standard. However I've been told (but have not tested) by wtc that osu!\r\n # only accepts replays in format 2 (aka FORMAT_ALONE), the older lzma\r\n # standard.\r\n ret = lzma.compress(ret.encode('UTF-8'), format=2)\r\n return ret\r\n\r\ndef separate(lzma_stream):\r\n \"\"\"\r\n Separates the lzma stream of frames into separate lists of x, y, z and w.\r\n\r\n Args:\r\n String lzma_stream: The lzma to separate.\r\n\r\n Returns:\r\n The lists of x, y, z, w.\r\n \"\"\"\r\n text = lzma.decompress(lzma_stream).decode('UTF-8')\r\n\r\n xs = []\r\n ys = []\r\n zs = []\r\n ws = []\r\n\r\n for frame in text.split(','):\r\n if not frame:\r\n continue\r\n w, x, y, z = frame.split('|')\r\n w = int(w)\r\n x = float(x)\r\n y = float(y)\r\n z = int(z)\r\n\r\n #Everything we need from Z is in the first byte\r\n z = z & 0xFF\r\n\r\n #To fit x and y into shorts, they can be scaled to retain more precision.\r\n x = int(round(x * 16))\r\n y = int(round(y * 16))\r\n\r\n #Prevent the coordinates from being too large for a short. If this happens, the cursor is way offscreen anyway.\r\n if x <= -0x8000: x = -0x8000\r\n elif x >= 0x7FFF: x = 0x7FFF\r\n if y <= -0x8000: y = -0x8000\r\n elif y >= 0x7FFF: y = 0x7FFF\r\n\r\n #w: signed 24bit integer\r\n #x: signed short\r\n #y: signed short\r\n #z: unsigned char\r\n xs.append(x)\r\n ys.append(y)\r\n zs.append(z)\r\n ws.append(w)\r\n\r\n return xs, ys, zs, ws\r\n\r\ndef combine(xs, ys, zs, ws):\r\n \"\"\"\r\n Combines the lists of x, y, z and w into a lzma stream.\r\n\r\n Args:\r\n List x: All x datapoints.\r\n List y: All y datapoints.\r\n List z: All z datapoints.\r\n List w: All w datapoints.\r\n\r\n Returns:\r\n The combination as a string.\r\n \"\"\"\r\n\r\n if not len(xs) == len(ys) == len(zs) == len(ws):\r\n raise ValueError(\"The bytearrays are of unequal lengths\")\r\n\r\n xs = np.array(xs) / 16\r\n ys = np.array(ys) / 16\r\n\r\n frames = zip(xs, ys, zs, ws)\r\n\r\n frames = [f'{w}|{x}|{y}|{z},' for x, y, z, w in frames]\r\n\r\n return ''.join(frames)\r\n"
] |
[
[
"numpy.int32",
"numpy.int8",
"numpy.cumsum",
"numpy.int16",
"numpy.diff",
"numpy.array"
]
] |
Just-DIRECT-Capstone/Protein-Purification-Model-Public
|
[
"cf18d9669dfc4d49e53bb3e6e78c06cb42bf404a"
] |
[
"visualization/simple_data_vis.py"
] |
[
"\"\"\"imports\"\"\"\nfrom inspect import isdatadescriptor\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport utils\nfrom surrogate_models.nn_defs import multi_mse\n\ndef histograms(data,x_data,y_data):\n \"\"\"builds histograms\"\"\"\n n_outputs = len(y_data)\n n_inputs = len(x_data)\n n_gen = int(np.ceil(n_inputs**0.5))\n\n fig, axis = plt.subplots(n_gen, n_gen, figsize = (12,12))\n fig.tight_layout()\n\n for a_n,var in zip(axis.flat,x_data):\n data[var].hist(axis = a_n)\n a_n.set_title(var)\n\n fig, axis = plt.subplots(n_outputs, 1, figsize = (3,6))\n fig.tight_layout()\n\n for a_n,var in zip(axis.flat,y_data):\n data[var].hist(axis = a_n)\n a_n.set_title(var)\n\n return fig,axis\n\ndef scatter_hats(models, train, test = None, settings = {}, n_points = 50, display_info = True, plot = 'yield,purity', index = False):\n \"\"\"main function for visualizing model prediction v. true value complete with mse\"\"\"\n y_data = train[1].columns\n markers = ['.','^'] # ok for two outputs only\n\n sample_inputs_train = train[0].sample(n_points)\n sample_outputs_train = train[1].loc[sample_inputs_train.index]\n if test is not None: sample_inputs_test = test[0].sample(n_points)\n if test is not None: sample_outputs_test = test[1].loc[sample_inputs_test.index]\n\n if len(models) == 1:\n f = plt.figure(figsize = (4*len(models),4*len(models)))\n else:\n f = plt.figure(figsize = (10*len(models)/3,10*len(models)))\n\n for i,mod in enumerate(models):\n model_name = utils.get_model_name(mod, settings['dataset'])\n plt.subplot(1,len(models),i+1)\n\n train_hat = mod(sample_inputs_train.to_numpy())\n if test is not None: test_hat = mod(sample_inputs_test.to_numpy())\n\n for j in range(len(y_data)):\n if y_data[j] in plot:\n try:\n plt.errorbar(sample_outputs_train.to_numpy()[:,j],\n train_hat[j].numpy(),\n marker = markers[j], color = 'k', alpha = 0.5,\n ls = 'none', label = y_data[j])\n if test is not None: plt.errorbar(sample_outputs_test.to_numpy()[:,j],\n test_hat[j].numpy(),\n marker = markers[j], color = 'r', alpha = 0.5,\n ls = 'none')\n except:\n plt.errorbar(sample_outputs_train.to_numpy()[:,j], train_hat[j].mean().numpy(),\n yerr = train_hat[j].stddev().numpy().squeeze(),\n marker = markers[j], color = 'k', alpha = 0.5, ls = 'none',\n label = y_data[j])\n if test is not None: plt.errorbar(sample_outputs_test.to_numpy()[:,j], test_hat[j].mean().numpy(),\n yerr = test_hat[j].stddev().numpy().squeeze(),\n marker = markers[j], color = 'r', alpha = 0.5, ls = 'none')\n\n xmax = 1.1\n plt.gca().set_aspect('equal')\n plt.xlim([0,xmax])\n plt.ylim([0,xmax])\n plt.plot([0,xmax],[0,xmax],'k',alpha=0.25)\n plt.xlabel('true')\n plt.ylabel('predicted')\n if i == 0 and plot == 'yield,purity':\n plt.legend(frameon = True)\n\n rmse = multi_mse(train[1], mod(train[0].to_numpy()))\n plt.text(0.5,0.12,f\"MSE: {round(np.sum(rmse)*100, 2)}\", color = 'k')\n if test is not None:\n rmse = multi_mse(test[1], mod(test[0].to_numpy()))\n plt.text(0.5,0.04,f\"Test MSE: {round(np.sum(rmse)*100, 2)}\", color = 'r')\n\n plt.tight_layout()\n if display_info:\n plt.title(model_name + ' ({} params)'.format(utils.count_parameters(mod)))\n plt.text(2, .80, '.', fontsize=1)\n # could be a loop\n plt.text(1.20, .90, 'Dataset: '+settings['dataset'], fontsize=12)\n plt.text(1.20, .70, 'Optimizer: '+settings['optimizer'], fontsize=12)\n plt.text(1.20, .50, 'Learning Rate: '+str(settings['learning_rate']), fontsize=12)\n plt.text(1.20, .30, 'Loss Weights: '+str(settings['loss_weights']), fontsize=12)\n plt.text(1.20, .10, 'Epochs: 
'+str(settings['epochs']), fontsize=12)\n \n if index:\n return f, sample_inputs_train.index\n else:\n return f\n \ndef training_curves(models, y_data, settings, histories, smoothing = 1):\n \"\"\"function for building training curves\"\"\"\n epochs = np.arange(settings['epochs'])\n markers = ['.','^'] # ok for two outputs only\n if len(models) == 1:\n f = plt.figure(figsize = (4*len(models),4*len(models)))\n else:\n f = plt.figure(figsize = (10*len(models)/3,10*len(models)))\n for i,mod in enumerate(models):\n\n model_name = utils.get_model_name(mod, settings['dataset'])\n plt.subplot(1,len(models),i+1)\n for j in range(len(y_data)):\n plt.plot(np.convolve(np.log(\n histories[model_name].history[y_data[j]+'_mse']),\n np.ones(smoothing)/smoothing, mode='valid'),\n 'k--', alpha = 0.5)\n plt.plot(np.convolve(np.log(\n histories[model_name].history['val_'+y_data[j]+'_mse']),\n np.ones(smoothing)/smoothing, mode='valid'),\n 'r--', alpha = 0.5)\n\n plt.plot(epochs[::smoothing], np.convolve(np.log(\n histories[model_name].history[y_data[j]+'_mse']),\n np.ones(smoothing)/smoothing, mode='valid')[::smoothing],\n 'k'+markers[j], label = y_data[j], alpha = 0.5)\n plt.plot(epochs[::smoothing], np.convolve(np.log(\n histories[model_name].history['val_'+y_data[j]+'_mse']),\n np.ones(smoothing)/smoothing, mode='valid')[::smoothing],\n 'r'+markers[j], alpha = 0.5)\n\n if i == 0:\n plt.legend(frameon = False)\n plt.xlabel('epochs')\n plt.ylabel('log MSE')\n plt.title(model_name + ' ({} params)'.format(utils.count_parameters(mod)))\n plt.gca().set_aspect(1./plt.gca().get_data_ratio())\n\n plt.tight_layout()\n return f"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"numpy.log",
"numpy.arange",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"numpy.ones",
"matplotlib.pyplot.plot",
"numpy.ceil",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.text",
"numpy.sum",
"matplotlib.pyplot.ylabel"
]
] |
Jonbroad15/cpg-transformer
|
[
"2d2a1ca787bb25869f3981ba977632fc0d2b5eb1"
] |
[
"cpg_transformer/camelia.py"
] |
[
"from catboost import CatBoostClassifier\nimport pandas as pd\nimport numpy as np\n\nclass CaMeliaModel():\n def __init__(self, dropnans=False, learning_rate=0.1, max_depth=7, verbose=100,\n eval_metric='AUC', device='GPU', train_dir=None):\n self.dropnans = dropnans\n self.model = CatBoostClassifier(learning_rate=learning_rate, max_depth=max_depth,\n verbose=verbose, eval_metric=eval_metric, train_dir=train_dir,\n task_type=device, cat_features=['DNA'+str(i) for i in range(20)])\n \n def fit(self, X_train, y_train): \n if self.dropnans:\n drop = pd.isnull(X_train).values.sum(-1) == 0\n X_train = X_train.iloc[drop]\n y_train = y_train[drop]\n \n ix = np.arange(len(y_train))\n np.random.shuffle(ix)\n X_train, y_train = X_train.iloc[ix].reset_index(drop=True), y_train[ix]\n \n self.model.fit(X_train, y_train)\n \n def test(self, X_test, y_test, pos_test, save_location):\n if self.dropnans:\n drop = pd.isnull(X_test).values.sum(-1) == 0\n X_test = X_test.iloc[drop]\n y_test = y_test[drop]\n pos_test = pos_test[drop]\n \n y_pred = self.model.predict_proba(X_test)[:,1] \n outs = np.stack([y_pred, y_test])\n with open(save_location, 'wb') as f:\n np.savez_compressed(f, outs, pos_test)\n \n def save(self, save_location):\n self.model.save_model(save_location)"
] |
[
[
"pandas.isnull",
"numpy.savez_compressed",
"numpy.stack",
"numpy.random.shuffle"
]
] |
felizang/IIC-pytorch3
|
[
"c16928fd497089b3776c7dc3a2ac89b863314a62"
] |
[
"iic/cluster_sobel_twohead.py"
] |
[
"from __future__ import print_function\n\nimport argparse\n# import itertools\nimport os\nimport pickle\nimport sys\nfrom datetime import datetime\n\nimport matplotlib\nimport numpy as np\nimport torch\n\nimport matplotlib.pyplot as plt\nfrom torch.optim import Adam\n\nimport archs as archs\nfrom utils_cluster import config_to_str, get_opt, update_lr, nice\nfrom utils_cluster import sobel_process\nfrom utils_cluster import cluster_eval, get_subhead_using_loss\nfrom utils_cluster import cluster_twohead_create_dataloaders\nfrom utils_cluster import iid_loss, iic_loss\n\n# matplotlib.use('Agg')\nmatplotlib.use('TkAgg')\n\n\"\"\"\n Fully unsupervised clustering (\"IIC\" = \"IID\").\n Train and test script (coloured datasets).\n Network has two heads, for overclustering and final clustering.\n\"\"\"\n\n# Options ----------------------------------------------------------------------\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--model_ind\", type=int, required=True)\nparser.add_argument(\"--arch\", type=str, required=True)\nparser.add_argument(\"--opt\", type=str, default=\"Adam\")\nparser.add_argument(\"--mode\", type=str, default=\"IID\")\n\nparser.add_argument(\"--dataset\", type=str, required=True)\nparser.add_argument(\"--dataset_root\", type=str, required=True)\n\nparser.add_argument(\"--gt_k\", type=int, required=True)\nparser.add_argument(\"--output_k_A\", type=int, required=True)\nparser.add_argument(\"--output_k_B\", type=int, required=True)\n\nparser.add_argument(\"--lamb\", type=float, default=1.0)\nparser.add_argument(\"--lr\", type=float, default=0.01)\nparser.add_argument(\"--lr_schedule\", type=int, nargs=\"+\", default=[])\nparser.add_argument(\"--lr_mult\", type=float, default=0.1)\n\nparser.add_argument(\"--num_epochs\", type=int, default=1000)\nparser.add_argument(\"--batch_sz\", type=int, required=True) # num pairs\nparser.add_argument(\"--num_dataloaders\", type=int, default=3)\nparser.add_argument(\"--num_sub_heads\", type=int, default=5) # per head...\n\nparser.add_argument(\"--out_root\", type=str,\n default=\"/home/asc/experi/IIC-master/out\")\nparser.add_argument(\"--restart\", dest=\"restart\", default=False,\n action=\"store_true\")\nparser.add_argument(\"--restart_from_best\", dest=\"restart_from_best\",\n default=False, action=\"store_true\")\nparser.add_argument(\"--test_code\", dest=\"test_code\", default=False,\n action=\"store_true\")\n\nparser.add_argument(\"--stl_leave_out_unlabelled\", default=False,\n action=\"store_true\")\n\nparser.add_argument(\"--save_freq\", type=int, default=10)\n\nparser.add_argument(\"--double_eval\", default=False, action=\"store_true\")\n\nparser.add_argument(\"--head_A_first\", default=False, action=\"store_true\")\nparser.add_argument(\"--head_A_epochs\", type=int, default=1)\nparser.add_argument(\"--head_B_epochs\", type=int, default=1)\n\nparser.add_argument(\"--batchnorm_track\", default=False, action=\"store_true\")\n\nparser.add_argument(\"--select_sub_head_on_loss\", default=False,\n action=\"store_true\")\n\n# transforms\nparser.add_argument(\"--mix_train\", dest=\"mix_train\", default=False,\n action=\"store_true\")\nparser.add_argument(\"--include_rgb\", dest=\"include_rgb\", default=False,\n action=\"store_true\")\n\nparser.add_argument(\"--demean\", dest=\"demean\", default=False,\n action=\"store_true\")\nparser.add_argument(\"--per_img_demean\", dest=\"per_img_demean\", default=False,\n action=\"store_true\")\nparser.add_argument(\"--data_mean\", type=float, nargs=\"+\", 
default=[])\nparser.add_argument(\"--data_std\", type=float, nargs=\"+\", default=[])\n\nparser.add_argument(\"--crop_orig\", dest=\"crop_orig\", default=False,\n action=\"store_true\")\nparser.add_argument(\"--rand_crop_sz\", type=int, default=84)\nparser.add_argument(\"--input_sz\", type=int, default=96)\n\nparser.add_argument(\"--fluid_warp\", dest=\"fluid_warp\", default=False,\n action=\"store_true\")\nparser.add_argument(\"--rand_crop_szs_tf\", type=int, nargs=\"+\",\n default=[]) # only used if fluid warp true\nparser.add_argument(\"--rot_val\", type=float,\n default=0.) # only used if fluid warp true\n\nparser.add_argument(\"--cutout\", default=False, action=\"store_true\")\nparser.add_argument(\"--cutout_p\", type=float, default=0.5)\nparser.add_argument(\"--cutout_max_box\", type=float, default=0.5)\n\nconfig = parser.parse_args()\n\n# Setup ------------------------------------------------------------------------\n\n# setup override ----------\nsys.stdout = open('train640.log', 'w')\n\nconfig.model_ind = 640\nconfig.arch = 'ClusterNet5gTwoHead'\nconfig.dataset = 'CIFAR10'\nconfig.dataset_root = '/home/asc/experi/IIC-master/datasets/CIFAR'\nconfig.gt_k = 10\nconfig.output_k_A = 70\nconfig.output_k_B = 10\nconfig.lamb = 1.0\nconfig.lr = 0.001\nconfig.num_epochs = 1000\nconfig.batch_sz = 660\nconfig.num_dataloaders = 3\nconfig.num_sub_heads = 5\nconfig.crop_orig = 'crop_orig'\nconfig.rand_crop_sz = 20\nconfig.input_sz = 32\nconfig.head_A_first = True\nconfig.head_B_epochs = 2\n# -------------------------\nconfig.twohead = True\n\nif not config.include_rgb:\n config.in_channels = 2\nelse:\n config.in_channels = 5\n\nconfig.out_dir = os.path.join(config.out_root, str(config.model_ind))\nassert (config.batch_sz % config.num_dataloaders == 0)\nconfig.dataloader_batch_sz = int(config.batch_sz / config.num_dataloaders)\n\nassert (config.mode == \"IID\")\nassert (\"TwoHead\" in config.arch)\nassert (config.output_k_B == config.gt_k)\nconfig.output_k = config.output_k_B # for eval code\nassert (config.output_k_A >= config.gt_k)\n\nconfig.eval_mode = \"hung\"\n\nif not os.path.exists(config.out_dir):\n os.makedirs(config.out_dir)\n\nif config.restart:\n config_name = \"config.pickle\"\n net_name = \"latest_net.pytorch\"\n opt_name = \"latest_optimiser.pytorch\"\n\n if config.restart_from_best:\n config_name = \"best_config.pickle\"\n net_name = \"best_net.pytorch\"\n opt_name = \"best_optimiser.pytorch\"\n\n given_config = config\n reloaded_config_path = os.path.join(given_config.out_dir, config_name)\n print(\"Loading restarting config from: %s\" % reloaded_config_path)\n with open(reloaded_config_path, \"rb\") as config_f:\n config = pickle.load(config_f)\n assert (config.model_ind == given_config.model_ind)\n config.restart = True\n config.restart_from_best = given_config.restart_from_best\n\n # copy over new num_epochs and lr schedule\n config.num_epochs = given_config.num_epochs\n config.lr_schedule = given_config.lr_schedule\n\n if not hasattr(config, \"cutout\"):\n config.cutout = False\n config.cutout_p = 0.5\n config.cutout_max_box = 0.5\n\n if not hasattr(config, \"batchnorm_track\"):\n config.batchnorm_track = True # before we added in false option\n\nelse:\n print(\"Config: %s\" % config_to_str(config))\n\n# Model ------------------------------------------------------------------------\n\ndataloaders_head_A, dataloaders_head_B, mapping_assignment_dataloader, \\\n mapping_test_dataloader = cluster_twohead_create_dataloaders(config)\n\nnet = archs.__dict__[config.arch](config)\nif 
config.restart:\n model_path = os.path.join(config.out_dir, net_name)\n net.load_state_dict(\n torch.load(model_path, map_location=lambda storage, loc: storage)) # to CPU\n\nnet.cuda()\nnet = torch.nn.DataParallel(net)\nnet.train()\n\noptimiser = get_opt(config.opt)(net.module.parameters(), lr=config.lr)\n# optimiser = get_opt(config.opt)(net.parameters(), lr=config.lr)\nif config.restart:\n opt_path = os.path.join(config.out_dir, opt_name)\n optimiser.load_state_dict(torch.load(opt_path))\n\nheads = [\"B\", \"A\"]\nif config.head_A_first:\n heads = [\"A\", \"B\"]\n\n# head_epochs = {}\nhead_epochs = dict()\nhead_epochs[\"A\"] = config.head_A_epochs\nhead_epochs[\"B\"] = config.head_B_epochs\n\n# Results ----------------------------------------------------------------------\n\nif config.restart:\n if not config.restart_from_best:\n next_epoch = config.last_epoch + 1 # corresponds to last saved model\n else:\n next_epoch = np.argmax(np.array(config.epoch_acc)) + 1 # epoch_acc = list of accuracy @ epoch ?\n # find epoch with best accuracy?\n print(\"starting from epoch %d\" % next_epoch)\n\n config.epoch_acc = config.epoch_acc[:next_epoch] # in case we overshot\n config.epoch_avg_subhead_acc = config.epoch_avg_subhead_acc[:next_epoch]\n config.epoch_stats = config.epoch_stats[:next_epoch]\n\n if config.double_eval:\n config.double_eval_acc = config.double_eval_acc[:next_epoch]\n config.double_eval_avg_subhead_acc = config.double_eval_avg_subhead_acc[\n :next_epoch]\n config.double_eval_stats = config.double_eval_stats[:next_epoch]\n\n config.epoch_loss_head_A = config.epoch_loss_head_A[:(next_epoch - 1)]\n config.epoch_loss_no_lamb_head_A = config.epoch_loss_no_lamb_head_A[\n :(next_epoch - 1)]\n config.epoch_loss_head_B = config.epoch_loss_head_B[:(next_epoch - 1)]\n config.epoch_loss_no_lamb_head_B = config.epoch_loss_no_lamb_head_B[\n :(next_epoch - 1)]\nelse:\n config.epoch_acc = []\n config.epoch_avg_subhead_acc = []\n config.epoch_stats = []\n\n if config.double_eval:\n config.double_eval_acc = []\n config.double_eval_avg_subhead_acc = []\n config.double_eval_stats = []\n\n config.epoch_loss_head_A = []\n config.epoch_loss_no_lamb_head_A = []\n\n config.epoch_loss_head_B = []\n config.epoch_loss_no_lamb_head_B = []\n\n sub_head = None\n if config.select_sub_head_on_loss:\n sub_head = get_subhead_using_loss(config, dataloaders_head_B, net,\n sobel=True, lamb=config.lamb)\n _ = cluster_eval(config, net,\n mapping_assignment_dataloader=mapping_assignment_dataloader,\n mapping_test_dataloader=mapping_test_dataloader,\n sobel=True,\n use_sub_head=sub_head)\n\n print(\"Pre: time %s: \\n %s\" % (datetime.now(), nice(config.epoch_stats[-1])))\n if config.double_eval:\n print(\"double eval: \\n %s\" % (nice(config.double_eval_stats[-1])))\n sys.stdout.flush()\n next_epoch = 1\n\nfig, axarr = plt.subplots(6 + 2 * int(config.double_eval),\n figsize=(20, 20))\n\n# Train ------------------------------------------------------------------------\n\nfor e_i in range(next_epoch, config.num_epochs):\n print(\"Starting e_i: %d\" % e_i)\n\n if e_i in config.lr_schedule:\n optimiser = update_lr(optimiser, lr_mult=config.lr_mult)\n\n for head_i in range(2):\n head = heads[head_i]\n if head == \"A\":\n dataloaders = dataloaders_head_A\n epoch_loss = config.epoch_loss_head_A\n epoch_loss_no_lamb = config.epoch_loss_no_lamb_head_A\n elif head == \"B\":\n dataloaders = dataloaders_head_B\n epoch_loss = config.epoch_loss_head_B\n epoch_loss_no_lamb = config.epoch_loss_no_lamb_head_B\n\n avg_loss = 0. 
# over heads and head_epochs (and sub_heads)\n avg_loss_no_lamb = 0.\n avg_loss_count = 0\n\n for head_i_epoch in range(head_epochs[head]):\n sys.stdout.flush()\n\n iterators = (d for d in dataloaders)\n\n b_i = 0\n for tup in zip(*iterators):\n net.module.zero_grad()\n # net.zero_grad()\n\n # one less because this is before sobel\n all_imgs = torch.zeros(config.batch_sz, config.in_channels - 1,\n config.input_sz,\n config.input_sz).cuda()\n all_imgs_tf = torch.zeros(config.batch_sz, config.in_channels - 1,\n config.input_sz,\n config.input_sz).cuda()\n\n imgs_curr = tup[0][0] # always the first\n curr_batch_sz = imgs_curr.size(0)\n for d_i in range(config.num_dataloaders):\n imgs_tf_curr = tup[1 + d_i][0] # from 2nd to last\n assert (curr_batch_sz == imgs_tf_curr.size(0))\n\n actual_batch_start = d_i * curr_batch_sz\n actual_batch_end = actual_batch_start + curr_batch_sz\n all_imgs[actual_batch_start:actual_batch_end, :, :, :] = \\\n imgs_curr.cuda()\n all_imgs_tf[actual_batch_start:actual_batch_end, :, :, :] = \\\n imgs_tf_curr.cuda()\n\n if not (curr_batch_sz == config.dataloader_batch_sz):\n print(\"last batch sz %d\" % curr_batch_sz)\n\n curr_total_batch_sz = curr_batch_sz * config.num_dataloaders\n all_imgs = all_imgs[:curr_total_batch_sz, :, :, :]\n all_imgs_tf = all_imgs_tf[:curr_total_batch_sz, :, :, :]\n\n all_imgs = sobel_process(all_imgs, config.include_rgb)\n all_imgs_tf = sobel_process(all_imgs_tf, config.include_rgb)\n\n x_outs = net(all_imgs, head=head)\n x_tf_outs = net(all_imgs_tf, head=head)\n # x_tf_outs = x_outs\n # x_tf_outs[0] = 1\n\n avg_loss_batch = None # avg over the sub_heads\n avg_loss_no_lamb_batch = None\n for i in range(config.num_sub_heads):\n loss, loss_no_lamb, p_i_j = iid_loss(x_outs[i], x_tf_outs[i], lamb=config.lamb)\n\n if avg_loss_batch is None:\n avg_loss_batch = loss\n avg_loss_no_lamb_batch = loss_no_lamb\n else:\n avg_loss_batch += loss\n avg_loss_no_lamb_batch += loss_no_lamb\n\n avg_loss_batch /= config.num_sub_heads\n avg_loss_no_lamb_batch /= config.num_sub_heads\n\n if ((b_i % 100) == 0) or (e_i == next_epoch and b_i < 10):\n print(\"Model ind %d epoch %d head %s head_i_epoch %d batch %d: avg \"\n \"loss %f avg loss no lamb %f time %s\" %\n (config.model_ind, e_i, head, head_i_epoch, b_i,\n avg_loss_batch.item(), avg_loss_no_lamb_batch.item(),\n datetime.now()))\n # print(\"p_i_j \", p_i_j)\n sys.stdout.flush()\n\n if not np.isfinite(avg_loss_batch.item()):\n print(\"Loss is not finite... 
%s:\" % str(avg_loss_batch))\n exit(1)\n\n avg_loss += avg_loss_batch.item()\n avg_loss_no_lamb += avg_loss_no_lamb_batch.item()\n avg_loss_count += 1\n\n avg_loss_batch.backward()\n optimiser.step()\n\n b_i += 1\n if b_i == 2 and config.test_code:\n break\n\n avg_loss = float(avg_loss / avg_loss_count)\n avg_loss_no_lamb = float(avg_loss_no_lamb / avg_loss_count)\n\n epoch_loss.append(avg_loss)\n epoch_loss_no_lamb.append(avg_loss_no_lamb)\n\n # Eval -----------------------------------------------------------------------\n\n # Can also pick the subhead using the evaluation process (to do this,\n # set use_sub_head=None)\n sub_head = None\n if config.select_sub_head_on_loss:\n sub_head = get_subhead_using_loss(config, dataloaders_head_B, net,\n sobel=True, lamb=config.lamb)\n is_best = cluster_eval(config, net,\n mapping_assignment_dataloader=mapping_assignment_dataloader,\n mapping_test_dataloader=mapping_test_dataloader,\n sobel=True,\n use_sub_head=sub_head)\n\n print(\"Pre: time %s: \\n %s\" % (datetime.now(), nice(config.epoch_stats[-1])))\n if config.double_eval:\n print(\" double eval: \\n %s\" % (nice(config.double_eval_stats[-1])))\n sys.stdout.flush()\n\n axarr[0].clear()\n axarr[0].plot(config.epoch_acc)\n axarr[0].set_title(\"acc (best), top: %f\" % max(config.epoch_acc))\n\n axarr[1].clear()\n axarr[1].plot(config.epoch_avg_subhead_acc)\n axarr[1].set_title(\"acc (avg), top: %f\" % max(config.epoch_avg_subhead_acc))\n\n axarr[2].clear()\n axarr[2].plot(config.epoch_loss_head_A)\n axarr[2].set_title(\"Loss head A\")\n\n axarr[3].clear()\n axarr[3].plot(config.epoch_loss_no_lamb_head_A)\n axarr[3].set_title(\"Loss no lamb head A\")\n\n axarr[4].clear()\n axarr[4].plot(config.epoch_loss_head_B)\n axarr[4].set_title(\"Loss head B\")\n\n axarr[5].clear()\n axarr[5].plot(config.epoch_loss_no_lamb_head_B)\n axarr[5].set_title(\"Loss no lamb head B\")\n\n if config.double_eval:\n axarr[6].clear()\n axarr[6].plot(config.double_eval_acc)\n axarr[6].set_title(\"double eval acc (best), top: %f\" %\n max(config.double_eval_acc))\n\n axarr[7].clear()\n axarr[7].plot(config.double_eval_avg_subhead_acc)\n axarr[7].set_title(\"double eval acc (avg), top: %f\" %\n max(config.double_eval_avg_subhead_acc))\n\n fig.tight_layout()\n fig.canvas.draw_idle()\n fig.savefig(os.path.join(config.out_dir, \"plots.png\"))\n\n if is_best or (e_i % config.save_freq == 0):\n net.module.cpu()\n # net.cpu()\n\n if e_i % config.save_freq == 0:\n #torch.save(net.state_dict(),\n torch.save(net.module.state_dict(),\n os.path.join(config.out_dir, \"latest_net.pytorch\"))\n torch.save(optimiser.state_dict(),\n os.path.join(config.out_dir, \"latest_optimiser.pytorch\"))\n config.last_epoch = e_i # for last saved version\n\n if is_best:\n # torch.save(net.state_dict(),\n torch.save(net.module.state_dict(),\n os.path.join(config.out_dir, \"best_net.pytorch\"))\n torch.save(optimiser.state_dict(),\n os.path.join(config.out_dir, \"best_optimiser.pytorch\"))\n with open(os.path.join(config.out_dir, \"best_config.pickle\"),\n 'wb') as outfile:\n pickle.dump(config, outfile)\n\n with open(os.path.join(config.out_dir, \"best_config.txt\"),\n \"w\") as text_file:\n text_file.write(\"%s\" % config)\n\n net.module.cuda()\n # net.cuda()\n\n with open(os.path.join(config.out_dir, \"config.pickle\"),\n 'wb') as outfile:\n pickle.dump(config, outfile)\n\n with open(os.path.join(config.out_dir, \"config.txt\"),\n \"w\") as text_file:\n text_file.write(\"%s\" % config)\n\n if config.test_code:\n exit(0)\n"
] |
[
[
"torch.zeros",
"torch.load",
"matplotlib.use",
"torch.nn.DataParallel",
"numpy.array"
]
] |
carlomt/dicom_tools
|
[
"4fc7dae1eadce562894792cae441721deaf01b5f"
] |
[
"dicom_tools/ml_out_roi.py"
] |
[
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 22 09:46:37 2018\n\n@author: andrea\n\"\"\"\n\nfrom __future__ import division, print_function\n\nimport cv2\nimport numpy as np\n\ntry:\n from keras import backend as K\n K.set_image_data_format('channels_last')\n keras_found = True\nexcept ImportError:\n keras_found = False\n print(\"WARNING ml_out_roi.py: keras not found\")\nimport SimpleITK as sitk\nfrom skimage.transform import resize\nfrom skimage.exposure import equalize_adapthist\n\nfrom dicom_tools.ml_models import *\n#from metrics import *\n\ndef check_import_keras():\n return keras_found\n\ndef get_model(img_rows, img_cols):\n #model = UNet((img_rows, img_cols,1), start_ch=16, depth=5, batchnorm=True, dropout=0.5, maxpool=True, residual=False,upconv=True)\n model = AllDropOut((img_rows, img_cols,1), out_ch=1, start_ch=64, activation='relu', dropout=0.1, batchnorm=True, residual=False)\n #model= InvertedNet((img_rows, img_cols,1), out_ch=1, start_ch=256, activation='elu', dropout=0.1, batchnorm=True, residual=False)\n model.load_weights('weights.h5')\n #model.compile( optimizer=Adam(), loss=dice_coef_loss, metrics=[dice_coef])\n return model\n\ndef single_img_resize(img, img_rows, img_cols, equalize=True):\n\n new_img = np.zeros([img_rows,img_cols])\n \n if equalize:\n img = equalize_adapthist( img, clip_limit=0.05 )\n # img = clahe.apply(cv2.convertScaleAbs(img))\n\n new_img = cv2.resize( img, (img_rows, img_cols), interpolation=cv2.INTER_NEAREST )\n\n return new_img\n\ndef smooth_images(imgs, t_step=0.125, n_iter=5):\n \"\"\"\n Curvature driven image denoising.\n In my experience helps significantly with segmentation.\n \"\"\"\n\n for mm in range(len(imgs)):\n img = sitk.GetImageFromArray(imgs[mm])\n img = sitk.CurvatureFlow(image1=img,\n timeStep=t_step,\n numberOfIterations=n_iter)\n\n imgs[mm] = sitk.GetArrayFromImage(img)\n\n\n return imgs\n\ndef pre_process_img(numpy_imgs):\n \n img_rows=256\n img_cols=256\n numpy_imgs=numpy_imgs[:,:,:,0]\n numpy_imgs=(numpy_imgs-numpy_imgs.min())/(numpy_imgs.max()-numpy_imgs.min())\n \n images=[]\n for i in range(numpy_imgs.shape[0]):\n \n imgs= single_img_resize(numpy_imgs[i,:,:], img_rows, img_cols, equalize=True)\n images.append( imgs )\n\n images=np.array(images).reshape(-1, img_rows, img_cols, 1)\n images = smooth_images(images)\n mu = np.mean(images)\n sigma = np.std(images)\n images = (images - mu)/sigma\n \n return images\n \ndef post_process_img(numpy_imgs,img_rows,img_cols):\n \n \n numpy_imgs=numpy_imgs[:,:,:,0]\n \n images=[]\n for i in range(numpy_imgs.shape[0]):\n \n imgs= single_img_resize(numpy_imgs[i,:,:], img_rows, img_cols, equalize=False)\n images.append( imgs )\n\n images=np.array(images)\n \n \n \n \n return images\n\ndef use_net( X_test,regenerate=True ):\n\n img_rows = X_test.shape[1]\n img_cols = X_test.shape[2]\n\n model = get_model(img_rows, img_cols)\n\n if regenerate:\n y_pred = model.predict( X_test, verbose=1,batch_size=4)\n else:\n y_pred=np.load('last_prediction.npy')\n return y_pred\n\n# if __name__=='__main__':\ndef ml_out_roi(data):\n # X_test_dt = np.load('X_test_dt.npy')\n X_test_dt = data\n X_test_dt=np.transpose(X_test_dt[:,:,::-1,:], (0, 2, 1,3))\n imgs=pre_process_img(X_test_dt)\n \n pred = use_net( imgs,regenerate=True )\n #np.save('last_prediction.npy',pred)\n \n x=post_process_img(pred,X_test_dt.shape[1],X_test_dt.shape[2]) \n # x=np.transpose(x[:,:,::-1,:], (0, 2, 1,3))\n x=np.transpose(x[:,::-1,:], (0, 2, 1))\n\n return x\n \n#NON necessario\n# import scipy.misc\n# 
for i in range(pred.shape[0]):\n# \n# scipy.misc.toimage( x[i,:,:], cmin=0.0, cmax=1.0).save('pred/outfile_'+str(i)+'.jpg')\n \n \n"
] |
[
[
"numpy.std",
"numpy.mean",
"numpy.transpose",
"numpy.load",
"numpy.array",
"numpy.zeros"
]
] |
Abhay242/language-identification-
|
[
"4b05f6cba588bc4862a3034911407f5f503db0d0"
] |
[
"keras/predict.py"
] |
[
"import argparse\nimport numpy as np\nimport os\nimport sys\nfrom keras.models import load_model\n\nfrom data_loaders.SpectrogramGenerator import SpectrogramGenerator\n\nclass_labels = [\"EN\", \"DE\", \"FR\", \"ES\", \"CN\", \"RU\"]\n\ndef predict(cli_args):\n\n config = {\"pixel_per_second\": 50, \"input_shape\": [129, 500, 1], \"num_classes\": 4}\n data_generator = SpectrogramGenerator(cli_args.input_file, config, shuffle=False, run_only_once=True).get_generator()\n data = [np.divide(image, 255.0) for image in data_generator]\n data = np.stack(data)\n\n # Model Generation\n model = load_model(cli_args.model_dir)\n\n probabilities = model.predict(data)\n\n classes = np.argmax(probabilities, axis=1)\n average_prob = np.mean(probabilities, axis=0)\n average_class = np.argmax(average_prob)\n\n print(classes, class_labels[average_class], average_prob)\n return probabilities\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--model', dest='model_dir', required=True)\n parser.add_argument('--input', dest='input_file', required=True)\n cli_args = parser.parse_args()\n\n if not os.path.isfile(cli_args.input_file):\n sys.exit(\"Input is not a file.\")\n\n\n predict(cli_args)\n"
] |
[
[
"numpy.divide",
"numpy.argmax",
"numpy.mean",
"numpy.stack"
]
] |
nuaa-QK/1_NAS
|
[
"1660a9bb259d6f3844fa34e394921fea7f4183c3"
] |
[
"1_nas/evaluator.py"
] |
[
"import os\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom base import Cell, NetworkItem\r\nfrom info_str import NAS_CONFIG\r\nfrom utils import NAS_LOG, Logger\r\n\r\nimport pickle\r\nimport random\r\nimport sys\r\nimport time\r\nimport copy\r\n\r\nclass DataSet:\r\n\r\n def __init__(self, image_size=32, num_class=10):\r\n self.IMAGE_SIZE = image_size\r\n self.NUM_CLASSES = num_class\r\n self.NUM_EXAMPLES_FOR_TRAIN = 40000\r\n self.NUM_EXAMPLES_FOR_EVAL = 10000\r\n self.task = \"cifar-10\"\r\n self.data_path = './data/'\r\n return\r\n\r\n def inputs(self):\r\n '''\r\n Method for load data\r\n Returns:\r\n train_data, train_label, valid_data, valid_label, test_data, test_label\r\n '''\r\n print(\"======Loading data======\")\r\n if self.task == 'cifar-10':\r\n test_files = ['test_batch']\r\n train_files = ['data_batch_%d' % d for d in range(1, 6)]\r\n else:\r\n train_files = ['train']\r\n test_files = ['test']\r\n train_data, train_label = self._load(train_files)\r\n train_data, train_label, valid_data, valid_label = self._split(\r\n train_data, train_label)\r\n test_data, test_label = self._load(test_files)\r\n print(\"======Data Process Done======\")\r\n return train_data, train_label, valid_data, valid_label, test_data, test_label\r\n\r\n def _load_one(self, file):\r\n with open(file, 'rb') as fo:\r\n batch = pickle.load(fo, encoding='bytes')\r\n data = batch[b'data']\r\n label = batch[b'labels'] if self.task == 'cifar-10' else batch[b'fine_labels']\r\n return data, label\r\n\r\n def _load(self, files):\r\n file_name = 'cifar-10-batches-py' if self.task == 'cifar-10' else 'cifar-100-python'\r\n data_dir = os.path.join(self.data_path, file_name)\r\n data, label = self._load_one(os.path.join(data_dir, files[0]))\r\n for f in files[1:]:\r\n batch_data, batch_label = self._load_one(os.path.join(data_dir, f))\r\n data = np.append(data, batch_data, axis=0)\r\n label = np.append(label, batch_label, axis=0)\r\n label = np.array([[float(i == label)\r\n for i in range(self.NUM_CLASSES)] for label in label])\r\n data = data.reshape([-1, 3, self.IMAGE_SIZE, self.IMAGE_SIZE])\r\n data = data.transpose([0, 2, 3, 1])\r\n # pre-process\r\n data = self._normalize(data)\r\n\r\n return data, label\r\n\r\n def _split(self, data, label):\r\n # shuffle\r\n index = [i for i in range(len(data))]\r\n random.shuffle(index)\r\n data = data[index]\r\n label = label[index]\r\n return data[:self.NUM_EXAMPLES_FOR_TRAIN], label[:self.NUM_EXAMPLES_FOR_TRAIN], \\\r\n data[self.NUM_EXAMPLES_FOR_TRAIN:self.NUM_EXAMPLES_FOR_TRAIN + self.NUM_EXAMPLES_FOR_EVAL], \\\r\n label[self.NUM_EXAMPLES_FOR_TRAIN:self.NUM_EXAMPLES_FOR_TRAIN +\r\n self.NUM_EXAMPLES_FOR_EVAL]\r\n\r\n def _normalize(self, x_train):\r\n x_train = x_train.astype('float32')\r\n\r\n x_train[:, :, :, 0] = (\r\n x_train[:, :, :, 0] - np.mean(x_train[:, :, :, 0])) / np.std(x_train[:, :, :, 0])\r\n x_train[:, :, :, 1] = (\r\n x_train[:, :, :, 1] - np.mean(x_train[:, :, :, 1])) / np.std(x_train[:, :, :, 1])\r\n x_train[:, :, :, 2] = (\r\n x_train[:, :, :, 2] - np.mean(x_train[:, :, :, 2])) / np.std(x_train[:, :, :, 2])\r\n\r\n return x_train\r\n\r\n def process(self, x):\r\n '''\r\n Method for processing data\r\n Args:\r\n x: data needs processing\r\n Returns:\r\n x: data that has been processed\r\n '''\r\n x = self._random_flip_leftright(x)\r\n x = self._random_crop(x, [32, 32], 4)\r\n x = self._cutout(x)\r\n return x\r\n\r\n def _random_crop(self, batch, crop_shape, padding=None):\r\n oshape = np.shape(batch[0])\r\n if padding:\r\n oshape = 
(oshape[0] + 2 * padding, oshape[1] + 2 * padding)\r\n new_batch = []\r\n npad = ((padding, padding), (padding, padding), (0, 0))\r\n for i in range(len(batch)):\r\n new_batch.append(batch[i])\r\n if padding:\r\n new_batch[i] = np.lib.pad(batch[i], pad_width=npad,\r\n mode='constant', constant_values=0)\r\n nh = random.randint(0, oshape[0] - crop_shape[0])\r\n nw = random.randint(0, oshape[1] - crop_shape[1])\r\n new_batch[i] = new_batch[i][nh:nh + crop_shape[0],\r\n nw:nw + crop_shape[1]]\r\n return np.array(new_batch)\r\n\r\n def _random_flip_leftright(self, batch):\r\n for i in range(len(batch)):\r\n if bool(random.getrandbits(1)):\r\n batch[i] = np.fliplr(batch[i])\r\n return batch\r\n\r\n def _cutout(self, x):\r\n for i in range(len(x)):\r\n cut_size = random.randint(0, self.IMAGE_SIZE // 2)\r\n s = random.randint(0, self.IMAGE_SIZE - cut_size)\r\n x[i, s:s + cut_size, s:s + cut_size, :] = 0\r\n return x\r\n\r\n\r\nclass Evaluator:\r\n def __init__(self):\r\n image_size = 32\r\n num_class = 10\r\n self.data_set = DataSet(image_size, num_class)\r\n # don't change the parameters below\r\n os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\r\n self.block_num = 0\r\n self.log = ''\r\n self.model_path = \"./model\"\r\n\r\n # change the value of parameters below\r\n self.batch_size = 50\r\n self.input_shape = [self.batch_size, image_size, image_size, 3]\r\n self.output_shape = [self.batch_size, num_class]\r\n self.train_data, self.train_label, self.valid_data, self.valid_label, self.test_data, self.test_label = self.data_set.inputs()\r\n\r\n self.INITIAL_LEARNING_RATE = 0.025\r\n self.weight_decay = 0.0003\r\n self.momentum_rate = 0.9\r\n\r\n def _set_epoch(self, e):\r\n self.epoch = e\r\n return\r\n\r\n def _inference(self, images, graph_part, cell_list, train_flag):\r\n '''Method for recovering the network model provided by graph_part and cellist.\r\n Args:\r\n images: Images returned from Dataset() or inputs().\r\n graph_part: The topology structure of th network given by adjacency table\r\n cellist:\r\n Returns:\r\n Logits.'''\r\n topo_order = self._toposort(graph_part)\r\n nodelen = len(graph_part)\r\n # input list for every cell in network\r\n inputs = [images for _ in range(nodelen)]\r\n # bool list for whether this cell has already got input or not\r\n getinput = [False for _ in range(nodelen)]\r\n getinput[0] = True\r\n\r\n for node in topo_order:\r\n layer = self._make_layer(inputs[node], cell_list[node], node, train_flag)\r\n\r\n # update inputs information of the cells below this cell\r\n for j in graph_part[node]:\r\n if getinput[j]: # if this cell already got inputs from other cells precedes it\r\n inputs[j] = self._pad(inputs[j], layer)\r\n else:\r\n inputs[j] = layer\r\n getinput[j] = True\r\n\r\n # give last layer a name\r\n last_layer = tf.identity(layer, name=\"last_layer\" + str(self.block_num))\r\n return last_layer\r\n\r\n def _toposort(self, graph):\r\n node_len = len(graph)\r\n in_degrees = dict((u, 0) for u in range(node_len))\r\n for u in range(node_len):\r\n for v in graph[u]:\r\n in_degrees[v] += 1\r\n queue = [u for u in range(node_len) if in_degrees[u] == 0]\r\n result = []\r\n while queue:\r\n u = queue.pop()\r\n result.append(u)\r\n for v in graph[u]:\r\n in_degrees[v] -= 1\r\n if in_degrees[v] == 0:\r\n queue.append(v)\r\n return result\r\n\r\n def _make_layer(self, inputs, cell, node, train_flag):\r\n '''Method for constructing and calculating cell in tensorflow\r\n Args:\r\n inputs: the input tensor of this operation\r\n cell: Class Cell(), hyper parameters for 
building this layer\r\n node: int, the index of this operation\r\n train_flag: boolean, indicating whether this is a training process or not\r\n Returns:\r\n layer: tensor.'''\r\n if cell.type == 'conv':\r\n layer = self._makeconv(inputs, cell, node, train_flag)\r\n elif cell.type == 'pooling':\r\n layer = self._makepool(inputs, cell)\r\n elif cell.type == 'id':\r\n layer = tf.identity(inputs)\r\n elif cell.type == 'sep_conv':\r\n layer = self._makesep_conv(inputs, cell, node, train_flag)\r\n # TODO add any other new operations here\r\n # use the form as shown above\r\n # '''elif cell.type == 'operation_name':\r\n # layer = self._name_your_function_here(inputs, cell, node)'''\r\n # The \"_name_your_function_here\" is a function take (inputs, cell, node) or any other needed parameter as\r\n # input, and output the corresponding tensor calculated use tensorflow, see self._makeconv as an example.\r\n # The \"inputs\" is the input tensor, and \"cell\" is the hyper parameters for building this layer, given by\r\n # class Cell(). The \"node\" is the index of this layer, mainly for the nomination of the output tensor.\r\n else:\r\n assert False, \"Wrong cell type!\"\r\n\r\n return layer\r\n\r\n def _name_your_function_here(self, inputs, cell, node):\r\n \"\"\"\r\n the operation defined by user,\r\n Args:\r\n inputs: the input tensor of this operation\r\n cell: Class Cell(), hyper parameters for building this layer\r\n node: int, the index of this operation\r\n Returns:\r\n layer: the output tensor\r\n \"\"\"\r\n # TODO add your function here if any new operation was added, see _makeconv as an example\r\n return\r\n\r\n def _makeconv(self, x, hplist, node, train_flag):\r\n \"\"\"Generates a convolutional layer according to information in hplist\r\n Args:\r\n x: inputing data.\r\n hplist: hyperparameters for building this layer\r\n node: int, the index of this operation\r\n Returns:\r\n conv_layer: the output tensor\r\n \"\"\"\r\n with tf.variable_scope('block' + str(self.block_num) + 'conv' + str(node)) as scope:\r\n inputdim = x.shape[3]\r\n kernel = self._get_variable('weights',\r\n shape=[hplist.kernel_size, hplist.kernel_size, inputdim, hplist.filter_size])\r\n x = self._activation_layer(hplist.activation, x, scope)\r\n x = tf.nn.conv2d(x, kernel, [1, 1, 1, 1], padding='SAME')\r\n biases = self._get_variable('biases', hplist.filter_size)\r\n x = self._batch_norm(tf.nn.bias_add(x, biases), train_flag)\r\n return x\r\n\r\n def _makesep_conv(self, inputs, hplist, node, train_flag):\r\n with tf.variable_scope('block' + str(self.block_num) + 'conv' + str(node)) as scope:\r\n inputdim = inputs.shape[3]\r\n dfilter = self._get_variable('weights', shape=[hplist.kernel_size, hplist.kernel_size, inputdim, 1])\r\n pfilter = self._get_variable('pointwise_filter', [1, 1, inputdim, hplist.filter_size])\r\n conv = tf.nn.separable_conv2d(inputs, dfilter, pfilter, strides=[1, 1, 1, 1], padding='SAME')\r\n biases = self._get_variable('biases', hplist.filter_size)\r\n bn = self._batch_norm(tf.nn.bias_add(conv, biases), train_flag)\r\n conv_layer = self._activation_layer(hplist.activation, bn, scope)\r\n return conv_layer\r\n\r\n def _batch_norm(self, input, train_flag):\r\n return tf.contrib.layers.batch_norm(input, decay=0.9, center=True, scale=True, epsilon=1e-3,\r\n updates_collections=None, is_training=train_flag)\r\n\r\n def _get_variable(self, name, shape):\r\n if name == \"weights\":\r\n ini = tf.contrib.keras.initializers.he_normal()\r\n else:\r\n ini = tf.constant_initializer(0.0)\r\n return 
tf.get_variable(name, shape, initializer=ini)\r\n\r\n def _activation_layer(self, type, inputs, scope):\r\n if type == 'relu':\r\n layer = tf.nn.relu(inputs, name=scope.name)\r\n elif type == 'relu6':\r\n layer = tf.nn.relu6(inputs, name=scope.name)\r\n elif type == 'tanh':\r\n layer = tf.tanh(inputs, name=scope.name)\r\n elif type == 'sigmoid':\r\n layer = tf.sigmoid(inputs, name=scope.name)\r\n elif type == 'leakyrelu':\r\n layer = tf.nn.leaky_relu(inputs, name=scope.name)\r\n else:\r\n layer = tf.identity(inputs, name=scope.name)\r\n\r\n return layer\r\n\r\n def _makepool(self, inputs, hplist):\r\n \"\"\"Generates a pooling layer according to information in hplist\r\n Args:\r\n inputs: inputing data.\r\n hplist: hyperparameters for building this layer\r\n Returns:\r\n tensor.\r\n \"\"\"\r\n if hplist.pooling_type == 'avg':\r\n return tf.nn.avg_pool(inputs, ksize=[1, hplist.kernel_size, hplist.kernel_size, 1],\r\n strides=[1, hplist.kernel_size, hplist.kernel_size, 1], padding='SAME')\r\n elif hplist.pooling_type == 'max':\r\n return tf.nn.max_pool(inputs, ksize=[1, hplist.kernel_size, hplist.kernel_size, 1],\r\n strides=[1, hplist.kernel_size, hplist.kernel_size, 1], padding='SAME')\r\n elif hplist.pooling_type == 'global':\r\n return tf.reduce_mean(inputs, [1, 2], keep_dims=True)\r\n\r\n def _makedense(self, inputs, hplist):\r\n \"\"\"Generates dense layers according to information in hplist\r\n Args:\r\n inputs: inputing data.\r\n hplist: hyperparameters for building layers\r\n node: number of this cell\r\n Returns:\r\n tensor.\r\n \"\"\"\r\n inputs = tf.reshape(inputs, [self.batch_size, -1])\r\n\r\n for i, neural_num in enumerate(hplist[1]):\r\n with tf.variable_scope('block' + str(self.block_num) + 'dense' + str(i)) as scope:\r\n weights = self._get_variable('weights', shape=[inputs.shape[-1], neural_num])\r\n biases = self._get_variable('biases', [neural_num])\r\n mul = tf.matmul(inputs, weights) + biases\r\n if neural_num == self.output_shape[-1]:\r\n local3 = self._activation_layer('', mul, scope)\r\n else:\r\n local3 = self._activation_layer(hplist[2], mul, scope)\r\n inputs = local3\r\n return inputs\r\n\r\n def _pad(self, inputs, layer):\r\n # padding\r\n a = tf.shape(layer)[1]\r\n b = tf.shape(inputs)[1]\r\n pad = tf.abs(tf.subtract(a, b))\r\n output = tf.where(tf.greater(a, b), tf.concat([tf.pad(inputs, [[0, 0], [0, pad], [0, pad], [0, 0]]), layer], 3),\r\n tf.concat([inputs, tf.pad(layer, [[0, 0], [0, pad], [0, pad], [0, 0]])], 3))\r\n return output\r\n\r\n def evaluate(self, network, pre_block=[], is_bestNN=False, update_pre_weight=False):\r\n '''Method for evaluate the given network.\r\n Args:\r\n network: NetworkItem()\r\n pre_block: The pre-block structure, every block has two parts: graph_part and cell_list of this block.\r\n is_bestNN: Symbol for indicating whether the evaluating network is the best network of this round, default False.\r\n update_pre_weight: Symbol for indicating whether to update previous blocks' weight, default by False.\r\n Returns:\r\n Accuracy'''\r\n assert self.train_num >= self.batch_size\r\n tf.reset_default_graph()\r\n self.block_num = len(pre_block)\r\n\r\n self.log = \"-\" * 20 + str(network.id) + \"-\" * 20 + '\\n'\r\n for block in pre_block:\r\n self.log = self.log + str(block.graph) + str(block.cell_list) + '\\n'\r\n self.log = self.log + str(network.graph) + str(network.cell_list) + '\\n'\r\n\r\n config = tf.ConfigProto()\r\n config.gpu_options.allow_growth = True\r\n with tf.Session(config=config) as sess:\r\n data_x, data_y, 
block_input, train_flag = self._get_input(sess, pre_block, update_pre_weight)\r\n\r\n graph_full, cell_list = self._recode(network.graph, network.cell_list,\r\n NAS_CONFIG['nas_main']['repeat_num'])\r\n # a pooling layer for last repeat block\r\n graph_full = graph_full + [[]]\r\n if NAS_CONFIG['nas_main']['link_node']:\r\n # a pooling layer for last repeat block\r\n cell_list = cell_list + [Cell('pooling', 'max', 2)]\r\n else:\r\n cell_list = cell_list + [Cell('id', 'max', 1)]\r\n logits = self._inference(block_input, graph_full, cell_list, train_flag)\r\n\r\n precision, log = self._eval(sess, logits, data_x, data_y, train_flag)\r\n self.log += log\r\n\r\n saver = tf.train.Saver(tf.global_variables())\r\n\r\n if is_bestNN: # save model\r\n if not os.path.exists(os.path.join(self.model_path)):\r\n os.makedirs(os.path.join(self.model_path))\r\n saver.save(sess, os.path.join(self.model_path, 'model' + str(network.id)))\r\n NAS_LOG = Logger()\r\n NAS_LOG << ('eva_eva', self.log)\r\n return precision\r\n\r\n def retrain(self, pre_block):\r\n '''\r\n Method for retrain the whole network\r\n :param pre_block:\r\n :return:\r\n '''\r\n tf.reset_default_graph()\r\n self.train_num = 50000\r\n self.block_num = len(pre_block) * NAS_CONFIG['nas_main']['repeat_num']\r\n\r\n retrain_log = \"-\" * 20 + \"retrain\" + \"-\" * 20 + '\\n'\r\n\r\n data_x, labels, block_input, train_flag = self._get_input('', [])\r\n for block in pre_block:\r\n self.block_num += 1\r\n cell_list = []\r\n for cell in block.cell_list:\r\n if cell.type == 'conv':\r\n cell_list.append(Cell(cell.type, cell.filter_size * 2, cell.kernel_size, cell.activation))\r\n else:\r\n cell_list.append(cell)\r\n # repeat search\r\n graph_full, cell_list = self._recode(block.graph, block.cell_list, NAS_CONFIG['nas_main']['repeat_num'])\r\n # add pooling layer only in last repeat block\r\n cell_list.append(Cell('pooling', 'max', 2))\r\n graph_full.append([])\r\n retrain_log = retrain_log + str(graph_full) + str(cell_list) + '\\n'\r\n block_input = self._inference(block_input, graph_full, cell_list, train_flag)\r\n\r\n sess = tf.Session()\r\n precision, log = self._eval(sess, block_input, data_x, labels, train_flag, retrain=True)\r\n sess.close()\r\n retrain_log += log\r\n NAS_LOG = Logger()\r\n NAS_LOG << ('eva_eva', retrain_log)\r\n return float(precision)\r\n\r\n def _get_input(self, sess, pre_block, update_pre_weight=False):\r\n '''Get input for _inference'''\r\n # if it got previous blocks\r\n if len(pre_block) > 0:\r\n tmp = os.path.join(self.model_path, 'model' + str(pre_block[-1].id) + '.meta')\r\n assert os.path.exists(tmp)\r\n new_saver = tf.train.import_meta_graph(tmp)\r\n new_saver.restore(sess, os.path.join(\r\n self.model_path, 'model' + str(pre_block[-1].id)))\r\n graph = tf.get_default_graph()\r\n data_x = graph.get_tensor_by_name(\"input:0\")\r\n data_y = graph.get_tensor_by_name(\"label:0\")\r\n train_flag = graph.get_tensor_by_name(\"train_flag:0\")\r\n block_input = graph.get_tensor_by_name(\"last_layer\" + str(self.block_num - 1) + \":0\")\r\n # only when there's not so many network in the pool will we update the previous blocks' weight\r\n if not update_pre_weight:\r\n block_input = tf.stop_gradient(block_input, name=\"stop_gradient\")\r\n # if it's the first block\r\n else:\r\n data_x = tf.placeholder(tf.float32, self.input_shape, name='input')\r\n data_y = tf.placeholder(tf.int32, self.output_shape, name=\"label\")\r\n train_flag = tf.placeholder(tf.bool, name='train_flag')\r\n block_input = tf.identity(data_x)\r\n return 
data_x, data_y, block_input, train_flag\r\n\r\n def _recode(self, graph_full, cell_list, repeat_num):\r\n new_graph = [] + graph_full\r\n new_cell_list = [] + cell_list\r\n add = 0\r\n for i in range(repeat_num - 1):\r\n new_cell_list += cell_list\r\n add += len(graph_full)\r\n for sub_list in graph_full:\r\n new_graph.append([x + add for x in sub_list])\r\n return new_graph, new_cell_list\r\n\r\n def _eval(self, sess, logits, data_x, data_y, train_flag, retrain=False):\r\n # TODO change here to run training step and evaluation step\r\n \"\"\"\r\n The actual training process, including the definination of loss and train optimizer\r\n Args:\r\n sess: tensorflow session\r\n logits: output tensor of the model, 2-D tensor of shape [self.batch_size, self.NUM_CLASS]\r\n data_x: input image\r\n data_y: input label, 2-D tensor of shape [self.batch_size, self.NUM_CLASS]\r\n Returns:\r\n targets: float, the optimization target, could be the accuracy or the combination of both time and accuracy, etc\r\n saver: Tensorflow Saver class\r\n log: string, log to be write and saved\r\n \"\"\"\r\n logits = tf.nn.dropout(logits, keep_prob=1.0)\r\n logits = self._makedense(logits, ('', [self.output_shape[-1]], ''))\r\n global_step = tf.Variable(0, trainable=False, name='global_step' + str(self.block_num))\r\n accuracy = self._cal_accuracy(logits, data_y)\r\n loss = self._loss(data_y, logits)\r\n train_op = self._train_op(global_step, loss)\r\n\r\n sess.run(tf.global_variables_initializer())\r\n\r\n if retrain:\r\n self.train_data = np.concatenate(\r\n (np.array(self.train_data), np.array(self.valid_data)), axis=0).tolist()\r\n self.train_label = np.concatenate(\r\n (np.array(self.train_label), np.array(self.valid_label)), axis=0).tolist()\r\n max_steps = len(list(self.train_label)) // self.batch_size\r\n test_data = copy.deepcopy(self.test_data)\r\n test_label = copy.deepcopy(self.test_label)\r\n num_iter = len(test_label) // self.batch_size\r\n else:\r\n max_steps = self.train_num // self.batch_size\r\n test_data = copy.deepcopy(self.valid_data)\r\n test_label = copy.deepcopy(self.valid_label)\r\n num_iter = len(self.valid_label) // self.batch_size\r\n\r\n log = ''\r\n cost_time = 0\r\n precision = np.zeros([self.epoch])\r\n for ep in range(self.epoch):\r\n # print(\"epoch\", ep, \":\")\r\n # train step\r\n start_time = time.time()\r\n for step in range(max_steps):\r\n batch_x = self.train_data[step * self.batch_size:(step + 1) * self.batch_size]\r\n batch_y = self.train_label[step * self.batch_size:(step + 1) * self.batch_size]\r\n batch_x = DataSet().process(batch_x)\r\n _, loss_value, acc = sess.run([train_op, loss, accuracy],\r\n feed_dict={data_x: batch_x, data_y: batch_y, train_flag: True})\r\n if np.isnan(loss_value):\r\n return -1, log\r\n # sys.stdout.write(\"\\r>> train %d/%d loss %.4f acc %.4f\" % (step, max_steps, loss_value, acc))\r\n # sys.stdout.write(\"\\n\")\r\n\r\n # evaluation step\r\n for step in range(num_iter):\r\n batch_x = test_data[step *\r\n self.batch_size:(step + 1) * self.batch_size]\r\n batch_y = test_label[step *\r\n self.batch_size:(step + 1) * self.batch_size]\r\n l, acc_ = sess.run([loss, accuracy],\r\n feed_dict={data_x: batch_x, data_y: batch_y, train_flag: False})\r\n precision[ep] += acc_ / num_iter\r\n # sys.stdout.write(\"\\r>> valid %d/%d loss %.4f acc %.4f\" % (step, num_iter, l, acc_))\r\n # sys.stdout.write(\"\\n\")\r\n\r\n # early stop\r\n if ep > 5 and not retrain:\r\n if abs(2 * precision[ep] - precision[ep - 5] - precision[ep - 1]) < 0.001 / 
DataSet().NUM_CLASSES:\r\n precision = precision[:ep]\r\n log += 'early stop at %d epoch\\n' % ep\r\n break\r\n\r\n cost_time += (float(time.time() - start_time)) / self.epoch\r\n log += 'epoch %d: precision = %.3f, cost time %.3f\\n' % (ep, precision[ep], float(time.time() - start_time))\r\n # print('precision = %.3f, cost time %.3f' %\r\n # (precision[ep], float(time.time() - start_time)))\r\n\r\n # target = self._cal_multi_target(precision[-1], cost_time)\r\n return precision[-1], log\r\n\r\n def _cal_accuracy(self, logits, labels):\r\n \"\"\"\r\n calculate the target of this task\r\n Args:\r\n logits: Logits from softmax.\r\n labels: Labels from distorted_inputs or inputs(). 2-D tensor of shape [self.batch_size, self.NUM_CLASS]\r\n Returns:\r\n Target tensor of type float.\r\n \"\"\"\r\n correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\r\n return accuracy\r\n\r\n def _loss(self, labels, logits):\r\n \"\"\"\r\n Args:\r\n logits: Logits from softmax.\r\n labels: Labels from distorted_inputs or inputs(). 1-D tensor of shape [self.batch_size]\r\n Returns:\r\n Loss tensor of type float.\r\n \"\"\"\r\n cross_entropy = tf.reduce_mean(\r\n tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))\r\n l2 = tf.add_n([tf.nn.l2_loss(var) for var in tf.trainable_variables()])\r\n loss = cross_entropy + l2 * self.weight_decay\r\n return loss\r\n\r\n def _train_op(self, global_step, loss):\r\n num_batches_per_epoch = self.train_num / self.batch_size\r\n decay_steps = int(num_batches_per_epoch * self.epoch)\r\n lr = tf.train.cosine_decay(self.INITIAL_LEARNING_RATE, global_step, decay_steps, 0.0001)\r\n\r\n # Build a Graph that trains the model with one batch of examples and\r\n # updates the model parameters.\r\n opt = tf.train.MomentumOptimizer(lr, self.momentum_rate, name='Momentum' + str(self.block_num),\r\n use_nesterov=True)\r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(update_ops):\r\n train_op = opt.minimize(loss, global_step=global_step)\r\n return train_op\r\n\r\n def _stats_graph(self):\r\n graph = tf.get_default_graph()\r\n flops = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())\r\n params = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.trainable_variables_parameter())\r\n return flops.total_float_ops, params.total_parameters\r\n\r\n def _cal_multi_target(self, precision, time):\r\n flops, model_size = self._stats_graph()\r\n return precision + 1 / time + 1 / flops + 1 / model_size\r\n\r\n def _set_data_size(self, num):\r\n if num > len(list(self.train_label)) or num < 0:\r\n num = len(list(self.train_label))\r\n print('Warning! 
Data size has been changed to', num, ', all data is loaded.')\r\n self.train_num = num\r\n # print('************A NEW ROUND************')\r\n self.max_steps = self.train_num // self.batch_size\r\n return\r\n\r\n\r\nif __name__ == '__main__':\r\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n eval = Evaluator()\r\n eval._set_data_size(1000)\r\n eval._set_epoch(10)\r\n # graph_full = [[1], [2], [3], []]\r\n # cell_list = [Cell('conv', 64, 5, 'relu'), Cell('pooling', 'max', 3), Cell('conv', 64, 5, 'relu'),\r\n # Cell('pooling', 'max', 3)]\r\n # lenet = NetworkItem(0, graph_full, cell_list, \"\")\r\n # e = eval.evaluate(lenet, [], is_bestNN=True)\r\n # Network.pre_block.append(lenet)\r\n\r\n # graph_full = [[1, 3], [2, 3], [3], [4]]\r\n # cell_list = [Cell('conv', 24, 3, 'relu'), Cell('conv', 32, 3, 'relu'), Cell('conv', 24, 3, 'relu'),\r\n # Cell('conv', 32, 3, 'relu')]\r\n graph_full = [[1, 3], [2, 4], [4], [2]]\r\n cell_list = [Cell('sep_conv', 32, 5, 'relu6'), Cell('sep_conv', 32, 3, 'relu6'), Cell('pooling', 'avg', 3),\r\n Cell('pooling', 'avg', 8)]\r\n network1 = NetworkItem(0, graph_full, cell_list, \"\")\r\n network2 = NetworkItem(1, graph_full, cell_list, \"\")\r\n e = eval.evaluate(network1, is_bestNN=True)\r\n print(e)\r\n # eval.set_data_size(10000)\r\n # e = eval.evaluate(network2, [network1], is_bestNN=True)\r\n # print(e)\r\n eval._set_epoch(1)\r\n print(eval.retrain([network1, network2]))\r\n # eval.add_data(5000)\r\n # print(eval._toposort([[1, 3, 6, 7], [2, 3, 4], [3, 5, 7, 8], [\r\n # 4, 5, 6, 8], [5, 7], [6, 7, 9, 10], [7, 9], [8], [9, 10], [10]]))\r\n # cellist=[('conv', 128, 1, 'relu'), ('conv', 32, 1, 'relu'), ('conv', 256, 1, 'relu'), ('pooling', 'max', 2), ('pooling', 'global', 3), ('conv', 32, 1, 'relu')]\r\n # cellist=[('pooling', 'global', 2), ('pooling', 'max', 3), ('conv', 21, 32, 'leakyrelu'), ('conv', 16, 32, 'leakyrelu'), ('pooling', 'max', 3), ('conv', 16, 32, 'leakyrelu')]\r\n # graph_part = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12], [13], [14], [15], [16], [17], []]\r\n # cell_list = [('conv', 64, 3, 'relu'), ('conv', 64, 3, 'relu'), ('pooling', 'max', 2), ('conv', 128, 3, 'relu'),\r\n # ('conv', 128, 3, 'relu'), ('pooling', 'max', 2), ('conv', 256, 3, 'relu'),\r\n # ('conv', 256, 3, 'relu'), ('conv', 256, 3, 'relu'), ('pooling', 'max', 2),\r\n # ('conv', 512, 3, 'relu'), ('conv', 512, 3, 'relu'), ('conv', 512, 3, 'relu'),\r\n # ('pooling', 'max', 2), ('conv', 512, 3, 'relu'), ('conv', 512, 3, 'relu'),\r\n # ('conv', 512, 3, 'relu'), ('dense', [4096, 4096, 1000], 'relu')]\r\n # pre_block = [network]\r\n # Network.pre_block.append(network1)\r\n # network2 = NetworkItem(1, graph_full, cell_list, \"\")\r\n # e = eval.evaluate(network2, is_bestNN=True)\r\n # Network.pre_block.append(network2)\r\n # network3 = NetworkItem(2, graph_full, cell_list, \"\")\r\n # e = eval.evaluate(network3, is_bestNN=True)\r\n # e=eval.train(network.graph_full,cellist)\r\n # print(e)\r\n"
] |
[
[
"tensorflow.get_variable",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.contrib.keras.initializers.he_normal",
"tensorflow.control_dependencies",
"tensorflow.nn.max_pool",
"tensorflow.profiler.ProfileOptionBuilder.trainable_variables_parameter",
"tensorflow.cast",
"tensorflow.global_variables",
"tensorflow.nn.l2_loss",
"tensorflow.tanh",
"numpy.mean",
"tensorflow.pad",
"tensorflow.profiler.ProfileOptionBuilder.float_operation",
"tensorflow.get_default_graph",
"tensorflow.nn.conv2d",
"tensorflow.train.cosine_decay",
"tensorflow.greater",
"tensorflow.get_collection",
"numpy.fliplr",
"tensorflow.train.import_meta_graph",
"tensorflow.subtract",
"tensorflow.ConfigProto",
"tensorflow.stop_gradient",
"numpy.std",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.trainable_variables",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.nn.dropout",
"numpy.lib.pad",
"tensorflow.matmul",
"tensorflow.shape",
"numpy.isnan",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"numpy.append",
"tensorflow.nn.avg_pool",
"tensorflow.contrib.layers.batch_norm",
"numpy.array",
"tensorflow.nn.leaky_relu",
"tensorflow.nn.bias_add",
"tensorflow.nn.relu",
"tensorflow.nn.relu6",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.nn.separable_conv2d",
"tensorflow.sigmoid",
"tensorflow.constant_initializer",
"numpy.shape"
]
] |
metacogpe/python
|
[
"f7b2bcd0fae9ae9899399e3a314ffea3955896b0"
] |
[
"crypto/bitcoinScraping.py"
] |
[
"import requests # python -m pip install requests\nimport datetime \n \nr = requests.get(\"https://api.korbit.co.kr/v1/ticker/detailed?currency_pair=btc_krw\")\nbitcoin = r.json() \n\ntimestamp = bitcoin['timestamp'] \ndate = datetime.datetime.fromtimestamp(timestamp/1000)\nprint(date)\nprint(bitcoin['bid'])\nprint(bitcoin['ask'])\nprint(bitcoin['volume'])\nprint(r.text)\n\n# pandas series 사용 이유\nfrom pandas import Series # python -m pip install pandas\ns = Series([100, 200, 300, 400])\nprint(s /10) # list 와 달리 사칙 연산 가능\n\n\n# dataframe \nfrom pandas import DataFrame\ndata = {\"open\": [737, 750], \"high\": [755, 780], \"low\": [700, 710], \"close\": [750, 770]} \ndf = DataFrame(data , index=[\"2018-01-01\", \"2018-01-02\"]) \nprint(df)\n\n\n\nimport requests\nimport pandas as pd\nmy_url=\"https://stockplus.com/m/stocks/KOREA-A005930/analysis\"\ntable=pd.read_html(my_url)\n \n\n \nimport pandas as pd \nfrom bs4 import BeautifulSoup as bs\nurl = \"https://finance.naver.com/item/sise_day.nhn?code=066570\" \n\nresponse = requests.get(url, headers=headers)\n\nhtml = bs(response.text, \"lxml\")\nhtml_table = html.select(\"table\")\nlen(html_table)\n\ndf = pd.read_html(url) \n#print(df[0])"
] |
[
[
"pandas.Series",
"pandas.DataFrame",
"pandas.read_html"
]
] |
Ushk/fourier-feature-networks
|
[
"af4947e137e31c5e3a887d800f1995485414297d"
] |
[
"demo.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torchvision\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom dataset import ImageDataset\n\n\nclass Swish(nn.Module):\n\n def __init__(self):\n super().__init__()\n\n def forward(self, x):\n return x * torch.sigmoid(x)\n\n\nclass SirenLayer(nn.Module):\n def __init__(self, in_f, out_f, w0=30, is_first=False, is_last=False):\n super().__init__()\n self.in_f = in_f\n self.w0 = w0\n self.linear = nn.Linear(in_f, out_f)\n self.is_first = is_first\n self.is_last = is_last\n self.init_weights()\n\n def init_weights(self):\n b = 1 / \\\n self.in_f if self.is_first else np.sqrt(6 / self.in_f) / self.w0\n with torch.no_grad():\n self.linear.weight.uniform_(-b, b)\n\n def forward(self, x):\n x = self.linear(x)\n return x if self.is_last else torch.sin(self.w0 * x)\n\n\ndef input_mapping(x, B):\n if B is None:\n return x\n else:\n x_proj = (2. * np.pi * x) @ B.t()\n return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)\n\n\ndef make_network(num_layers, input_dim, hidden_dim):\n layers = [nn.Linear(input_dim, hidden_dim), Swish()]\n for i in range(1, num_layers - 1):\n layers.append(nn.Linear(hidden_dim, hidden_dim))\n layers.append(Swish())\n\n layers.append(nn.Linear(hidden_dim, 3))\n layers.append(nn.Sigmoid())\n return nn.Sequential(*layers)\n\n\ndef gon_model(num_layers, input_dim, hidden_dim):\n layers = [SirenLayer(input_dim, hidden_dim, is_first=True)]\n for i in range(1, num_layers - 1):\n layers.append(SirenLayer(hidden_dim, hidden_dim))\n layers.append(SirenLayer(hidden_dim, 3, is_last=True))\n\n return nn.Sequential(*layers)\n\n\ndef train_model(network_size, learning_rate, iters, B, train_data, test_data, device=\"cpu\"):\n model = gon_model(*network_size).to(device)\n\n optim = torch.optim.Adam(model.parameters(), lr=learning_rate)\n loss_fn = torch.nn.MSELoss()\n\n train_psnrs = []\n test_psnrs = []\n xs = []\n for i in range(iters):\n #for i in tqdm(range(iters), desc='train iter', leave=False):\n model.train()\n optim.zero_grad()\n\n t_o = model(input_mapping(train_data[0], B))\n t_loss = .5 * loss_fn(t_o, train_data[1])\n\n t_loss.backward()\n optim.step()\n\n # print(f\"---[steps: {i}]: train loss: {t_loss.item():.6f}\")\n\n train_psnrs.append(- 10 * torch.log10(2 * t_loss).item())\n\n if i % 25 == 0:\n model.eval()\n with torch.no_grad():\n v_o = model(input_mapping(test_data[0], B))\n save_img = torch.zeros((1,3,256,256)).to(device)\n test_inds = (test_data[0]*255).round().long()\n save_img[0, :, test_inds[:,0], test_inds[:,1]] = v_o.T\n \n #v_loss = loss_fn(v_o, test_data[1])\n #v_psnrs = - 10 * torch.log10(2 * v_loss).item()\n #test_psnrs.append(v_psnrs)\n #xs.append(i)\n torchvision.utils.save_image(save_img, f\"imgs/{i}.jpeg\")\n # print(f\"---[steps: {i}]: valid loss: {v_loss.item():.6f}\")\n\n return {\n 'state': model.state_dict(),\n 'train_psnrs': train_psnrs,\n 'test_psnrs': test_psnrs,\n }\n\n\nif __name__ == '__main__':\n device = \"cuda:0\"\n\n network_size = (4, 512, 256)\n learning_rate = 1e-4\n iters = 250\n mapping_size = 256\n\n B_gauss = torch.randn((mapping_size, 2)).to(device) * 10\n\n ds = ImageDataset(\"data/fox.jpg\", 512)\n\n grid, image = ds[0]\n grid = grid.unsqueeze(0).to(device)\n image = image.unsqueeze(0).to(device)\n\n test_data = (grid, image)\n train_data = (grid[:, ::2, ::2], image[:, ::2, :: 2])\n\n output = train_model(network_size, learning_rate, iters, B_gauss,\n train_data=train_data, test_data=(grid, image), device=device)\n"
] |
[
[
"torch.nn.Sequential",
"torch.sigmoid",
"numpy.sqrt",
"torch.sin",
"torch.zeros",
"torch.randn",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"torch.no_grad",
"torch.log10",
"torch.nn.MSELoss",
"torch.cos"
]
] |
jpvelsamy/hotdog
|
[
"df45cdc0b9e6abfecd16a43f75f1671e51cbc47c"
] |
[
"dataingestion.py"
] |
[
"import pandas as pd\nimport logging\n\nlogger = logging.getLogger(\"ACE\")\n\n\nclass DataIngestion:\n\n def __init__(self, file_name):\n self.file_name = file_name\n\n def prepUp(self):\n try:\n data = pd.read_csv(self.file_name)\n logger.info(f'column listing #{data.columns}')\n except(RuntimeError) as error:\n logger.error(\"Error preparing data \", error.original_traceback)\n pass\n"
] |
[
[
"pandas.read_csv"
]
] |
ashley062190/pycpa_taskchain
|
[
"4274371b90407fe9715ca2d5d5793bf4736f53e2"
] |
[
"experiments/plot_hist_compare.py"
] |
[
"#!/usr/bin/env python\nimport matplotlib.pyplot as pyplot\nimport matplotlib\n\nimport numpy as np\nimport argparse\nimport csv\n\nparser = argparse.ArgumentParser(description='Print statistics of path latency results.')\nparser.add_argument('file', metavar='csv_file', type=str, \n help='csv file to be processed')\nparser.add_argument('--output', type=str,\n help='Output format/file')\nparser.add_argument('--delimiter', default='\\t', type=str,\n help='CSV delimiter')\nparser.add_argument('--xlabel', default=None, type=str)\nparser.add_argument('--ylabel', default='# cases', type=str)\nparser.add_argument('--bins', default=20, type=int,\n help='Number of bins')\nparser.add_argument('--log', action='store_true',\n help='Logarithmic scale')\nparser.add_argument('--yticks', default=[30, 50, 70, 100, 600], type=int, nargs='+',\n help='Ticks on the y axis for logarithmic scale')\nparser.add_argument('--xticks', default=list(), type=float, nargs='+',\n help='Ticks on the x axis')\nparser.add_argument('--original', default='lat', type=str,\n help='Identifier of original value')\nparser.add_argument('--improved', default='lat_sync', type=str,\n help='Identifier of improved value')\nparser.add_argument('--paths', type=str, nargs='+', required=True,\n help='Paths to be compared')\nparser.add_argument('--pathnames', type=str, nargs='+', required=True,\n help='Path names')\nparser.add_argument('--priority_cols', type=str, nargs='+', required=True)\nparser.add_argument('--relative', action='store_true',\n help='Use relative improvement')\nparser.add_argument('--fontsize', default=20, type=int)\n\nargs = parser.parse_args()\n\ndef parse_results(filename):\n results = dict()\n with open(filename, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=args.delimiter)\n for row in reader:\n eid = \"\"\n for col in args.priority_cols:\n eid += row[col]\n\n if eid not in results:\n results[eid] = dict()\n\n orig = int(row[args.original])\n impr = int(row[args.improved])\n\n if 'Path' in row:\n name = row['Path']\n else:\n name = row['Chain']\n \n if name not in args.paths:\n continue\n\n if orig == 0 or impr == 0:\n results[eid][name] = 0\n else:\n if args.relative:\n results[eid][name] = float(impr)/float(orig)\n else:\n results[eid][name] = orig-impr\n\n return results\n\ndef prepare_results(results):\n points = list()\n for p in args.paths:\n points.append(list())\n\n for e in results.keys():\n for i in range(len(args.paths)):\n p = args.paths[i]\n points[i].append(results[e][p])\n\n return points\n\nresults = parse_results(args.file)\nx = prepare_results(results)\n\nfig, ax = pyplot.subplots(nrows=1, ncols=1)\n\nassert(len(args.pathnames) == len(args.paths))\n\nax.hist(x, args.bins, align='right', log=args.log, histtype='bar', label=args.pathnames)\nif args.log:\n ax.set_yticks(args.yticks)\n ax.get_yaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())\n\nif len(args.xticks) > 0:\n ax.set_xticks(args.xticks)\n\nif args.xlabel is not None:\n pyplot.xlabel(args.xlabel, fontsize=args.fontsize)\n\nif args.ylabel is not None:\n pyplot.ylabel(args.ylabel, fontsize=args.fontsize)\n\nax.legend(prop={'size': args.fontsize})\nax.xaxis.set_tick_params(labelsize=args.fontsize)\nax.yaxis.set_tick_params(labelsize=args.fontsize)\nmatplotlib.rcParams.update({'font.size': args.fontsize})\n\nif args.output is not None:\n pyplot.tight_layout(pad=0.5)\n pyplot.savefig(args.output)\nelse:\n pyplot.show()\n\n# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.rcParams.update",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] |
lintondf/MorrisonPolynomialFiltering
|
[
"f5713f9ed9a24c1382875d8ebdec00100f39e3a5"
] |
[
"Python/src/polynomialfiltering/PythonUtilities.py"
] |
[
"''' PolynomialFiltering.PythonUtilities\n (C) Copyright 2019 - Blue Lightning Development, LLC.\n D. F. Linton. [email protected]\n\n SPDX-License-Identifier: MIT\n See separate LICENSE file for full text\n'''\n\n\"\"\"***************** DO NOT TRANSPILE THIS MODULE *************************\"\"\"\nfrom numpy import exp, log, min, exp, array\nfrom scipy.stats import chi2, f;\nfrom typing import List;\n\n'''********************************************************************\nLcdPython Decorators\n'''\ndef virtual(funcobj):\n '''\n @virtual marks a function as virtual.\n \n Some in target languages only explicitly marked function can be overloaded\n '''\n return funcobj;\n\ndef constructor(funcobj):\n '''\n @constructor marks a function as a class constructor\n '''\n return funcobj;\n\ndef forcestatic(funcobj):\n '''\n @forcestatic marks a function as a static method; used when staticmethod doesn't work\n '''\n return funcobj;\n\ndef ignore(funcobj):\n '''\n Function marked @ignore are not transpiled\n '''\n return funcobj;\n\ndef inline(funcobj):\n '''\n @inline pass an inline hint to the target language where applicable\n '''\n return funcobj;\n\ndef testcase(funcobj):\n '''\n Function marked @testcase are transpiled into the test template when in test generation mode\n '''\n return funcobj;\n\ndef testmethod(funcobj):\n '''\n Function marked @testmethod are transpiled into the test template when in test generation mode\n '''\n return funcobj;\n\ndef testclass(funcobj):\n '''\n Classes marked @testclass are transpiled into the test template when in test generation mode\n '''\n return funcobj;\n\ndef testclassmethod(funcobj):\n '''\n Classes methods marked @testclassmethod are transpiled into the test template when in test generation mode\n '''\n return funcobj;\n\n\ndef assert_not_empty(list : List[str ]) -> None:\n assert( len(list) > 0);\n\n\n'''********************************************************************\nUtility functions for transpiled code; \n'''\n \ndef List() -> List:\n return []\n\n\ndef copy( A : array ) -> array:\n return A.copy()\n\ndef chi2Cdf(x : float, df : int) -> float:\n return chi2.cdf(1e-9 + x, df)\n\ndef chi2Ppf(p : float, df : int) -> float:\n return 1e-9 + chi2.ppf(p, df );\n\ndef fdistCdf(x : float, df1 : int, df2 : int) -> float:\n return f.cdf(1e-9 + x, df1, df2)\n\ndef fdistPpf(p : float, df1 : int, df2 : int) -> float:\n return 1e-9 + f.ppf(p, df1, df2)\n\n\nif __name__ == '__main__':\n pass\n# for order in range(0,5+1) :\n# for tau in [0.001, 0.01, 0.1, 1, 10, 100, 1000] :\n# print(order, tau, nUnitLastVRF(order, tau))\n# print(chi2Cdf(6.64, 1), chi2Cdf(2*4.61, 2), chi2Cdf(3*3.78, 3), chi2Cdf(4*3.32, 4), chi2Cdf(10*2.32, 10))\n# for i in range(1,21) :\n# print(i, chi2Ppf(0.99, i)/i)\n# print(chi2Cdf(3, 2), chi2Ppf(0.95, 2))\n# for m in range(0,5+1):\n# c = chi2Ppf(0.99, m+1);\n# print(m, c, exp(chi2Cdf(c,m+1)))"
] |
[
[
"scipy.stats.chi2.ppf",
"scipy.stats.chi2.cdf",
"scipy.stats.f.ppf",
"scipy.stats.f.cdf"
]
] |
jennyfothergill/msibi
|
[
"0e309eff836dc13016d87889fe8d8f6960a13599"
] |
[
"msibi/tests/test_pair.py"
] |
[
"import os\n\nimport numpy as np\nimport pytest\n\nfrom msibi import MSIBI, State, Pair, mie\n\nfrom .base_test import BaseTest\n\ndr = 0.1 / 6.0\nr = np.arange(0, 2.5 + dr, dr)\nr_range = np.asarray([0.0, 2.5 + dr])\nn_bins = 151\nk_B = 1.9872041e-3 # kcal/mol-K\nT = 298.0 # K\n\n\nclass TestPair(BaseTest):\n def test_pair_name(self, pair):\n assert pair.name == \"0-1\"\n\n def test_save_table_potential(self, tmp_path):\n pair = Pair(\"A\", \"B\", potential=mie(r, 1.0, 1.0))\n pair.potential_file = os.path.join(tmp_path, \"pot.txt\")\n pair.save_table_potential(r, dr)\n assert os.path.isfile(pair.potential_file)\n\n def test_add_state(self, pair, state0, rdf0, tmp_path):\n opt = MSIBI(2.5, n_bins, smooth_rdfs=True, rdf_exclude_bonded=True)\n opt.add_state(state0)\n opt.add_pair(pair)\n opt.optimize(\n n_iterations=0,\n _dir=tmp_path,\n integrator=\"hoomd.md.integrate.nvt\",\n integrator_kwargs={\"tau\": 0.1},\n dt=0.001,\n gsd_period=1000\n )\n assert isinstance(pair._states, dict)\n assert np.array_equal(pair._states[state0][\"target_rdf\"], rdf0)\n assert pair._states[state0][\"current_rdf\"] is None\n assert pair._states[state0][\"alpha\"] == 0.5\n assert pair._states[state0][\"pair_indices\"] is None\n assert len(pair._states[state0][\"f_fit\"]) == 0\n\n def test_current_rdf_no_smooth(self, state0, pair, tmp_path):\n opt = MSIBI(2.5, n_bins, smooth_rdfs=False)\n opt.add_state(state0)\n opt.add_pair(pair)\n opt.optimize(\n n_iterations=0,\n _dir=tmp_path,\n integrator=\"hoomd.md.integrate.nvt\",\n integrator_kwargs={\"tau\": 0.1},\n dt=0.001,\n gsd_period=1000\n )\n pair.compute_current_rdf(state0, opt.smooth_rdfs, query=False)\n assert pair._states[state0][\"current_rdf\"] is not None\n assert len(pair._states[state0][\"f_fit\"]) > 0\n\n def test_current_rdf_smooth(self, state0, pair, tmp_path):\n opt = MSIBI(2.5, n_bins, smooth_rdfs=True)\n opt.add_state(state0)\n opt.add_pair(pair)\n opt.optimize(\n n_iterations=0,\n _dir=tmp_path,\n integrator=\"hoomd.md.integrate.nvt\",\n integrator_kwargs={\"tau\": 0.1},\n dt=0.001,\n gsd_period=1000\n )\n pair.compute_current_rdf(state0, opt.smooth_rdfs, query=False)\n assert pair._states[state0][\"current_rdf\"] is not None\n assert len(pair._states[state0][\"f_fit\"]) > 0\n\n def test_save_current_rdf(self, state0, pair, tmp_path):\n opt = MSIBI(2.5, n_bins, smooth_rdfs=True)\n opt.add_state(state0)\n opt.add_pair(pair)\n opt.optimize(\n n_iterations=0,\n _dir=tmp_path,\n integrator=\"hoomd.md.integrate.nvt\",\n integrator_kwargs={\"tau\": 0.1},\n dt=0.001,\n gsd_period=1000\n )\n pair.compute_current_rdf(state0, opt.smooth_rdfs, query=False)\n pair.save_current_rdf(state0, 0, opt.dr)\n assert os.path.isfile(\n os.path.join(\n state0.dir, f\"pair_{pair.name}-state_{state0.name}-step0.txt\"\n )\n )\n\n def test_update_potential(self, state0, pair, tmp_path):\n \"\"\"Make sure the potential changes after calculating RDF\"\"\"\n opt = MSIBI(2.5, n_bins)\n opt.add_state(state0)\n opt.add_pair(pair)\n opt.optimize(\n n_iterations=0,\n _dir=tmp_path,\n integrator=\"hoomd.md.integrate.nvt\",\n integrator_kwargs={\"tau\": 0.1},\n dt=0.001,\n gsd_period=1000\n )\n pair.compute_current_rdf(state0, opt.smooth_rdfs, query=False)\n pair.update_potential(np.arange(0, 2.5 + dr, dr), r_switch=1.8)\n assert not np.array_equal(pair.potential, pair.previous_potential)\n"
] |
[
[
"numpy.asarray",
"numpy.arange",
"numpy.array_equal"
]
] |
v-cherian/GANComposer
|
[
"b02dd005467f8d366aeb4ddde6276de644fce6c6"
] |
[
"train.py"
] |
[
"# Copyright 2019 Christopher John Bayron\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n#\r\n# This file has been created by Christopher John Bayron based on \"rnn_gan.py\"\r\n# by Olof Mogren. The referenced code is available in:\r\n#\r\n# https://github.com/olofmogren/c-rnn-gan\r\n\r\nimport os\r\nfrom argparse import ArgumentParser\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch import optim\r\n\r\nfrom c_rnn_gan import Generator, Discriminator\r\nimport music_data_utils\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\nCKPT_DIR = 'models'\r\nCOMPOSER = 'sonata-ish'\r\n\r\nG_FN = 'c_rnn_gan_g.pth'\r\nD_FN = 'c_rnn_gan_d.pth'\r\n\r\nG_LRN_RATE = 0.001\r\nD_LRN_RATE = 0.001\r\nMAX_GRAD_NORM = 5.0\r\n# following values are modified at runtime\r\nMAX_SEQ_LEN = 200\r\nBATCH_SIZE = 32\r\n\r\n# value to use to approximate zero (to prevent undefined results)\r\nEPSILON = 1e-40\r\n\r\nSTART_TIME = time.strftime(\"%m%d%Y_%H%M%S\")\r\n\r\n\r\nclass GLoss(nn.Module):\r\n ''' C-RNN-GAN generator loss\r\n '''\r\n\r\n def __init__(self):\r\n super(GLoss, self).__init__()\r\n\r\n def forward(self, logits_gen):\r\n logits_gen = torch.clamp(logits_gen, EPSILON, 1.0)\r\n batch_loss = -torch.log(logits_gen)\r\n\r\n return torch.mean(batch_loss)\r\n\r\n\r\nclass DLoss(nn.Module):\r\n ''' C-RNN-GAN discriminator loss\r\n '''\r\n\r\n def __init__(self, label_smoothing=False):\r\n super(DLoss, self).__init__()\r\n self.label_smoothing = label_smoothing\r\n\r\n def forward(self, logits_real, logits_gen):\r\n ''' Discriminator loss\r\n\r\n logits_real: logits from D, when input is real\r\n logits_gen: logits from D, when input is from Generator\r\n\r\n loss = -(ylog(p) + (1-y)log(1-p))\r\n\r\n '''\r\n logits_real = torch.clamp(logits_real, EPSILON, 1.0)\r\n d_loss_real = -torch.log(logits_real)\r\n\r\n if self.label_smoothing:\r\n p_fake = torch.clamp((1 - logits_real), EPSILON, 1.0)\r\n d_loss_fake = -torch.log(p_fake)\r\n d_loss_real = 0.9*d_loss_real + 0.1*d_loss_fake\r\n\r\n logits_gen = torch.clamp((1 - logits_gen), EPSILON, 1.0)\r\n d_loss_gen = -torch.log(logits_gen)\r\n\r\n batch_loss = d_loss_real + d_loss_gen\r\n return torch.mean(batch_loss)\r\n\r\n\r\ndef run_training(model, optimizer, criterion, dataloader, freeze_g=False, freeze_d=False, n_critic=1):\r\n ''' Run single training epoch\r\n '''\r\n critic = 0\r\n num_feats = dataloader.get_num_song_features()\r\n dataloader.rewind(part='train')\r\n batch_meta, batch_song = dataloader.get_batch(BATCH_SIZE, MAX_SEQ_LEN, part='train')\r\n\r\n model['g'].train()\r\n model['d'].train()\r\n\r\n loss = {}\r\n g_loss_total = 0.0\r\n d_loss_total = 0.0\r\n num_corrects = 0\r\n num_sample = 0\r\n\r\n while batch_meta is not None and batch_song is not None:\r\n\r\n real_batch_sz = batch_song.shape[0]\r\n\r\n # get initial states\r\n # each batch is independent i.e. 
not a continuation of previous batch\r\n # so we reset states for each batch\r\n # POSSIBLE IMPROVEMENT: next batch is continuation of previous batch\r\n g_states = model['g'].init_hidden(real_batch_sz)\r\n d_state = model['d'].init_hidden(real_batch_sz)\r\n\r\n #### GENERATOR ####\r\n if critic % n_critic == 0:\r\n if not freeze_g:\r\n optimizer['g'].zero_grad()\r\n # prepare inputs\r\n z = torch.empty([real_batch_sz, MAX_SEQ_LEN, num_feats]).uniform_() # random vector\r\n batch_song = torch.Tensor(batch_song)\r\n\r\n # feed inputs to generator\r\n g_feats, _ = model['g'](z, g_states)\r\n\r\n # calculate loss, backprop, and update weights of G\r\n if isinstance(criterion['g'], GLoss):\r\n d_logits_gen, _, _ = model['d'](g_feats, d_state)\r\n loss['g'] = criterion['g'](d_logits_gen)\r\n else: # feature matching\r\n # feed real and generated input to discriminator\r\n _, d_feats_real, _ = model['d'](batch_song, d_state)\r\n _, d_feats_gen, _ = model['d'](g_feats, d_state)\r\n loss['g'] = criterion['g'](d_feats_real, d_feats_gen)\r\n\r\n if not freeze_g:\r\n loss['g'].backward()\r\n nn.utils.clip_grad_norm_(model['g'].parameters(), max_norm=MAX_GRAD_NORM)\r\n optimizer['g'].step()\r\n\r\n #### DISCRIMINATOR ####\r\n if not freeze_d:\r\n optimizer['d'].zero_grad()\r\n # feed real and generated input to discriminator\r\n d_logits_real, _, _ = model['d'](batch_song, d_state)\r\n # need to detach from operation history to prevent backpropagating to generator\r\n d_logits_gen, _, _ = model['d'](g_feats.detach(), d_state)\r\n # calculate loss, backprop, and update weights of D\r\n loss['d'] = criterion['d'](d_logits_real, d_logits_gen)\r\n if not freeze_d:\r\n loss['d'].backward()\r\n nn.utils.clip_grad_norm_(model['d'].parameters(), max_norm=MAX_GRAD_NORM)\r\n optimizer['d'].step()\r\n\r\n g_loss_total += loss['g'].item()\r\n d_loss_total += loss['d'].item()\r\n num_corrects += (d_logits_real > 0.5).sum().item() + (d_logits_gen < 0.5).sum().item()\r\n num_sample += real_batch_sz\r\n\r\n # fetch next batch\r\n batch_meta, batch_song = dataloader.get_batch(\r\n BATCH_SIZE, MAX_SEQ_LEN, part='train')\r\n \r\n critic += 1\r\n\r\n g_loss_avg, d_loss_avg = 0.0, 0.0\r\n d_acc = 0.0\r\n if num_sample > 0:\r\n g_loss_avg = g_loss_total / num_sample\r\n d_loss_avg = d_loss_total / num_sample\r\n # 2 because (real + generated)\r\n d_acc = 100 * num_corrects / (2 * num_sample)\r\n\r\n return model, g_loss_avg, d_loss_avg, d_acc\r\n\r\n\r\ndef run_validation(model, criterion, dataloader):\r\n ''' Run single validation epoch\r\n '''\r\n num_feats = dataloader.get_num_song_features()\r\n dataloader.rewind(part='validation')\r\n batch_meta, batch_song = dataloader.get_batch(\r\n BATCH_SIZE, MAX_SEQ_LEN, part='validation')\r\n\r\n model['g'].eval()\r\n model['d'].eval()\r\n\r\n g_loss_total = 0.0\r\n d_loss_total = 0.0\r\n num_corrects = 0\r\n num_sample = 0\r\n\r\n while batch_meta is not None and batch_song is not None:\r\n\r\n real_batch_sz = batch_song.shape[0]\r\n\r\n # initial states\r\n g_states = model['g'].init_hidden(real_batch_sz)\r\n d_state = model['d'].init_hidden(real_batch_sz)\r\n\r\n #### GENERATOR ####\r\n # prepare inputs\r\n z = torch.empty([real_batch_sz, MAX_SEQ_LEN, num_feats]).uniform_() # random vector\r\n batch_song = torch.Tensor(batch_song)\r\n\r\n # feed inputs to generator\r\n g_feats, _ = model['g'](z, g_states)\r\n # feed real and generated input to discriminator\r\n d_logits_real, d_feats_real, _ = model['d'](batch_song, d_state)\r\n d_logits_gen, d_feats_gen, _ = 
model['d'](g_feats, d_state)\r\n # calculate loss\r\n if isinstance(criterion['g'], GLoss):\r\n g_loss = criterion['g'](d_logits_gen)\r\n else: # feature matching\r\n g_loss = criterion['g'](d_feats_real, d_feats_gen)\r\n\r\n d_loss = criterion['d'](d_logits_real, d_logits_gen)\r\n\r\n g_loss_total += g_loss.item()\r\n d_loss_total += d_loss.item()\r\n num_corrects += (d_logits_real > 0.5).sum().item() + (d_logits_gen < 0.5).sum().item()\r\n num_sample += real_batch_sz\r\n\r\n # fetch next batch\r\n batch_meta, batch_song = dataloader.get_batch(BATCH_SIZE, MAX_SEQ_LEN, part='validation')\r\n\r\n g_loss_avg, d_loss_avg = 0.0, 0.0\r\n d_acc = 0.0\r\n if num_sample > 0:\r\n g_loss_avg = g_loss_total / num_sample\r\n d_loss_avg = d_loss_total / num_sample\r\n # 2 because (real + generated)\r\n d_acc = 100 * num_corrects / (2 * num_sample)\r\n\r\n return g_loss_avg, d_loss_avg, d_acc\r\n\r\n\r\ndef run_epoch(model, optimizer, criterion, dataloader, ep, num_ep,\r\n freeze_g=False, freeze_d=False, pretraining=False, n_critic=1):\r\n ''' Run a single epoch\r\n '''\r\n model, trn_g_loss, trn_d_loss, trn_acc = run_training(model, optimizer, criterion, dataloader, freeze_g=freeze_g, freeze_d=freeze_d, n_critic=n_critic)\r\n\r\n val_g_loss, val_d_loss, val_acc = run_validation(model, criterion, dataloader)\r\n\r\n if pretraining:\r\n print(\"Pretraining Epoch %d/%d \" % (ep+1, num_ep),\r\n \"[Freeze G: \", freeze_g, \", Freeze D: \", freeze_d, \"]\")\r\n else:\r\n print(\"Epoch %d/%d \" % (ep+1, num_ep),\r\n \"[Freeze G: \", freeze_g, \", Freeze D: \", freeze_d, \"]\")\r\n\r\n print(\"\\t[Training] G_loss: %0.8f, D_loss: %0.8f, D_acc: %0.2f\\n\"\r\n \"\\t[Validation] G_loss: %0.8f, D_loss: %0.8f, D_acc: %0.2f\" %\r\n (trn_g_loss, trn_d_loss, trn_acc,\r\n val_g_loss, val_d_loss, val_acc))\r\n\r\n # -- DEBUG --\r\n # This is for monitoring the current output from generator\r\n # generate from model then save to MIDI file\r\n g_states = model['g'].init_hidden(1)\r\n num_feats = dataloader.get_num_song_features()\r\n z = torch.empty([1, MAX_SEQ_LEN, num_feats]).uniform_() # random vector\r\n if torch.cuda.is_available():\r\n z = z.cuda()\r\n model['g'].cuda()\r\n\r\n model['g'].eval()\r\n g_feats, _ = model['g'](z, g_states)\r\n song_data = g_feats.squeeze().cpu()\r\n song_data = song_data.detach().numpy()\r\n\r\n if ep % (num_ep/5) == 0 or (ep+1) == num_ep:\r\n midi_data = dataloader.save_data('sample' + '_'+time.strftime(\"%m%d%Y_%H%M%S\")+'.mid', song_data)\r\n else:\r\n midi_data = dataloader.save_data(None, song_data)\r\n # print(midi_data[0][:16])\r\n # -- DEBUG --\r\n\r\n return model, trn_acc, trn_g_loss, trn_d_loss, val_g_loss, val_d_loss\r\n\r\n\r\ndef main(args):\r\n ''' Training sequence\r\n '''\r\n dataloader = music_data_utils.MusicDataLoader(args.data_dir, composers=args.composers, redo_split=args.redo_split)\r\n num_feats = dataloader.get_num_song_features()\r\n\r\n # First checking if GPU is available\r\n train_on_gpu = torch.cuda.is_available()\r\n if train_on_gpu:\r\n print('Training on GPU.')\r\n else:\r\n print('No GPU available, training on CPU.')\r\n\r\n model = {\r\n 'g': Generator(num_feats, use_cuda=train_on_gpu),\r\n 'd': Discriminator(num_feats, use_cuda=train_on_gpu)\r\n }\r\n\r\n if args.use_sgd:\r\n optimizer = {\r\n 'g': optim.SGD(model['g'].parameters(), lr=args.g_lrn_rate, momentum=0.9),\r\n 'd': optim.SGD(model['d'].parameters(), lr=args.d_lrn_rate, momentum=0.9)\r\n }\r\n else:\r\n optimizer = {\r\n 'g': optim.Adam(model['g'].parameters(), args.g_lrn_rate),\r\n 'd': 
optim.Adam(model['d'].parameters(), args.d_lrn_rate)\r\n }\r\n\r\n criterion = {\r\n 'g': nn.MSELoss(reduction='sum') if args.feature_matching else GLoss(),\r\n 'd': DLoss(args.label_smoothing)\r\n }\r\n\r\n if args.load_g:\r\n ckpt = torch.load(os.path.join(CKPT_DIR, G_FN))\r\n model['g'].load_state_dict(ckpt)\r\n print(\"Continue training of %s\" % os.path.join(CKPT_DIR, G_FN))\r\n\r\n if args.load_d:\r\n ckpt = torch.load(os.path.join(CKPT_DIR, D_FN))\r\n model['d'].load_state_dict(ckpt)\r\n print(\"Continue training of %s\" % os.path.join(CKPT_DIR, D_FN))\r\n\r\n if train_on_gpu:\r\n model['g'].cuda()\r\n model['d'].cuda()\r\n\r\n if not args.no_pretraining:\r\n for ep in range(args.d_pretraining_epochs):\r\n model, _, _, _, _, _ = run_epoch(model, optimizer, criterion, dataloader,\r\n ep, args.d_pretraining_epochs, freeze_g=True, pretraining=True)\r\n\r\n for ep in range(args.g_pretraining_epochs):\r\n model, _, _, _, _, _ = run_epoch(model, optimizer, criterion, dataloader,\r\n ep, args.g_pretraining_epochs, freeze_d=True, pretraining=True)\r\n\r\n freeze_d = False\r\n losses = []\r\n\r\n for ep in range(args.num_epochs):\r\n model, trn_acc, trn_g_loss, trn_d_loss, val_g_loss, val_d_loss = run_epoch(\r\n model, optimizer, criterion, dataloader, ep, args.num_epochs, freeze_d=freeze_d)\r\n\r\n losses.append([trn_g_loss, trn_d_loss, val_g_loss,\r\n val_d_loss]) # store losses\r\n\r\n if args.conditional_freezing:\r\n # conditional freezing\r\n freeze_d = False\r\n if trn_acc >= 95.0:\r\n freeze_d = True\r\n\r\n if not args.no_save_g:\r\n torch.save(model['g'].state_dict(), os.path.join(CKPT_DIR, G_FN))\r\n print(\"Saved generator: %s\" % os.path.join(CKPT_DIR, G_FN))\r\n\r\n if not args.no_save_d:\r\n torch.save(model['d'].state_dict(), os.path.join(CKPT_DIR, D_FN))\r\n print(\"Saved discriminator: %s\" % os.path.join(CKPT_DIR, D_FN))\r\n\r\n if args.plot_loss:\r\n _, ax = plt.subplots()\r\n ax.plot([loss[0] for loss in losses], label='G Training Loss')\r\n ax.plot([loss[1] for loss in losses], label='D Training Loss')\r\n ax.plot([loss[2] for loss in losses], label='G Validation Loss')\r\n ax.plot([loss[3] for loss in losses], label='D Validation Loss')\r\n plt.legend()\r\n # plt.show()\r\n plt.savefig('loss_' + str(args.num_epochs) + '_' + time.strftime(\"%m%d%Y_%H%M%S\") + '.png')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n parser = ArgumentParser()\r\n parser.add_argument('--load_g', action='store_true', help=\"load G model parameters\")\r\n parser.add_argument('--load_d', action='store_true', help=\"load D model parameters\")\r\n parser.add_argument('--no_save_g', action='store_true', help=\"do not save G parameters\")\r\n parser.add_argument('--no_save_d', action='store_true', help=\"do not save D parameters\")\r\n\r\n parser.add_argument('--data_dir', default='data/maestro-v2.0.0', help=\"data directory path\")\r\n parser.add_argument('--composers', nargs='+', default=None, help=\"composers to train on\")\r\n # add in flag to use saved variable\r\n parser.add_argument('--redo_split', action='store_true', help=\"use saved variables\")\r\n parser.add_argument('--num_epochs', default=300, type=int, help=\"number of training epochs\")\r\n parser.add_argument('--seq_len', default=256, type=int, help=\"midi input sequence length\")\r\n parser.add_argument('--batch_size', default=16, type=int, help=\"batch size\")\r\n parser.add_argument('--g_lrn_rate', default=0.001, type=float, help=\"G learning rate\")\r\n parser.add_argument('--d_lrn_rate', default=0.001, type=float, help=\"D 
learning rate\")\r\n\r\n parser.add_argument('--no_pretraining', action='store_true', help=\"do not pretrain G or D\")\r\n parser.add_argument('--g_pretraining_epochs', default=5, type=int, help=\"# of G pretraining epochs\")\r\n parser.add_argument('--d_pretraining_epochs', default=5, type=int, help=\"# of D pretraining epochs\")\r\n # parser.add_argument('--freeze_d_every', default=5, type=int)\r\n parser.add_argument('--use_sgd', action='store_true', help=\"use stochastic gradient descent\")\r\n parser.add_argument('--conditional_freezing', action='store_true', help=\"freeze D when predictions are too good\")\r\n parser.add_argument('--label_smoothing', action='store_true', help=\"????????????????\") # TODO\r\n parser.add_argument('--feature_matching', action='store_true', help=\"????????????????\") # TODO\r\n parser.add_argument('--plot_loss', action='store_true', help=\"plot GAN training and validation losses\")\r\n parser.add_argument('--n_critic', default=1, type=int, help=\"how often to update G with D\")\r\n\r\n args = parser.parse_args()\r\n MAX_SEQ_LEN = args.seq_len\r\n BATCH_SIZE = args.batch_size\r\n\r\n main(args)\r\n"
] |
[
[
"torch.mean",
"matplotlib.pyplot.legend",
"torch.empty",
"torch.Tensor",
"matplotlib.pyplot.subplots",
"torch.log",
"torch.cuda.is_available",
"torch.clamp",
"torch.nn.MSELoss"
]
] |
danielgarm/FreeCodeCamp-Machine-Learning
|
[
"c0f7e808dba5a87cada8af1f5623283ac77c96a3"
] |
[
"6 - Natural Language Processing with RNNs/theatre_play_generation.py"
] |
[
"from keras.preprocessing import sequence\nimport keras\nimport tensorflow as tf\nimport os\nimport numpy as np\n\n# If we want to load a dataset of the Shakespeare script:\npath_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')\n\n# If we want to load our own dataset:\nfrom google.colab import files\npath_to_file = list(files.upload().keys())[0]\n\n\n# Reading the contents...\n# Read, then decode for py2 compat.\ntext = open(path_to_file, 'rb').read().decode(encoding='utf-8')\n# length of text is the number of characters in it\nprint ('Length of text: {} characters'.format(len(text)))\n\n# Take a look at the first 250 characters in text\nprint(text[:250])\n\n\n# Encoding...\nvocab = sorted(set(text))\n# Creating a mapping from unique characters to indices\nchar2idx = {u:i for i, u in enumerate(vocab)}\nidx2char = np.array(vocab)\n\ndef text_to_int(text):\n\treturn np.array([char2idx[c] for c in text])\n\ntext_as_int = text_to_int(text)\n\n# lets look at how part of our text is encoded\nprint(\"Text:\", text[:13])\nprint(\"Encoded:\", text_to_int(text[:13]))\n\n# Function that can convert our numeric values to text\ndef int_to_text(ints):\n\ttry:\n\t\tints = ints.numpy()\n\texcept:\n\t\tpass\n\treturn ''.join(idx2char[ints])\n\nprint(int_to_text(text_as_int[:13]))\n\n\n# Creating Training Examples...\nseq_length = 100 # length of sequence for a training example\nexamples_per_epoch = len(text)//(seq_length+1)\n\n# Create training examples / targets\nchar_dataset = tf.data.Dataset.from_tensor_slices(text_as_int)\n\nsequences = char_dataset.batch(seq_length+1, drop_remainder=True)\n\ndef split_input_target(chunk): # for the example: hello\n\tinput_text = chunk[:-1] # hell\n\ttarget_text = chunk[1:] # ello\n\treturn input_text, target_text # hell, ello\n\ndataset = sequences.map(split_input_target) # we use map to apply the above function to every entry\n\nfor x, y in dataset.take(2):\n\tprint(\"\\n\\nEXAMPLE\\n\")\n\tprint(\"INPUT\")\n\tprint(int_to_text(x))\n\tprint(\"\\nOUTPUT\")\n\tprint(int_to_text(y))\n\n# Training batches...\nBATCH_SIZE = 64\nVOCAB_SIZE = len(vocab) # vocab is number of unique characters\nEMBEDDING_DIM = 256\nRNN_UNITS = 1024\n\n# Buffer size to shuffle the dataset\n# (TF data is designed to work with possibly infinite sequences,\n# so it doesn't attempt to shuffle the entire sequence in memory. 
Instead,\n# it maintains a buffer in which it shuffles elements).\nBUFFER_SIZE = 10000\n\n\n# Building the model...\ndef build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model = tf.keras.Sequential([\n tf.keras.layers.Embedding(vocab_size, embedding_dim,\n batch_input_shape=[batch_size, None]),\n tf.keras.layers.LSTM(rnn_units,\n return_sequences=True, # Return sequences needs to be true so that we see the output on each step (letter by letter)\n stateful=True,\n recurrent_initializer='glorot_uniform'),\n tf.keras.layers.Dense(vocab_size)\n ])\n return model\n\nmodel = build_model(VOCAB_SIZE,EMBEDDING_DIM, RNN_UNITS, BATCH_SIZE)\nmodel.summary()\n\n# Creating a loss function\nfor input_example_batch, target_example_batch in data.take(1):\n\texample_batch_predictions = model(input_example_batch) # ask our model for a prediction on our first batch of training data (64 entries)\n\tprint(example_batch_predictions.shape, \"# (batch_size, sequence_length, vocab_size)\") # print out the output shape\n\ndata = dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)\n\n# lets examine one prediction\npred = example_batch_predictions[0]\nprint(len(pred))\nprint(pred)\n# notice this is a 2d array of length 100, where each interior array is the prediction for the next character at each time step\n\n# and finally well look at a prediction at the first timestep\ntime_pred = pred[0]\nprint(len(time_pred))\nprint(time_pred)\n# and of course its 65 values representing the probabillity of each character occuring next\n\n# If we want to determine the predicted character we need to sample the output distribution (pick a value based on probability)\n# Sampling is not the same as simply picking up the character with the highest probability! Doing this could cause some unexpected behaviour\nsampled_indices = tf.random.categorical(pred, num_samples=1)\n\n# now we can reshape that array and convert all the integers to numbers to see the actual characters\nsampled_indices = np.reshape(sampled_indices, (1, -1))[0]\npredicted_chars = int_to_text(sampled_indices)\n\npredicted_chars # and this is what the model predicted for training sequence 1 (before training it on real data, just random weights and biases)\n\ndef loss(labels, logits):\n\treturn tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)\n\n\n# Compiling the model...\nmodel.compile(optimizer='adam', loss=loss)\n\n\n# Creating checkpoints...\n# This will allow us to load our model from a checkpoint and continue training it.\n# Directory where the checkpoints will be saved\ncheckpoint_dir = './training_checkpoints'\n# Name of the checkpoint files\ncheckpoint_prefix = os.path.join(checkpoint_dir, \"ckpt_{epoch}\")\n\ncheckpoint_callback=tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_prefix,\n save_weights_only=True)\n\n\n# Training the model, recommended to use GPU\nhistory = model.fit(data, epochs=50, callbacks=[checkpoint_callback])\n\n# Loading the model\nmodel = build_model(VOCAB_SIZE, EMBEDDING_DIM, RNN_UNITS, batch_size=1)\n\n# Find latest checkpoint\nmodel.load_weights(tf.train.latest_checkpoint(checkpoint_dir))\nmodel.build(tf.TensorShape([1, None]))\n\n# Loading a checkpoint (if there are any)\ncheckpoint_num = 10\nmodel.load_weights(tf.train.load_checkpoint(\"./training_checkpoints/ckpt_\" + str(checkpoint_num)))\nmodel.build(tf.TensorShape([1, None]))\n\n\n# Function to use for text prediction:\ndef generate_text(model, start_string):\n # Evaluation step (generating text using the 
learned model)\n\n # Number of characters to generate\n num_generate = 800\n\n # Converting our start string to numbers (vectorizing)\n input_eval = [char2idx[s] for s in start_string]\n input_eval = tf.expand_dims(input_eval, 0)\n\n # Empty string to store our results\n text_generated = []\n\n # Low temperatures results in more predictable text.\n # Higher temperatures results in more surprising text.\n # Experiment to find the best setting.\n temperature = 1.0\n\n # Here batch size == 1\n model.reset_states()\n for i in range(num_generate):\n predictions = model(input_eval)\n # remove the batch dimension\n \n predictions = tf.squeeze(predictions, 0)\n\n # using a categorical distribution to predict the character returned by the model\n predictions = predictions / temperature\n predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()\n\n # We pass the predicted character as the next input to the model\n # along with the previous hidden state\n input_eval = tf.expand_dims([predicted_id], 0)\n\n text_generated.append(idx2char[predicted_id])\n\n return (start_string + ''.join(text_generated))\n\n\n# Trying out our model by asking the user for input as a starting string and printing the output!\ninp = input(\"Type a starting string: \")\nprint(generate_text(model, inp))"
] |
[
[
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.TensorShape",
"tensorflow.train.latest_checkpoint",
"tensorflow.keras.layers.Embedding",
"tensorflow.random.categorical",
"numpy.reshape",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.expand_dims",
"tensorflow.squeeze",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.utils.get_file",
"numpy.array"
]
] |
navierula/Research-Fall-2017
|
[
"beec5f116d5487e6a4e0d48ec57ad80aaf2ec26f"
] |
[
"minMaxCalc/start_again.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 23 23:06:12 2017\n\n@author: navrajnarula\n\"\"\"\n\n#c = [0.5, 3, 6, 40, 90, 130.8, 129, 111, 8, 9, 0.01, 9, 40, 90, 130.1, 112, 108, 90, 77, 68, 0.9, 8, 40, 90, 92, 130.4]\n#c= [0, 10, 11, 48, 50.5, 0.48, 17, 18, 23, 29, 33, 34.67, 50.1, 0.09, 7, 41, 45, 50]\n\n#### METHOD 1: Create generator function\n\nlst = [-0.5, 44, 90, 132.22, 129.6, 89, 67.91, 12.5, 11, 0.0006, 10.2,\n 67, 89.07, 100, 132.224, 129.88, 120.1, 100, 89.5, 75, 40, 9.8, -0.4,\n 0.1, 90, 99, 112, 132.22,\n]\n\ndef get_groups(lst):\n up = False\n for i, (u, v) in enumerate(zip(lst, lst[1:])):\n if up:\n if v < u:\n yield 'End', i, u\n up = False\n else:\n if v > u:\n yield 'Start', i, u\n up = True\n if up:\n yield 'End', i + 1, lst[-1]\n\n#print(\"METHOD 1:\\n\")\n#for t in get_groups(lst):\n# print(t)\n \n#### METHOD 2: using numpy libraries\nfrom scipy.signal import argrelextrema\nimport numpy as np \n\nlst = [-0.5, 44, 90, 132.22, 129.6, 89, 67.91, 12.5, 11, 0.0006, 10.2, 67, 89.07, 100, 132.224, 129.88, 120.1, 100, 89.5, 75, 40, 9.8, -0.4, 0.1, 90, 99, 112, 132.22]\narr = np.array(lst)\n\n#Find local minimas index, add zero in the beginning\nminInd = np.insert(argrelextrema(arr, np.less),0,0)\n# Find local maximas index, add the length of arr - 1 at the end \nmaxInd = np.append(argrelextrema(arr,np.greater),[len(lst)-1])\n\n# numpy indexing and zip to combine the results\nend_arr = list(zip(zip(minInd,arr[minInd]),zip(maxInd,arr[maxInd])))\n\n##Printing the output\n#print(\"\\nMETHOD 2:\\n\")\n#for i in end_arr:\n# print('Start :' , i[0])\n# print('End:', i[1],'\\n')\n \n#### METHOD 3: Sorting\n \nload = lst\n \nload.sort(key=float) # previously key = int\n\ntotals = []\n\nfor count, items in enumerate(load):\n\n counter = count + 1\n last_object = (counter, load[count], load[(len(load)-1) - count])\n\n totals.append(last_object)\n \n#our_totals = totals[:3]\n#print(\"\\nMETHOD 3:\\n\")\n#print(our_totals)\n\n###############################################\nprint(\"\\nTrying on REAL data:\\n\")\n\nimport pandas as pd\n\n# read in dataset\nxl = pd.ExcelFile(\"data/130N_Cycles_1-47.xlsx\")\ndf = xl.parse(\"Specimen_RawData_1\")\ndf\n\n# append data from load column to list\nload = []\nfor item in df.index:\n load.append(df[\"Round\"][item])\n\n \n#### METHOD 1: Create generator function\n\nlst = load\n\ndef get_groups(lst):\n up = False\n for i, (u, v) in enumerate(zip(lst, lst[1:])):\n if up:\n if v < u:\n yield 'End', i, u\n up = False\n else:\n if v > u:\n yield 'Start', i, u\n up = True\n if up:\n yield 'End', i + 1, lst[-1]\n\nprint(\"METHOD 1:\\n\")\nfor t in get_groups(lst):\n print(t)\n#### METHOD 2: using numpy libraries\nfrom scipy.signal import argrelextrema\nimport numpy as np \n\nlst = load\narr = np.array(lst)\n\n#Find local minimas index, add zero in the beginning\nminInd = np.insert(argrelextrema(arr, np.less),0,0)\n# Find local maximas index, add the length of arr - 1 at the end \nmaxInd = np.append(argrelextrema(arr,np.greater),[len(lst)-1])\n\n# numpy indexing and zip to combine the results\nend_arr = list(zip(zip(minInd,arr[minInd]),zip(maxInd,arr[maxInd])))\n\n#Printing the output\n#print(\"\\nMETHOD 2:\\n\")\n#count = 0\n#for i in end_arr:\n# if count <= 47:\n# print('Start :' , i[0])\n# print('End:', i[1],'\\n')\n# count += 1\n \n#### METHOD 3: Sorting\n\n \nload.sort(key=float) # previously key = int\n\ntotals = []\n\nfor count, items in enumerate(load):\n\n counter = count + 1\n last_object = (counter, load[count], 
load[(len(load)-1) - count])\n\n totals.append(last_object)\n \n#our_totals = totals[:47]\n#print(\"\\nMETHOD 3:\\n\")\n#print(our_totals)\n \n# save the output in the list, min and the max\n# run it through my algorithm \n# compare them separately in algorithm\n \n# save rows in file --> rounding to 0 helps\n# play with rounding to comma 1,\n# compare it with the real data\n# check both minimin and maximum\n# do it manually for 2 - 3 different ones\n# clean the data from all bad values\n# use ROUND, not anything else!!!\n \n# redownloaded dataset\n"
] |
[
[
"numpy.array",
"scipy.signal.argrelextrema",
"pandas.ExcelFile"
]
] |
genfifth/cvopt |
[
"1e55461fec937cd5af7a786a1b942cd846782b27"
] |
[
"cvopt/search_setting/_base.py"
] |
[
"import numpy as np, scipy as sp\nimport types\nfrom hyperopt import hp\nfrom hyperopt.pyll import scope\n\nclass ParamDist(dict):\n \"\"\"\n cvopt standard param setting class.\n \"\"\"\n pass\n\n\ndef search_category(categories):\n \"\"\"\n Set search target distribution for categorical variable.\n\n Parameters\n ----------\n categories: list\n search target categories.\n\n Returns\n ----------\n cvopt.search_setting.PramDist\n setting class\n \"\"\"\n if not isinstance(categories, list):\n raise ValueError(\"categories is must be list\")\n paramdist = ParamDist(valtype=\"category\", categories=categories)\n return paramdist\n\n\ndef search_numeric(low, high, dtype):\n \"\"\"\n Set search target distribution for numerical variable.\n\n Parameters\n ----------\n low: int or float\n lower limit of search range.\n\n high: int or float\n high limit of search range.\n\n dtype: \"int\" or \"float\"\n variable's dtype.\n\n Returns\n ----------\n cvopt.search_setting.PramDist\n setting class\n \"\"\"\n if not dtype in [\"int\", \"float\"]:\n raise ValueError('dtype is must be \"int\" or \"float\"')\n \n paramdist = ParamDist(valtype=\"numeric\", \n low=low, \n high=high, \n dtype=dtype, \n )\n return paramdist\n\n\ndef to_func(x):\n if isinstance(x, types.FunctionType):\n return x\n else:\n def f(_x):\n return x\n return f\n\n\nclass category_sampler:\n def __init__(self, categories):\n self.categories = categories\n self.dist = sp.stats.randint(low=0, high=len(categories))\n \n def rvs(self):\n return self.categories[self.dist.rvs()]\n\ndef get_params(param_distributions, tgt_key=None):\n \"\"\"\n get params from param_distributions (dict, key:param_name, val:scipy.stat class).\n \"\"\"\n if tgt_key is None:\n ret = {}\n for key in param_distributions.keys():\n ret[key] = param_distributions[key].rvs()\n return ret\n else:\n return {tgt_key:param_distributions[tgt_key].rvs()}\n\n\[email protected]\ndef hpint(low, high):\n return max(low, high)\n\ndef _conv_hyperopt_param_dist(param_name, param_dist):\n if param_dist[\"valtype\"] == \"numeric\":\n if param_dist[\"dtype\"] == \"int\":\n param_dist = scope.hpint(int(param_dist[\"low\"]), hp.randint(param_name, int(param_dist[\"high\"])))\n elif param_dist[\"dtype\"] == \"float\":\n param_dist = hp.uniform(param_name, \n param_dist[\"low\"], \n param_dist[\"high\"]) \n elif param_dist[\"valtype\"] == \"category\":\n param_dist = hp.choice(param_name, param_dist[\"categories\"])\n\n return param_dist\n\ndef _conv_gpyopt_param_dist(param_name, param_dist):\n if param_dist[\"valtype\"] == \"numeric\":\n if param_dist[\"dtype\"] == \"int\":\n param_dist = {\"name\":param_name, \"type\":\"discrete\", \n \"domain\":np.arange(int(param_dist[\"low\"]), int(param_dist[\"high\"])+1).astype(int)}\n elif param_dist[\"dtype\"] == \"float\":\n param_dist = {\"name\":param_name, \"type\":\"continuous\", \n \"domain\":(param_dist[\"low\"], param_dist[\"high\"])} \n elif param_dist[\"valtype\"] == \"category\":\n param_dist = {\"name\":param_name, \"type\":\"categorical\", \n \"domain\":np.arange(len(param_dist[\"categories\"])), \n \"categories\":param_dist[\"categories\"]}\n return param_dist\n\ndef _conv_ga_param_dist(param_name, param_dist):\n if param_dist[\"valtype\"] == \"numeric\":\n if param_dist[\"dtype\"] == \"int\":\n param_dist = sp.stats.randint(low=param_dist[\"low\"], high=param_dist[\"high\"])\n elif param_dist[\"dtype\"] == \"float\":\n param_dist = sp.stats.uniform(loc=param_dist[\"low\"], scale=param_dist[\"high\"]-param_dist[\"low\"]) \n elif 
param_dist[\"valtype\"] == \"category\":\n param_dist = category_sampler(categories=param_dist[\"categories\"])\n\n return param_dist\n\ndef conv_param_distributions(param_distributions, backend):\n \"\"\"\n Convert param_distributions from cvopt style to backend style.\n \"\"\"\n if backend == \"hyperopt\":\n ret = {}\n elif backend == \"bayesopt\":\n ret = []\n elif backend == \"gaopt\":\n ret = {}\n \n for param_name in param_distributions:\n if type(param_distributions[param_name]) == ParamDist:\n try:\n if backend == \"hyperopt\":\n ret[param_name] = _conv_hyperopt_param_dist(param_name, param_distributions[param_name])\n elif backend == \"bayesopt\":\n ret.append(_conv_gpyopt_param_dist(param_name, param_distributions[param_name]))\n elif backend == \"gaopt\":\n ret[param_name] = _conv_ga_param_dist(param_name, param_distributions[param_name])\n except Exception as e:\n raise ValueError(\"parameter:\"+ param_name + \"'s setting is not supported.\")\n \n else:\n if backend == \"hyperopt\":\n ret[param_name] = param_distributions[param_name]\n elif backend == \"bayesopt\":\n if(param_distributions[param_name][\"type\"]==\"categorical\") & (\"categories\" not in param_distributions[param_name]):\n raise Exception(\"If type is categorical, parameter_distributions's value must have `categories` key.\")\n ret.append(param_distributions[param_name])\n elif backend == \"gaopt\":\n if isinstance(param_distributions[param_name], sp.stats._distn_infrastructure.rv_frozen):\n ret[param_name] = param_distributions[param_name]\n else:\n raise Exception(\"parameter_distributions's value must be search_setting.search_numeric, search_setting.search_category, or scipy.stats class.\")\n \n return ret\n\n\ndef decode_params(params, param_distributions, backend):\n \"\"\"\n Decode params from backend style to dict(key:param name, value:param value).\n \"\"\"\n if backend == \"hyperopt\":\n return params\n elif backend == \"bayesopt\":\n ret = {}\n for i, param_dist in enumerate(param_distributions):\n if param_dist[\"type\"] == \"categorical\":\n ret[param_dist[\"name\"]] = param_dist[\"categories\"][int(params[0, i])]\n elif param_dist[\"type\"] == \"discrete\":\n ret[param_dist[\"name\"]] = int(params[0, i])\n else:\n ret[param_dist[\"name\"]] = params[0, i]\n return ret \n elif backend == \"gaopt\":\n return params\n"
] |
[
[
"scipy.stats.randint",
"scipy.stats.uniform"
]
] |
Steffy-zxf/HubModule |
[
"40b0563f86634714033ab7712a08a58eba81bad1"
] |
[
"modules/text/semantic_model/simnet_bow/module.py"
] |
[
"# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport ast\nimport json\nimport math\nimport os\nimport six\n\nimport numpy as np\nimport paddle.fluid as fluid\nfrom paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor\nimport paddlehub as hub\nfrom paddlehub.common.paddle_helper import get_variable_info\nfrom paddlehub.common.utils import sys_stdin_encoding\nfrom paddlehub.io.parser import txt_parser\nfrom paddlehub.module.module import serving\nfrom paddlehub.module.module import moduleinfo\nfrom paddlehub.module.module import runnable\n\nfrom simnet_bow.processor import load_vocab, preprocess, postprocess\n\n\nclass DataFormatError(Exception):\n def __init__(self, *args):\n self.args = args\n\n\n@moduleinfo(\n name=\"simnet_bow\",\n version=\"1.1.0\",\n summary=\n \"Baidu's open-source similarity network model based on bow_pairwise.\",\n author=\"baidu-nlp\",\n author_email=\"\",\n type=\"nlp/sentiment_analysis\")\nclass SimnetBow(hub.Module):\n def _initialize(self):\n \"\"\"\n initialize with the necessary elements\n \"\"\"\n self.pretrained_model_path = os.path.join(self.directory, \"infer_model\")\n self.vocab_path = os.path.join(self.directory, \"assets\", \"vocab.txt\")\n self.vocab = load_vocab(self.vocab_path)\n self.param_file = os.path.join(self.directory, \"assets\", \"params.txt\")\n self._word_seg_module = None\n\n self._set_config()\n\n @property\n def word_seg_module(self):\n \"\"\"\n lac module\n \"\"\"\n if not self._word_seg_module:\n self._word_seg_module = hub.Module(name=\"lac\")\n return self._word_seg_module\n\n def _set_config(self):\n \"\"\"\n predictor config setting\n \"\"\"\n cpu_config = AnalysisConfig(self.pretrained_model_path)\n cpu_config.disable_glog_info()\n cpu_config.disable_gpu()\n cpu_config.switch_ir_optim(False)\n self.cpu_predictor = create_paddle_predictor(cpu_config)\n\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n use_gpu = True\n except:\n use_gpu = False\n if use_gpu:\n gpu_config = AnalysisConfig(self.pretrained_model_path)\n gpu_config.disable_glog_info()\n gpu_config.enable_use_gpu(memory_pool_init_size_mb=500, device_id=0)\n self.gpu_predictor = create_paddle_predictor(gpu_config)\n\n def context(self, trainable=False):\n \"\"\"\n Get the input ,output and program of the pretrained simnet_bow\n Args:\n trainable(bool): whether fine-tune the pretrained parameters of simnet_bow or not\n Returns:\n inputs(dict): the input variables of simnet_bow (words)\n outputs(dict): the output variables of simnet_bow (the sentiment prediction results)\n main_program(Program): the main_program of lac with pretrained prameters\n \"\"\"\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n program, feed_target_names, fetch_targets = fluid.io.load_inference_model(\n dirname=self.pretrained_model_path, executor=exe)\n with open(self.param_file, 'r') as file:\n params_list = file.readlines()\n for param in params_list:\n param = param.strip()\n var = program.global_block().var(param)\n var_info = get_variable_info(var)\n\n program.global_block().create_parameter(\n shape=var_info['shape'],\n dtype=var_info['dtype'],\n name=var_info['name'])\n\n for param in program.global_block().iter_parameters():\n param.trainable = trainable\n inputs = {}\n for name, var in program.global_block().vars.items():\n if name == feed_target_names[0]:\n inputs[\"text_1\"] = var\n if name == 
feed_target_names[1]:\n inputs[\"text_2\"] = var\n # output of sencond layer from the end prediction layer (fc-softmax)\n outputs = {\n \"left_feature\": fetch_targets[0],\n \"similarity\": fetch_targets[1]\n }\n return inputs, outputs, program\n\n def texts2tensor(self, texts):\n \"\"\"\n Tranform the texts(dict) to PaddleTensor\n Args:\n texts(dict): texts\n Returns:\n tensor(PaddleTensor): tensor with texts data\n \"\"\"\n lod = [0]\n data = []\n for i, text in enumerate(texts):\n data += text['processed']\n lod.append(len(text['processed']) + lod[i])\n tensor = PaddleTensor(np.array(data).astype('int64'))\n tensor.name = \"words\"\n tensor.lod = [lod]\n tensor.shape = [lod[-1], 1]\n return tensor\n\n def to_unicode(self, texts):\n \"\"\"\n Convert each element's type(str) of texts(list) to unicode in python2.7\n Args:\n texts(list): each element's type is str in python2.7\n Returns:\n texts(list): each element's type is unicode in python2.7\n \"\"\"\n\n if six.PY2:\n unicode_texts = []\n for text in texts:\n if not isinstance(text, unicode):\n unicode_texts.append(\n text.decode(sys_stdin_encoding()).decode(\"utf8\"))\n else:\n unicode_texts.append(text)\n texts = unicode_texts\n return texts\n\n def check_data(self, texts=[], data={}):\n \"\"\"\n check input data\n Args:\n texts(list): the input texts to be predicted which the first element is text_1(list)\n and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']]\n if texts not data.\n data(dict): key must be 'text_1' and 'text_2', value is the texts(list) to be predicted\n Returns:\n results(dict): predicted data\n \"\"\"\n predicted_data = {'text_1': [], 'text_2': []}\n if texts != [] and isinstance(texts, list) and len(texts) == 2 and (len(\n texts[0]) == len(\n texts[1])) and texts[0] and texts[1] and data == {}:\n\n predicted_data['text_1'] = texts[0]\n predicted_data['text_2'] = texts[1]\n\n elif texts == [] and isinstance(data, dict) and isinstance(\n data.get('text_1', None), list) and isinstance(\n data.get('text_2', None),\n list) and (len(data['text_1']) == len(\n data['text_2'])) and data['text_1'] and data['text_2']:\n\n predicted_data = data\n\n else:\n raise ValueError(\n \"The input data is inconsistent with expectations.\")\n\n return predicted_data\n\n @serving\n def similarity(self, texts=[], data={}, use_gpu=False, batch_size=1):\n \"\"\"\n Get the sentiment prediction results results with the texts as input\n Args:\n texts(list): the input texts to be predicted which the first element is text_1(list)\n and the second element is text_2(list), such as [['这道题很难'], ['这道题不简单']]\n if texts not data.\n data(dict): key must be 'text_1' and 'text_2', value is the texts(list) to be predicted\n use_gpu(bool): whether use gpu to predict or not\n batch_size(int): the program deals once with one batch\n Returns:\n results(list): the word segmentation results\n \"\"\"\n try:\n _places = os.environ[\"CUDA_VISIBLE_DEVICES\"]\n int(_places[0])\n except:\n use_gpu = False\n\n data = self.check_data(texts, data)\n\n start_idx = 0\n iteration = int(math.ceil(len(data['text_1']) / batch_size))\n results = []\n for i in range(iteration):\n batch_data = {'text_1': [], 'text_2': []}\n if i < (iteration - 1):\n batch_data['text_1'] = data['text_1'][start_idx:(\n start_idx + batch_size)]\n batch_data['text_2'] = data['text_2'][start_idx:(\n start_idx + batch_size)]\n else:\n batch_data['text_1'] = data['text_1'][start_idx:(\n start_idx + batch_size)]\n batch_data['text_2'] = data['text_2'][start_idx:(\n start_idx + 
batch_size)]\n start_idx = start_idx + batch_size\n processed_results = preprocess(self.word_seg_module, self.vocab,\n batch_data, use_gpu, batch_size)\n\n tensor_words_1 = self.texts2tensor(processed_results[\"text_1\"])\n tensor_words_2 = self.texts2tensor(processed_results[\"text_2\"])\n\n if use_gpu:\n batch_out = self.gpu_predictor.run(\n [tensor_words_1, tensor_words_2])\n else:\n batch_out = self.cpu_predictor.run(\n [tensor_words_1, tensor_words_2])\n batch_result = postprocess(batch_out[1], processed_results)\n results += batch_result\n return results\n\n @runnable\n def run_cmd(self, argvs):\n \"\"\"\n Run as a command\n \"\"\"\n self.parser = argparse.ArgumentParser(\n description=\"Run the simnet_bow module.\",\n prog='hub run simnet_bow',\n usage='%(prog)s',\n add_help=True)\n\n self.arg_input_group = self.parser.add_argument_group(\n title=\"Input options\", description=\"Input data. Required\")\n self.arg_config_group = self.parser.add_argument_group(\n title=\"Config options\",\n description=\n \"Run configuration for controlling module behavior, not required.\")\n\n self.add_module_config_arg()\n self.add_module_input_arg()\n\n args = self.parser.parse_args(argvs)\n\n try:\n input_data = self.check_input_data(args)\n except DataFormatError and RuntimeError:\n self.parser.print_help()\n return None\n\n results = self.similarity(\n data=input_data, use_gpu=args.use_gpu, batch_size=args.batch_size)\n\n return results\n\n def add_module_config_arg(self):\n \"\"\"\n Add the command config options\n \"\"\"\n self.arg_config_group.add_argument(\n '--use_gpu',\n type=ast.literal_eval,\n default=False,\n help=\"whether use GPU for prediction\")\n\n self.arg_config_group.add_argument(\n '--batch_size',\n type=int,\n default=1,\n help=\"batch size for prediction\")\n\n def add_module_input_arg(self):\n \"\"\"\n Add the command input options\n \"\"\"\n self.arg_input_group.add_argument(\n '--input_file',\n type=str,\n default=None,\n help=\"file contain input data\")\n self.arg_input_group.add_argument(\n '--text_1', type=str, default=None, help=\"text to predict\")\n self.arg_input_group.add_argument(\n '--text_2', type=str, default=None, help=\"text to predict\")\n\n def check_input_data(self, args):\n input_data = {}\n if args.input_file:\n if not os.path.exists(args.input_file):\n print(\"File %s is not exist.\" % args.input_file)\n raise RuntimeError\n else:\n input_data = txt_parser.parse(args.input_file, use_strip=True)\n elif args.text_1 and args.text_2:\n if args.text_1.strip() != '' and args.text_2.strip() != '':\n if six.PY2:\n input_data = {\n \"text_1\": [\n args.text_1.strip().decode(\n sys_stdin_encoding()).decode(\"utf8\")\n ],\n \"text_2\": [\n args.text_2.strip().decode(\n sys_stdin_encoding()).decode(\"utf8\")\n ]\n }\n else:\n input_data = {\n \"text_1\": [args.text_1],\n \"text_2\": [args.text_2]\n }\n else:\n print(\n \"ERROR: The input data is inconsistent with expectations.\")\n\n if input_data == {}:\n print(\"ERROR: The input data is inconsistent with expectations.\")\n raise DataFormatError\n\n return input_data\n\n def get_vocab_path(self):\n \"\"\"\n Get the path to the vocabulary whih was used to pretrain\n Returns:\n self.vocab_path(str): the path to vocabulary\n \"\"\"\n return self.vocab_path\n\n\nif __name__ == \"__main__\":\n\n simnet_bow = SimnetBow()\n simnet_bow.context()\n # Data to be predicted\n test_text_1 = [\"这道题太难了\", \"这道题太难了\", \"这道题太难了\"]\n test_text_2 = [\"这道题是上一年的考题\", \"这道题不简单\", \"这道题很有意思\"]\n\n inputs = {\"text_1\": test_text_1, 
\"text_2\": test_text_2}\n results = simnet_bow.similarity(data=inputs, batch_size=2)\n print(results)\n max_score = -1\n result_text = \"\"\n for result in results:\n if result['similarity'] > max_score:\n max_score = result['similarity']\n result_text = result['text_2']\n\n print(\"The most matching with the %s is %s\" % (test_text_1[0], result_text))\n"
] |
[
[
"numpy.array"
]
] |
raghavgupta0296/ASL |
[
"5012e1f2fa66b7f75b22f576003c8be50c59286e"
] |
[
"testCNN.py"
] |
[
"import numpy as np\r\nimport tensorflow as tf\r\nimport cv2\r\n\r\nclass testing:\r\n\r\n def ini_wt(self,shape):\r\n initial = tf.truncated_normal(shape, stddev=0.1)\r\n return tf.Variable(initial)\r\n def ini_bias(self,shape):\r\n initial = tf.constant(0.1, shape=shape)\r\n return tf.Variable(initial)\r\n def conv_2d(self,x, W):\r\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\r\n def max_pool(self,x):\r\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\r\n\r\n def __init__(self):\r\n self.im_ht = 100\r\n self.im_wid = 100\r\n self.no_labels = 2\r\n\r\n self.W1 = self.ini_wt([5, 5, 1, 32])\r\n self.b1 = self.ini_bias([32])\r\n self.W2 = self.ini_wt([5, 5, 32, 64])\r\n self.b2 = self.ini_bias([64])\r\n self.W3 = self.ini_wt([int(self.im_ht / 4 * self.im_wid / 4 * 64), 1024])\r\n self.b3 = self.ini_bias([1024])\r\n self.W4 = self.ini_wt([1024, self.no_labels])\r\n self.b4 = self.ini_bias([self.no_labels])\r\n\r\n init_op = tf.global_variables_initializer()\r\n saver = tf.train.Saver()\r\n\r\n self.sess = tf.Session()\r\n self.sess.run(init_op)\r\n saver.restore(self.sess, \"./tfWeights\")\r\n \r\n def test_im(self,im):\r\n im = cv2.resize(im,(self.im_ht,self.im_wid))\r\n \r\n X = tf.placeholder(tf.float32, shape=[None, self.im_ht, self.im_wid, 1])\r\n \r\n im = np.reshape(im,(1,im.shape[0],im.shape[1],1))\r\n\r\n # conv-pool layer1\r\n conv = tf.nn.relu(self.conv_2d(X, self.W1) + self.b1)\r\n maxPool = self.max_pool(conv)\r\n\r\n # conv-pool layer2\r\n conv = tf.nn.relu(self.conv_2d(maxPool, self.W2) + self.b2)\r\n maxPool = self.max_pool(conv)\r\n\r\n # Dense Layer1\r\n r = tf.reshape(maxPool, [-1, int(self.im_ht / 4 * self.im_wid / 4 * 64)])\r\n r = tf.nn.relu(tf.matmul(r, self.W3) + self.b3)\r\n\r\n # Dense Layer2 - label\r\n r = tf.matmul(r, self.W4) + self.b4\r\n\r\n r = tf.argmax(r,1)[0]\r\n\r\n r = self.sess.run(r,feed_dict={X:im})\r\n\r\n letters = [chr(i) for i in range(65, 67)] # 91\r\n num2al = dict(zip(range(len(letters)),letters))\r\n r = num2al[r]\r\n print (r)\r\n\r\nif __name__ == '__main__':\r\n im = cv2.imread(\"20.png\",-1)\r\n t = testing()\r\n t.test_im(im)"
] |
[
[
"tensorflow.matmul",
"tensorflow.constant",
"tensorflow.truncated_normal",
"tensorflow.Variable",
"tensorflow.nn.max_pool",
"numpy.reshape",
"tensorflow.placeholder",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.train.Saver",
"tensorflow.argmax",
"tensorflow.nn.conv2d"
]
] |
Qin-Folks/I2L-MeshNet_RELEASE |
[
"2749441e03ae77d42837a4d8f0287e537d5e768c"
] |
[
"common/utils/preprocessing.py"
] |
[
"import numpy as np\nimport cv2\nimport random\nfrom config import cfg\nimport math\n\ndef load_img(path, order='RGB'):\n img = cv2.imread(path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)\n if not isinstance(img, np.ndarray):\n raise IOError(\"Fail to read %s\" % path)\n\n if order=='RGB':\n img = img[:,:,::-1].copy()\n \n img = img.astype(np.float32)\n return img\n\ndef get_bbox(joint_img, joint_valid):\n\n x_img, y_img = joint_img[:,0], joint_img[:,1]\n x_img = x_img[joint_valid==1]; y_img = y_img[joint_valid==1];\n xmin = min(x_img); ymin = min(y_img); xmax = max(x_img); ymax = max(y_img);\n\n x_center = (xmin+xmax)/2.; width = xmax-xmin;\n xmin = x_center - 0.5*width*1.2\n xmax = x_center + 0.5*width*1.2\n \n y_center = (ymin+ymax)/2.; height = ymax-ymin;\n ymin = y_center - 0.5*height*1.2\n ymax = y_center + 0.5*height*1.2\n\n bbox = np.array([xmin, ymin, xmax - xmin, ymax - ymin]).astype(np.float32)\n return bbox\n\ndef process_bbox(bbox, img_width, img_height):\n # sanitize bboxes\n x, y, w, h = bbox\n x1 = np.max((0, x))\n y1 = np.max((0, y))\n x2 = np.min((img_width - 1, x1 + np.max((0, w - 1))))\n y2 = np.min((img_height - 1, y1 + np.max((0, h - 1))))\n if w*h > 0 and x2 >= x1 and y2 >= y1:\n bbox = np.array([x1, y1, x2-x1, y2-y1])\n else:\n return None\n\n # aspect ratio preserving bbox\n w = bbox[2]\n h = bbox[3]\n c_x = bbox[0] + w/2.\n c_y = bbox[1] + h/2.\n aspect_ratio = cfg.input_img_shape[1]/cfg.input_img_shape[0]\n if w > aspect_ratio * h:\n h = w / aspect_ratio\n elif w < aspect_ratio * h:\n w = h * aspect_ratio\n bbox[2] = w*1.25\n bbox[3] = h*1.25\n bbox[0] = c_x - bbox[2]/2.\n bbox[1] = c_y - bbox[3]/2.\n\n return bbox\n\ndef get_aug_config(exclude_flip):\n scale_factor = 0.25\n rot_factor = 30\n color_factor = 0.2\n \n scale = np.clip(np.random.randn(), -1.0, 1.0) * scale_factor + 1.0\n rot = np.clip(np.random.randn(), -2.0,\n 2.0) * rot_factor if random.random() <= 0.6 else 0\n c_up = 1.0 + color_factor\n c_low = 1.0 - color_factor\n color_scale = np.array([random.uniform(c_low, c_up), random.uniform(c_low, c_up), random.uniform(c_low, c_up)])\n if exclude_flip:\n do_flip = False\n else:\n do_flip = random.random() <= 0.5\n\n return scale, rot, color_scale, do_flip\n\ndef augmentation(img, bbox, data_split, exclude_flip=False):\n if data_split == 'train':\n scale, rot, color_scale, do_flip = get_aug_config(exclude_flip)\n else:\n scale, rot, color_scale, do_flip = 1.0, 0.0, np.array([1,1,1]), False\n \n img, trans, inv_trans = generate_patch_image(img, bbox, scale, rot, do_flip, cfg.input_img_shape)\n img = np.clip(img * color_scale[None,None,:], 0, 255)\n return img, trans, inv_trans, rot, do_flip\n\ndef generate_patch_image(cvimg, bbox, scale, rot, do_flip, out_shape):\n img = cvimg.copy()\n img_height, img_width, img_channels = img.shape\n \n bb_c_x = float(bbox[0] + 0.5*bbox[2])\n bb_c_y = float(bbox[1] + 0.5*bbox[3])\n bb_width = float(bbox[2])\n bb_height = float(bbox[3])\n\n if do_flip:\n img = img[:, ::-1, :]\n bb_c_x = img_width - bb_c_x - 1\n\n trans = gen_trans_from_patch_cv(bb_c_x, bb_c_y, bb_width, bb_height, out_shape[1], out_shape[0], scale, rot)\n img_patch = cv2.warpAffine(img, trans, (int(out_shape[1]), int(out_shape[0])), flags=cv2.INTER_LINEAR)\n img_patch = img_patch.astype(np.float32)\n inv_trans = gen_trans_from_patch_cv(bb_c_x, bb_c_y, bb_width, bb_height, out_shape[1], out_shape[0], scale, rot, inv=True)\n\n return img_patch, trans, inv_trans\n\ndef rotate_2d(pt_2d, rot_rad):\n x = pt_2d[0]\n y = pt_2d[1]\n sn, cs = np.sin(rot_rad), 
np.cos(rot_rad)\n xx = x * cs - y * sn\n yy = x * sn + y * cs\n return np.array([xx, yy], dtype=np.float32)\n\ndef gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv=False):\n # augment size with scale\n src_w = src_width * scale\n src_h = src_height * scale\n src_center = np.array([c_x, c_y], dtype=np.float32)\n\n # augment rotation\n rot_rad = np.pi * rot / 180\n src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)\n src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad)\n\n dst_w = dst_width\n dst_h = dst_height\n dst_center = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)\n dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32)\n dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32)\n\n src = np.zeros((3, 2), dtype=np.float32)\n src[0, :] = src_center\n src[1, :] = src_center + src_downdir\n src[2, :] = src_center + src_rightdir\n\n dst = np.zeros((3, 2), dtype=np.float32)\n dst[0, :] = dst_center\n dst[1, :] = dst_center + dst_downdir\n dst[2, :] = dst_center + dst_rightdir\n \n if inv:\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\n else:\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\n\n trans = trans.astype(np.float32)\n return trans\n\n"
] |
[
[
"numpy.clip",
"numpy.cos",
"numpy.sin",
"numpy.max",
"numpy.random.randn",
"numpy.float32",
"numpy.array",
"numpy.zeros"
]
] |
RickOnEarth/pointpillars_based_CLOCs |
[
"c6d4576a151540200dac2354b00dc4ecce6ee72d"
] |
[
"second/core/box_np_ops.py"
] |
[
"import numba\nfrom pathlib import Path\nimport numpy as np\nfrom second.utils.buildtools.pybind11_build import load_pb11\n\nfrom second.core.geometry import points_in_convex_polygon_3d_jit\nfrom second.core.non_max_suppression.nms_gpu import rotate_iou_gpu_eval\n\ntry:\n from second.core import box_ops_cc\nexcept:\n current_dir = Path(__file__).resolve().parents[0]\n box_ops_cc = load_pb11([\"./cc/box_ops.cc\"], current_dir / \"box_ops_cc.so\", current_dir)\n\n\ndef riou_cc(rbboxes, qrbboxes, standup_thresh=0.0):\n # less than 50ms when used in second one thread. 10x slower than gpu\n boxes_corners = center_to_corner_box2d(rbboxes[:, :2], rbboxes[:, 2:4],\n rbboxes[:, 4])\n boxes_standup = corner_to_standup_nd(boxes_corners)\n qboxes_corners = center_to_corner_box2d(qrbboxes[:, :2], qrbboxes[:, 2:4],\n qrbboxes[:, 4])\n qboxes_standup = corner_to_standup_nd(qboxes_corners)\n # if standup box not overlapped, rbbox not overlapped too.\n standup_iou = iou_jit(boxes_standup, qboxes_standup, eps=0.0)\n return box_ops_cc.rbbox_iou(boxes_corners, qboxes_corners, standup_iou,\n standup_thresh)\n\n\ndef second_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):\n \"\"\"box encode for VoxelNet in lidar\n Args:\n boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r\n anchors ([N, 7] Tensor): anchors\n encode_angle_to_vector: bool. increase aos performance, \n decrease other performance.\n \"\"\"\n # need to convert boxes to z-center format\n xa, ya, za, wa, la, ha, ra = np.split(anchors, 7, axis=-1)\n xg, yg, zg, wg, lg, hg, rg = np.split(boxes, 7, axis=-1)\n zg = zg + hg / 2\n za = za + ha / 2\n diagonal = np.sqrt(la**2 + wa**2) # 4.3\n xt = (xg - xa) / diagonal\n yt = (yg - ya) / diagonal\n\n zt = (zg - za) / ha # 1.6\n if smooth_dim:\n lt = lg / la - 1\n wt = wg / wa - 1\n ht = hg / ha - 1\n else:\n lt = np.log(lg / la)\n wt = np.log(wg / wa)\n ht = np.log(hg / ha)\n if encode_angle_to_vector:\n rgx = np.cos(rg)\n rgy = np.sin(rg)\n rax = np.cos(ra)\n ray = np.sin(ra)\n rtx = rgx - rax\n rty = rgy - ray\n return np.concatenate([xt, yt, zt, wt, lt, ht, rtx, rty], axis=-1)\n else:\n rt = rg - ra\n return np.concatenate([xt, yt, zt, wt, lt, ht, rt], axis=-1)\n\n\ndef second_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):\n \"\"\"box decode for VoxelNet in lidar\n Args:\n boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r\n anchors ([N, 7] Tensor): anchors\n \"\"\"\n # need to convert box_encodings to z-bottom format\n xa, ya, za, wa, la, ha, ra = np.split(anchors, 7, axis=-1)\n if encode_angle_to_vector:\n xt, yt, zt, wt, lt, ht, rtx, rty = np.split(box_encodings, 8, axis=-1)\n else:\n xt, yt, zt, wt, lt, ht, rt = np.split(box_encodings, 7, axis=-1)\n za = za + ha / 2\n diagonal = np.sqrt(la**2 + wa**2)\n xg = xt * diagonal + xa\n yg = yt * diagonal + ya\n\n zg = zt * ha + za\n if smooth_dim:\n lg = (lt + 1) * la\n wg = (wt + 1) * wa\n hg = (ht + 1) * ha\n else:\n lg = np.exp(lt) * la\n wg = np.exp(wt) * wa\n hg = np.exp(ht) * ha\n if encode_angle_to_vector:\n rax = np.cos(ra)\n ray = np.sin(ra)\n rgx = rtx + rax\n rgy = rty + ray\n rg = np.arctan2(rgy, rgx)\n else:\n rg = rt + ra\n zg = zg - hg / 2\n return np.concatenate([xg, yg, zg, wg, lg, hg, rg], axis=-1)\n\ndef bev_box_encode(boxes, anchors, encode_angle_to_vector=False, smooth_dim=False):\n \"\"\"box encode for VoxelNet in lidar\n Args:\n boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r\n anchors ([N, 7] Tensor): anchors\n encode_angle_to_vector: bool. 
increase aos performance, \n decrease other performance.\n \"\"\"\n # need to convert boxes to z-center format\n xa, ya, wa, la, ra = np.split(anchors, 5, axis=-1)\n xg, yg, wg, lg, rg = np.split(boxes, 5, axis=-1)\n diagonal = np.sqrt(la**2 + wa**2) # 4.3\n xt = (xg - xa) / diagonal\n yt = (yg - ya) / diagonal\n if smooth_dim:\n lt = lg / la - 1\n wt = wg / wa - 1\n else:\n lt = np.log(lg / la)\n wt = np.log(wg / wa)\n if encode_angle_to_vector:\n rgx = np.cos(rg)\n rgy = np.sin(rg)\n rax = np.cos(ra)\n ray = np.sin(ra)\n rtx = rgx - rax\n rty = rgy - ray\n return np.concatenate([xt, yt, wt, lt, rtx, rty], axis=-1)\n else:\n rt = rg - ra\n return np.concatenate([xt, yt, wt, lt, rt], axis=-1)\n\n\ndef bev_box_decode(box_encodings, anchors, encode_angle_to_vector=False, smooth_dim=False):\n \"\"\"box decode for VoxelNet in lidar\n Args:\n boxes ([N, 7] Tensor): normal boxes: x, y, z, w, l, h, r\n anchors ([N, 7] Tensor): anchors\n \"\"\"\n # need to convert box_encodings to z-bottom format\n xa, ya, wa, la, ra = np.split(anchors, 5, axis=-1)\n if encode_angle_to_vector:\n xt, yt, wt, lt, rtx, rty = np.split(box_encodings, 6, axis=-1)\n else:\n xt, yt, wt, lt, rt = np.split(box_encodings, 5, axis=-1)\n diagonal = np.sqrt(la**2 + wa**2)\n xg = xt * diagonal + xa\n yg = yt * diagonal + ya\n if smooth_dim:\n lg = (lt + 1) * la\n wg = (wt + 1) * wa\n else:\n lg = np.exp(lt) * la\n wg = np.exp(wt) * wa\n if encode_angle_to_vector:\n rax = np.cos(ra)\n ray = np.sin(ra)\n rgx = rtx + rax\n rgy = rty + ray\n rg = np.arctan2(rgy, rgx)\n else:\n rg = rt + ra\n return np.concatenate([xg, yg, wg, lg, rg], axis=-1)\n\ndef corners_nd(dims, origin=0.5):\n \"\"\"generate relative box corners based on length per dim and\n origin point. \n \n Args:\n dims (float array, shape=[N, ndim]): array of length per dim\n origin (list or array or float): origin point relate to smallest point.\n \n Returns:\n float array, shape=[N, 2 ** ndim, ndim]: returned corners. 
\n point layout example: (2d) x0y0, x0y1, x1y0, x1y1;\n (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n where x0 < x1, y0 < y1, z0 < z1\n \"\"\"\n ndim = int(dims.shape[1])\n corners_norm = np.stack(\n np.unravel_index(np.arange(2**ndim), [2] * ndim), axis=1).astype(\n dims.dtype)\n # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1\n # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1\n # so need to convert to a format which is convenient to do other computing.\n # for 2d boxes, format is clockwise start with minimum point\n # for 3d boxes, please draw lines by your hand.\n if ndim == 2:\n # generate clockwise box corners\n corners_norm = corners_norm[[0, 1, 3, 2]]\n elif ndim == 3:\n corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\n corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape(\n [1, 2**ndim, ndim])\n return corners\n\n\[email protected]\ndef corners_2d_jit(dims, origin=0.5):\n ndim = 2\n corners_norm = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=dims.dtype)\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\n corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape(\n (1, 2**ndim, ndim))\n return corners\n\[email protected]\ndef corners_3d_jit(dims, origin=0.5):\n ndim = 3\n corners_norm = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1], dtype=dims.dtype).reshape((8, 3))\n corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]\n corners_norm = corners_norm - np.array(origin, dtype=dims.dtype)\n corners = dims.reshape((-1, 1, ndim)) * corners_norm.reshape(\n (1, 2**ndim, ndim))\n return corners\n\[email protected]\ndef corner_to_standup_nd_jit(boxes_corner):\n num_boxes = boxes_corner.shape[0]\n ndim = boxes_corner.shape[-1]\n result = np.zeros((num_boxes, ndim * 2), dtype=boxes_corner.dtype)\n for i in range(num_boxes):\n for j in range(ndim):\n result[i, j] = np.min(boxes_corner[i, :, j])\n for j in range(ndim):\n result[i, j + ndim] = np.max(boxes_corner[i, :, j])\n return result\n\n\ndef corner_to_standup_nd(boxes_corner):\n assert len(boxes_corner.shape) == 3\n standup_boxes = []\n standup_boxes.append(np.min(boxes_corner, axis=1))\n standup_boxes.append(np.max(boxes_corner, axis=1))\n return np.concatenate(standup_boxes, -1)\n\n\ndef rbbox2d_to_near_bbox(rbboxes):\n \"\"\"convert rotated bbox to nearest 'standing' or 'lying' bbox.\n Args:\n rbboxes: [N, 5(x, y, xdim, ydim, rad)] rotated bboxes\n Returns:\n bboxes: [N, 4(xmin, ymin, xmax, ymax)] bboxes\n \"\"\"\n rots = rbboxes[..., -1]\n rots_0_pi_div_2 = np.abs(limit_period(rots, 0.5, np.pi))\n cond = (rots_0_pi_div_2 > np.pi / 4)[..., np.newaxis]\n bboxes_center = np.where(cond, rbboxes[:, [0, 1, 3, 2]], rbboxes[:, :4])\n bboxes = center_to_minmax_2d(bboxes_center[:, :2], bboxes_center[:, 2:])\n return bboxes\n\n\ndef rotation_3d_in_axis(points, angles, axis=0):\n # points: [N, point_size, 3]\n rot_sin = np.sin(angles)\n rot_cos = np.cos(angles)\n ones = np.ones_like(rot_cos)\n zeros = np.zeros_like(rot_cos)\n if axis == 1:\n rot_mat_T = np.stack([[rot_cos, zeros, -rot_sin], [zeros, ones, zeros],\n [rot_sin, zeros, rot_cos]])\n elif axis == 2 or axis == -1:\n rot_mat_T = np.stack([[rot_cos, -rot_sin, zeros],\n [rot_sin, rot_cos, zeros], [zeros, zeros, ones]])\n elif axis == 0:\n rot_mat_T = np.stack([[zeros, rot_cos, -rot_sin],\n [zeros, rot_sin, rot_cos], [ones, zeros, zeros]])\n else:\n raise ValueError(\"axis should in range\")\n\n 
return np.einsum('aij,jka->aik', points, rot_mat_T)\n\n\ndef rotation_points_single_angle(points, angle, axis=0):\n # points: [N, 3]\n rot_sin = np.sin(angle)\n rot_cos = np.cos(angle)\n if axis == 1:\n rot_mat_T = np.array(\n [[rot_cos, 0, -rot_sin], [0, 1, 0], [rot_sin, 0, rot_cos]],\n dtype=points.dtype)\n elif axis == 2 or axis == -1:\n rot_mat_T = np.array(\n [[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0], [0, 0, 1]],\n dtype=points.dtype)\n elif axis == 0:\n rot_mat_T = np.array(\n [[1, 0, 0], [0, rot_cos, -rot_sin], [0, rot_sin, rot_cos]],\n dtype=points.dtype)\n else:\n raise ValueError(\"axis should in range\")\n\n return points @ rot_mat_T\n\n\ndef rotation_2d(points, angles):\n \"\"\"rotation 2d points based on origin point clockwise when angle positive.\n \n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angles (float array, shape=[N]): rotation angle.\n\n Returns:\n float array: same shape as points\n \"\"\"\n rot_sin = np.sin(angles)\n rot_cos = np.cos(angles)\n rot_mat_T = np.stack([[rot_cos, -rot_sin], [rot_sin, rot_cos]])\n return np.einsum('aij,jka->aik', points, rot_mat_T)\n\n\ndef rotation_box(box_corners, angle):\n \"\"\"rotation 2d points based on origin point clockwise when angle positive.\n \n Args:\n points (float array, shape=[N, point_size, 2]): points to be rotated.\n angle (float): rotation angle.\n\n Returns:\n float array: same shape as points\n \"\"\"\n rot_sin = np.sin(angle)\n rot_cos = np.cos(angle)\n rot_mat_T = np.array(\n [[rot_cos, -rot_sin], [rot_sin, rot_cos]], dtype=box_corners.dtype)\n return box_corners @ rot_mat_T\n\n\ndef center_to_corner_box3d(centers,\n dims,\n angles=None,\n origin=[0.5, 1.0, 0.5],\n axis=1):\n \"\"\"convert kitti locations, dimensions and angles to corners\n \n Args:\n centers (float array, shape=[N, 3]): locations in kitti label file.\n dims (float array, shape=[N, 3]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n origin (list or array or float): origin point relate to smallest point.\n use [0.5, 1.0, 0.5] in camera and [0.5, 0.5, 0] in lidar.\n axis (int): rotation axis. 
1 for camera and 2 for lidar.\n Returns:\n [type]: [description]\n \"\"\"\n # 'length' in kitti format is in x axis.\n # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)\n # center in kitti format is [0.5, 1.0, 0.5] in xyz.\n corners = corners_nd(dims, origin=origin)\n # corners: [N, 8, 3]\n if angles is not None:\n corners = rotation_3d_in_axis(corners, angles, axis=axis)\n corners += centers.reshape([-1, 1, 3])\n return corners\n\n\ndef center_to_corner_box2d(centers, dims, angles=None, origin=0.5):\n \"\"\"convert kitti locations, dimensions and angles to corners.\n format: center(xy), dims(xy), angles(clockwise when positive)\n \n Args:\n centers (float array, shape=[N, 2]): locations in kitti label file.\n dims (float array, shape=[N, 2]): dimensions in kitti label file.\n angles (float array, shape=[N]): rotation_y in kitti label file.\n \n Returns:\n [type]: [description]\n \"\"\"\n # 'length' in kitti format is in x axis.\n # xyz(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar)\n # center in kitti format is [0.5, 1.0, 0.5] in xyz.\n corners = corners_nd(dims, origin=origin)\n # corners: [N, 4, 2]\n if angles is not None:\n corners = rotation_2d(corners, angles)\n corners += centers.reshape([-1, 1, 2])\n return corners\n\n\[email protected](nopython=True)\ndef box2d_to_corner_jit(boxes):\n num_box = boxes.shape[0]\n corners_norm = np.zeros((4, 2), dtype=boxes.dtype)\n corners_norm[1, 1] = 1.0\n corners_norm[2] = 1.0\n corners_norm[3, 0] = 1.0\n corners_norm -= np.array([0.5, 0.5], dtype=boxes.dtype)\n corners = boxes.reshape(num_box, 1, 5)[:, :, 2:4] * corners_norm.reshape(\n 1, 4, 2)\n rot_mat_T = np.zeros((2, 2), dtype=boxes.dtype)\n box_corners = np.zeros((num_box, 4, 2), dtype=boxes.dtype)\n for i in range(num_box):\n rot_sin = np.sin(boxes[i, -1])\n rot_cos = np.cos(boxes[i, -1])\n rot_mat_T[0, 0] = rot_cos\n rot_mat_T[0, 1] = -rot_sin\n rot_mat_T[1, 0] = rot_sin\n rot_mat_T[1, 1] = rot_cos\n box_corners[i] = corners[i] @ rot_mat_T + boxes[i, :2]\n return box_corners\n\n\ndef rbbox3d_to_corners(rbboxes, origin=[0.5, 0.5, 0.0], axis=2):\n return center_to_corner_box3d(\n rbboxes[..., :3],\n rbboxes[..., 3:6],\n rbboxes[..., 6],\n origin,\n axis=axis)\n\n\ndef rbbox3d_to_bev_corners(rbboxes, origin=0.5):\n return center_to_corner_box2d(rbboxes[..., :2], rbboxes[..., 3:5],\n rbboxes[..., 6], origin)\n\n\ndef minmax_to_corner_2d(minmax_box):\n ndim = minmax_box.shape[-1] // 2\n center = minmax_box[..., :ndim]\n dims = minmax_box[..., ndim:] - center\n return center_to_corner_box2d(center, dims, origin=0.0)\n\n\ndef minmax_to_corner_2d_v2(minmax_box):\n # N, 4 -> N 4 2\n return minmax_box[..., [0, 1, 0, 3, 2, 3, 2, 1]].reshape(-1, 4, 2)\n\n\ndef minmax_to_corner_3d(minmax_box):\n ndim = minmax_box.shape[-1] // 2\n center = minmax_box[..., :ndim]\n dims = minmax_box[..., ndim:] - center\n return center_to_corner_box3d(center, dims, origin=0.0)\n\n\ndef minmax_to_center_2d(minmax_box):\n ndim = minmax_box.shape[-1] // 2\n center_min = minmax_box[..., :ndim]\n dims = minmax_box[..., ndim:] - center_min\n center = center_min + 0.5 * dims\n return np.concatenate([center, dims], axis=-1)\n\n\ndef center_to_minmax_2d_0_5(centers, dims):\n return np.concatenate([centers - dims / 2, centers + dims / 2], axis=-1)\n\n\ndef center_to_minmax_2d(centers, dims, origin=0.5):\n if origin == 0.5:\n return center_to_minmax_2d_0_5(centers, dims)\n corners = center_to_corner_box2d(centers, dims, origin=origin)\n return corners[:, [0, 2]].reshape([-1, 4])\n\n\ndef 
limit_period(val, offset=0.5, period=np.pi):\n return val - np.floor(val / period + offset) * period\n\n\ndef projection_matrix_to_CRT_kitti(proj):\n # P = C @ [R|T]\n # C is upper triangular matrix, so we need to inverse CR and use QR\n # stable for all kitti camera projection matrix\n CR = proj[0:3, 0:3]\n CT = proj[0:3, 3]\n RinvCinv = np.linalg.inv(CR)\n Rinv, Cinv = np.linalg.qr(RinvCinv)\n C = np.linalg.inv(Cinv)\n R = np.linalg.inv(Rinv)\n T = Cinv @ CT\n return C, R, T\n\n\ndef get_frustum(bbox_image, C, near_clip=0.001, far_clip=100):\n fku = C[0, 0]\n fkv = -C[1, 1]\n u0v0 = C[0:2, 2]\n z_points = np.array(\n [near_clip] * 4 + [far_clip] * 4, dtype=C.dtype)[:, np.newaxis]\n b = bbox_image\n box_corners = np.array(\n [[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]]],\n dtype=C.dtype)\n near_box_corners = (box_corners - u0v0) / np.array(\n [fku / near_clip, -fkv / near_clip], dtype=C.dtype)\n far_box_corners = (box_corners - u0v0) / np.array(\n [fku / far_clip, -fkv / far_clip], dtype=C.dtype)\n ret_xy = np.concatenate(\n [near_box_corners, far_box_corners], axis=0) # [8, 2]\n ret_xyz = np.concatenate([ret_xy, z_points], axis=1)\n return ret_xyz\n\n\ndef get_frustum_v2(bboxes, C, near_clip=0.001, far_clip=100):\n fku = C[0, 0]\n fkv = -C[1, 1]\n u0v0 = C[0:2, 2]\n num_box = bboxes.shape[0]\n z_points = np.array(\n [near_clip] * 4 + [far_clip] * 4,\n dtype=C.dtype)[np.newaxis, :, np.newaxis]\n z_points = np.tile(z_points, [num_box, 1, 1])\n box_corners = minmax_to_corner_2d_v2(bboxes)\n near_box_corners = (box_corners - u0v0) / np.array(\n [fku / near_clip, -fkv / near_clip], dtype=C.dtype)\n far_box_corners = (box_corners - u0v0) / np.array(\n [fku / far_clip, -fkv / far_clip], dtype=C.dtype)\n ret_xy = np.concatenate(\n [near_box_corners, far_box_corners], axis=1) # [8, 2]\n ret_xyz = np.concatenate([ret_xy, z_points], axis=-1)\n return ret_xyz\n\n\ndef create_anchors_3d_stride(feature_size,\n sizes=[1.6, 3.9, 1.56],\n anchor_strides=[0.4, 0.4, 0.0],\n anchor_offsets=[0.2, -39.8, -1.78],\n rotations=[0, np.pi / 2],\n dtype=np.float32):\n \"\"\"\n Args:\n feature_size: list [D, H, W](zyx)\n sizes: [N, 3] list of list or array, size of anchors, xyz\n\n Returns:\n anchors: [*feature_size, num_sizes, num_rots, 7] tensor.\n \"\"\"\n # almost 2x faster than v1\n x_stride, y_stride, z_stride = anchor_strides\n x_offset, y_offset, z_offset = anchor_offsets\n z_centers = np.arange(feature_size[0], dtype=dtype)\n y_centers = np.arange(feature_size[1], dtype=dtype)\n x_centers = np.arange(feature_size[2], dtype=dtype)\n z_centers = z_centers * z_stride + z_offset\n y_centers = y_centers * y_stride + y_offset\n x_centers = x_centers * x_stride + x_offset\n sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])\n rotations = np.array(rotations, dtype=dtype)\n rets = np.meshgrid(\n x_centers, y_centers, z_centers, rotations, indexing='ij')\n tile_shape = [1] * 5\n tile_shape[-2] = int(sizes.shape[0])\n for i in range(len(rets)):\n rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)\n rets[i] = rets[i][..., np.newaxis] # for concat\n sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])\n tile_size_shape = list(rets[0].shape)\n tile_size_shape[3] = 1\n sizes = np.tile(sizes, tile_size_shape)\n rets.insert(3, sizes)\n ret = np.concatenate(rets, axis=-1)\n return np.transpose(ret, [2, 1, 0, 3, 4, 5])\n\n\ndef create_anchors_3d_range(feature_size,\n anchor_range,\n sizes=[1.6, 3.9, 1.56],\n rotations=[0, np.pi / 2],\n dtype=np.float32):\n \"\"\"\n Args:\n feature_size: list [D, H, 
W](zyx)\n sizes: [N, 3] list of list or array, size of anchors, xyz\n\n Returns:\n anchors: [*feature_size, num_sizes, num_rots, 7] tensor.\n \"\"\"\n anchor_range = np.array(anchor_range, dtype)\n z_centers = np.linspace(\n anchor_range[2], anchor_range[5], feature_size[0], dtype=dtype)\n y_centers = np.linspace(\n anchor_range[1], anchor_range[4], feature_size[1], dtype=dtype)\n x_centers = np.linspace(\n anchor_range[0], anchor_range[3], feature_size[2], dtype=dtype)\n sizes = np.reshape(np.array(sizes, dtype=dtype), [-1, 3])\n rotations = np.array(rotations, dtype=dtype)\n rets = np.meshgrid(\n x_centers, y_centers, z_centers, rotations, indexing='ij')\n tile_shape = [1] * 5\n tile_shape[-2] = int(sizes.shape[0])\n for i in range(len(rets)):\n rets[i] = np.tile(rets[i][..., np.newaxis, :], tile_shape)\n rets[i] = rets[i][..., np.newaxis] # for concat\n sizes = np.reshape(sizes, [1, 1, 1, -1, 1, 3])\n tile_size_shape = list(rets[0].shape)\n tile_size_shape[3] = 1\n sizes = np.tile(sizes, tile_size_shape)\n rets.insert(3, sizes)\n ret = np.concatenate(rets, axis=-1)\n return np.transpose(ret, [2, 1, 0, 3, 4, 5])\n\n\ndef project_to_image(points_3d, proj_mat):\n points_shape = list(points_3d.shape)\n points_shape[-1] = 1\n points_4 = np.concatenate([points_3d, np.zeros(points_shape)], axis=-1)\n point_2d = points_4 @ proj_mat.T\n point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]\n return point_2d_res\n\n\ndef camera_to_lidar(points, r_rect, velo2cam):\n points_shape = list(points.shape[0:-1])\n if points.shape[-1] == 3:\n points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)\n lidar_points = points @ np.linalg.inv((r_rect @ velo2cam).T)\n return lidar_points[..., :3]\n\n\ndef lidar_to_camera(points, r_rect, velo2cam):\n points_shape = list(points.shape[:-1])\n if points.shape[-1] == 3:\n points = np.concatenate([points, np.ones(points_shape + [1])], axis=-1)\n camera_points = points @ (r_rect @ velo2cam).T\n return camera_points[..., :3]\n\n\ndef box_camera_to_lidar(data, r_rect, velo2cam):\n xyz = data[:, 0:3]\n l, h, w = data[:, 3:4], data[:, 4:5], data[:, 5:6]\n r = data[:, 6:7]\n xyz_lidar = camera_to_lidar(xyz, r_rect, velo2cam)\n return np.concatenate([xyz_lidar, w, l, h, r], axis=1)\n\n\ndef box_lidar_to_camera(data, r_rect, velo2cam):\n xyz_lidar = data[:, 0:3]\n w, l, h = data[:, 3:4], data[:, 4:5], data[:, 5:6]\n r = data[:, 6:7]\n xyz = lidar_to_camera(xyz_lidar, r_rect, velo2cam)\n return np.concatenate([xyz, l, h, w, r], axis=1)\n\n\ndef remove_outside_points(points, rect, Trv2c, P2, image_shape):\n # 5x faster than remove_outside_points_v1(2ms vs 10ms)\n C, R, T = projection_matrix_to_CRT_kitti(P2)\n image_bbox = [0, 0, image_shape[1], image_shape[0]]\n frustum = get_frustum(image_bbox, C)\n frustum -= T\n frustum = np.linalg.inv(R) @ frustum.T\n frustum = camera_to_lidar(frustum.T, rect, Trv2c)\n frustum_surfaces = corner_to_surfaces_3d_jit(frustum[np.newaxis, ...])\n indices = points_in_convex_polygon_3d_jit(points[:, :3], frustum_surfaces)\n points = points[indices.reshape([-1])]\n return points\n\n\[email protected](nopython=True)\ndef iou_jit(boxes, query_boxes, eps=0.0):\n \"\"\"calculate box iou. 
note that jit version runs 2x faster than cython in \n my machine!\n Parameters\n ----------\n boxes: (N, 4) ndarray of float\n query_boxes: (K, 4) ndarray of float\n Returns\n -------\n overlaps: (N, K) ndarray of overlap between boxes and query_boxes\n \"\"\"\n N = boxes.shape[0]\n K = query_boxes.shape[0]\n overlaps = np.zeros((N, K), dtype=boxes.dtype)\n for k in range(K):\n box_area = ((query_boxes[k, 2] - query_boxes[k, 0] + eps) *\n (query_boxes[k, 3] - query_boxes[k, 1] + eps))\n for n in range(N):\n iw = (min(boxes[n, 2], query_boxes[k, 2]) -\n max(boxes[n, 0], query_boxes[k, 0]) + eps)\n if iw > 0:\n ih = (min(boxes[n, 3], query_boxes[k, 3]) -\n max(boxes[n, 1], query_boxes[k, 1]) + eps)\n if ih > 0:\n ua = (\n (boxes[n, 2] - boxes[n, 0] + eps) *\n (boxes[n, 3] - boxes[n, 1] + eps) + box_area - iw * ih)\n overlaps[n, k] = iw * ih / ua\n return overlaps\n\n\ndef points_in_rbbox(points, rbbox, lidar=True):\n if lidar:\n h_axis = 2\n origin = [0.5, 0.5, 0]\n else:\n origin = [0.5, 1.0, 0.5]\n h_axis = 1\n rbbox_corners = center_to_corner_box3d(\n rbbox[:, :3], rbbox[:, 3:6], rbbox[:, 6], origin=origin, axis=h_axis)\n surfaces = corner_to_surfaces_3d(rbbox_corners)\n indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)\n return indices\n\n\[email protected](nopython=False)\ndef corner_to_surfaces_3d(corners):\n \"\"\"convert 3d box corners from corner function above\n to surfaces that normal vectors all direct to internal.\n\n Args:\n corners (float array, [N, 8, 3]): 3d box corners. \n Returns:\n surfaces (float array, [N, 6, 4, 3]): \n \"\"\"\n # box_corners: [N, 8, 3], must from corner functions in this module\n surfaces = np.array([\n [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],\n [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],\n [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],\n [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],\n [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],\n [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],\n ]).transpose([2, 0, 1, 3])\n return surfaces\n\n\[email protected](nopython=True)\ndef corner_to_surfaces_3d_jit(corners):\n \"\"\"convert 3d box corners from corner function above\n to surfaces that normal vectors all direct to internal.\n\n Args:\n corners (float array, [N, 8, 3]): 3d box corners. \n Returns:\n surfaces (float array, [N, 6, 4, 3]): \n \"\"\"\n # box_corners: [N, 8, 3], must from corner functions in this module\n num_boxes = corners.shape[0]\n surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype)\n corner_idxes = np.array([\n 0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7\n ]).reshape(6, 4)\n for i in range(num_boxes):\n for j in range(6):\n for k in range(4):\n surfaces[i, j, k] = corners[i, corner_idxes[j, k]]\n return surfaces\n\n\ndef image_box_region_area(img_cumsum, bbox):\n \"\"\"check a 2d voxel is contained by a box. 
used to filter empty\n anchors.\n Summed-area table algorithm:\n ==> W\n ------------------\n | | |\n |------A---------B\n | | |\n | | |\n |----- C---------D\n Iabcd = ID-IB-IC+IA\n Args:\n img_cumsum: [M, H, W](yx) cumsumed image.\n bbox: [N, 4](xyxy) bounding box, \n \"\"\"\n N = bbox.shape[0]\n M = img_cumsum.shape[0]\n ret = np.zeros([N, M], dtype=img_cumsum.dtype)\n ID = img_cumsum[:, bbox[:, 3], bbox[:, 2]]\n IA = img_cumsum[:, bbox[:, 1], bbox[:, 0]]\n IB = img_cumsum[:, bbox[:, 3], bbox[:, 0]]\n IC = img_cumsum[:, bbox[:, 1], bbox[:, 2]]\n ret = ID - IB - IC + IA\n return ret\n\n\[email protected](nopython=True)\ndef sparse_sum_for_anchors_mask(coors, shape):\n ret = np.zeros(shape, dtype=np.float32)\n for i in range(coors.shape[0]):\n ret[coors[i, 1], coors[i, 2]] += 1\n return ret\n\n\[email protected](nopython=True)\ndef fused_get_anchors_area(dense_map, anchors_bv, stride, offset,\n grid_size):\n anchor_coor = np.zeros(anchors_bv.shape[1:], dtype=np.int32)\n grid_size_x = grid_size[0] - 1\n grid_size_y = grid_size[1] - 1\n N = anchors_bv.shape[0]\n ret = np.zeros((N), dtype=dense_map.dtype)\n for i in range(N):\n anchor_coor[0] = np.floor(\n (anchors_bv[i, 0] - offset[0]) / stride[0])\n anchor_coor[1] = np.floor(\n (anchors_bv[i, 1] - offset[1]) / stride[1])\n anchor_coor[2] = np.floor(\n (anchors_bv[i, 2] - offset[0]) / stride[0])\n anchor_coor[3] = np.floor(\n (anchors_bv[i, 3] - offset[1]) / stride[1])\n anchor_coor[0] = max(anchor_coor[0], 0)\n anchor_coor[1] = max(anchor_coor[1], 0)\n anchor_coor[2] = min(anchor_coor[2], grid_size_x)\n anchor_coor[3] = min(anchor_coor[3], grid_size_y)\n ID = dense_map[anchor_coor[3], anchor_coor[2]]\n IA = dense_map[anchor_coor[1], anchor_coor[0]]\n IB = dense_map[anchor_coor[3], anchor_coor[0]]\n IC = dense_map[anchor_coor[1], anchor_coor[2]]\n ret[i] = ID - IB - IC + IA\n return ret\n\n\[email protected](nopython=True)\ndef distance_similarity(points,\n qpoints,\n dist_norm,\n with_rotation=False,\n rot_alpha=0.5):\n N = points.shape[0]\n K = qpoints.shape[0]\n dists = np.zeros((N, K), dtype=points.dtype)\n rot_alpha_1 = 1 - rot_alpha\n for k in range(K):\n for n in range(N):\n if np.abs(points[n, 0] - qpoints[k, 0]) <= dist_norm:\n if np.abs(points[n, 1] - qpoints[k, 1]) <= dist_norm:\n dist = np.sum((points[n, :2] - qpoints[k, :2])**2)\n dist_normed = min(dist / dist_norm, dist_norm)\n if with_rotation:\n dist_rot = np.abs(\n np.sin(points[n, -1] - qpoints[k, -1]))\n dists[\n n,\n k] = 1 - rot_alpha_1 * dist_normed - rot_alpha * dist_rot\n else:\n dists[n, k] = 1 - dist_normed\n return dists\n\n\ndef box3d_to_bbox(box3d, rect, Trv2c, P2):\n box_corners = center_to_corner_box3d(box3d[:, :3], box3d[:, 3:6], box3d[:, 6], [0.5, 1.0, 0.5], axis=1)\n box_corners_in_image = project_to_image(\n box_corners, P2)\n # box_corners_in_image: [N, 8, 2]\n minxy = np.min(box_corners_in_image, axis=1)\n maxxy = np.max(box_corners_in_image, axis=1)\n bbox = np.concatenate([minxy, maxxy], axis=1)\n return bbox\n\ndef assign_label_to_voxel(gt_boxes, coors, voxel_size, coors_range):\n \"\"\"assign a 0/1 label to each voxel based on whether \n the center of voxel is in gt_box. 
LIDAR.\n \"\"\"\n voxel_size = np.array(voxel_size, dtype=gt_boxes.dtype)\n coors_range = np.array(coors_range, dtype=gt_boxes.dtype)\n shift = coors_range[:3]\n voxel_origins = coors[:, ::-1] * voxel_size + shift\n voxel_centers = voxel_origins + voxel_size * 0.5\n gt_box_corners = center_to_corner_box3d(\n gt_boxes[:, :3] - voxel_size * 0.5,\n gt_boxes[:, 3:6] + voxel_size,\n gt_boxes[:, 6],\n origin=[0.5, 0.5, 0],\n axis=2)\n gt_surfaces = corner_to_surfaces_3d(gt_box_corners)\n ret = points_in_convex_polygon_3d_jit(voxel_centers, gt_surfaces)\n return np.any(ret, axis=1).astype(np.int64)\n\ndef change_box3d_center_(box3d, src, dst):\n dst = np.array(dst, dtype=box3d.dtype)\n src = np.array(src, dtype=box3d.dtype)\n box3d[..., :3] += box3d[..., 3:6] * (dst - src)\n\n\n# pang added to build the tensor for the second stage of training\[email protected](nopython=True,parallel=True)\ndef build_stage2_training_MX(boxes, query_boxes, criterion, scores_3d, scores_2d,\n dis_to_lidar_3d,overlaps,tensor_index, mask_idx, training_flag):\n N = boxes.shape[0] #SECOND:70400 #pointpillars:107136\n K = query_boxes.shape[0]\n ind=0\n ind_max = ind\n for k in range(K):\n qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *\n (query_boxes[k, 3] - query_boxes[k, 1]))\n for n in range(N):\n #MX\n if training_flag==False:\n if scores_3d[n,0] < 0.01: #0.1 #filter the 3d candidates\n continue\n\n iw = (min(boxes[n, 2], query_boxes[k, 2]) -\n max(boxes[n, 0], query_boxes[k, 0]))\n if iw > 0:\n ih = (min(boxes[n, 3], query_boxes[k, 3]) -\n max(boxes[n, 1], query_boxes[k, 1]))\n if ih > 0:\n if criterion == -1:\n ua = (\n (boxes[n, 2] - boxes[n, 0]) *\n (boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)\n elif criterion == 0:\n ua = ((boxes[n, 2] - boxes[n, 0]) *\n (boxes[n, 3] - boxes[n, 1]))\n elif criterion == 1:\n ua = qbox_area\n else:\n ua = 1.0\n overlaps[ind,0] = iw * ih / ua\n overlaps[ind,1] = scores_3d[n,0]\n overlaps[ind,2] = scores_2d[k,0]\n overlaps[ind,3] = dis_to_lidar_3d[n,0]\n\n tensor_index[ind,0] = k\n tensor_index[ind,1] = mask_idx[n]\n ind = ind+1\n\n elif k==K-1:\n overlaps[ind,0] = -10\n overlaps[ind,1] = scores_3d[n,0]\n overlaps[ind,2] = -10\n overlaps[ind,3] = dis_to_lidar_3d[n,0]\n tensor_index[ind,0] = k\n tensor_index[ind,1] = mask_idx[n]\n ind = ind+1\n elif k==K-1: #最后一个补-10,防止有些3d候选框一个2d跟它的组合都没有,全0,后面被滤掉\n overlaps[ind,0] = -10\n overlaps[ind,1] = scores_3d[n,0]\n overlaps[ind,2] = -10\n overlaps[ind,3] = dis_to_lidar_3d[n,0]\n tensor_index[ind,0] = k\n tensor_index[ind,1] = mask_idx[n]\n ind = ind+1\n\n return overlaps, tensor_index, ind"
] |
[
[
"numpy.split",
"numpy.sqrt",
"numpy.einsum",
"numpy.linspace",
"numpy.concatenate",
"numpy.max",
"numpy.arctan2",
"numpy.zeros_like",
"numpy.any",
"numpy.linalg.qr",
"numpy.exp",
"numpy.where",
"numpy.ones_like",
"numpy.reshape",
"numpy.arange",
"numpy.stack",
"numpy.sin",
"numpy.zeros",
"numpy.log",
"numpy.min",
"numpy.linalg.inv",
"numpy.floor",
"numpy.transpose",
"numpy.meshgrid",
"numpy.array",
"numpy.sum",
"numpy.abs",
"numpy.cos",
"numpy.tile",
"numpy.ones"
]
] |
Daniel-H-99/FBINET |
[
"3f0ff747b1ec430946eb8d2facb6e91c28236f93"
] |
[
"models/ebconv.py"
] |
[
"import torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn.common_types import _size_2_t\n\nimport math\nimport numpy as np\nfrom functools import partial\nfrom typing import Union\n\nfrom bnn import BConfig, bconfig\nfrom bnn.layers.helpers import copy_paramters\n\n\nclass BinarySoftActivation(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, input):\n # ctx.save_for_backward(input)\n return (input == input.max(dim=1, keepdim=True)\n [0]).view_as(input).type_as(input)\n\n @staticmethod\n def backward(ctx, grad_output):\n #input, = ctx.saved_tensors\n grad_input = grad_output.clone()\n #grad_input.masked_fill_(input.ge(1) | input.le(-1), 0)\n return grad_input\n\n\nclass EBConv2d(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n kernel_size: _size_2_t,\n stride: _size_2_t = 1,\n padding: Union[str, _size_2_t] = 0,\n dilation: _size_2_t = 1,\n groups: int = 1,\n bias: bool = True,\n padding_mode: str = 'zeros',\n num_experts: int = 1,\n activation=torch.sigmoid,\n use_only_first: bool = False,\n use_se: bool = True,\n bconfig: BConfig = None\n ) -> None:\n super(EBConv2d, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilation = dilation\n self.groups = groups\n self.padding_mode = padding_mode\n self.num_experts = num_experts\n self.bconfig = bconfig\n self.use_se = use_se\n self.use_only_first = use_only_first\n\n self.weight = nn.Parameter(\n torch.Tensor(\n num_experts,\n out_channels,\n in_channels // groups,\n kernel_size,\n kernel_size\n )\n )\n if bias:\n self.bias = nn.Parameter(\n torch.Tensor(num_experts, self.out_channels)\n )\n else:\n self.register_parameter('bias', None)\n\n # ebcond head\n self.fc = nn.Linear(in_channels, num_experts)\n self.activation = activation\n\n # se head\n if self.use_se:\n self.se_fc = nn.Sequential(\n nn.Linear(in_channels, out_channels // 8, bias=False),\n nn.ReLU(inplace=True),\n nn.Linear(out_channels // 8, out_channels, bias=False),\n nn.Sigmoid()\n )\n\n if bconfig is not None:\n self.activation_pre_process = bconfig.activation_pre_process()\n self.activation_post_process = bconfig.activation_post_process(\n self, shape=[1, out_channels, 1, 1])\n self.weight_pre_process = bconfig.weight_pre_process()\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for i in range(self.num_experts):\n nn.init.kaiming_uniform_(self.weight[i], a=math.sqrt(5))\n if self.bias is not None:\n fan_in = np.prod(self.weight.shape[2:])\n bound = 1 / math.sqrt(fan_in)\n nn.init.uniform_(self.bias[i], a=-bound, b=bound)\n\n def forward(self, x):\n B, C, H, W = x.size()\n\n # Compute the expert selection\n avg_x = F.adaptive_avg_pool2d(x, 1).flatten(1)\n gate_x = self.activation(self.fc(avg_x))\n # WTA function with Identity\n gate_x = BinarySoftActivation.apply(gate_x)\n\n # Supress the expert selection temporarily, select expert 0 always.\n if (self.bconfig is None or isinstance(\n self.activation_pre_process,\n nn.Identity)) or self.use_only_first:\n gate_x = gate_x * torch.zeros_like(gate_x)\n gate_x[:, 0] = gate_x[:, 0] + torch.ones_like(gate_x[:, 0])\n\n base_weight = self.weight\n weight = torch.matmul(\n gate_x,\n base_weight.view(self.num_experts, -1)\n ).view(B * self.out_channels, self.in_channels // self.groups, self.kernel_size, self.kernel_size)\n\n bias = None\n if self.bias is not None:\n bias = torch.matmul(gate_x, self.bias).flatten()\n\n # Binarize the 
weights and the input features\n if self.bconfig is not None:\n weight = self.weight_pre_process(weight)\n x = self.activation_pre_process(x)\n\n x = x.view(1, B * C, H, W)\n out = F.conv2d(\n x, weight, bias, stride=self.stride, padding=self.padding,\n dilation=self.dilation, groups=self.groups * B\n )\n out = out.permute([1, 0, 2, 3]).view(\n B, self.out_channels, out.shape[-2], out.shape[-1])\n\n # Apply learnable alpha if set\n if self.bconfig is not None:\n out = self.activation_post_process(out, x)\n\n if self.use_se:\n scaling = self.se_fc(avg_x) # Use feature pre-binarization\n scaling = scaling.view(B, scaling.size(1), 1, 1)\n out = out.mul(scaling.expand_as(out))\n\n return out\n\n @classmethod\n def from_module(\n cls,\n mod: nn.Module,\n bconfig: BConfig = None,\n update: bool = False):\n if not bconfig:\n assert hasattr(\n mod, 'bconfig'), 'The input modele requires a predifined bconfig'\n assert mod.bconfig, 'The input model bconfig is invalid'\n bconfig = mod.bconfig\n bnn_conv = cls(\n mod.in_channels,\n mod.out_channels,\n mod.kernel_size,\n stride=mod.stride,\n padding=mod.padding,\n dilation=mod.dilation,\n groups=mod.groups,\n bias=mod.bias is not None,\n padding_mode=mod.padding_mode,\n num_experts=mod.num_experts,\n activation=mod.activation,\n use_only_first=mod.use_only_first,\n use_se=mod.use_se,\n bconfig=bconfig)\n bnn_conv.weight = mod.weight\n bnn_conv.bias = mod.bias\n\n if update:\n copy_paramters(mod, bnn_conv, bconfig)\n\n return bnn_conv\n"
] |
[
[
"torch.nn.init.uniform_",
"torch.Tensor",
"torch.nn.functional.conv2d",
"torch.zeros_like",
"torch.nn.Sigmoid",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.matmul",
"numpy.prod",
"torch.nn.ReLU",
"torch.ones_like"
]
] |
JSchweisthal/Positive-and-Unlabeled-Learning-from-Imbalanced-Data
|
[
"82e193842e4f6a7b4a0ef476f1104944ef90ee47"
] |
[
"ImbalancedSelfPU/utils/util.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\nfrom sklearn.utils import extmath\n\n\nnon_image_vars = ['Age', 'PTGENDER', 'PTEDUCAT', 'APOE Status', 'MMSCORE', 'CDR', 'AVLT-LTM', 'AVLT-Total', 'ADAS']\none_hot_vars = {\"APOE Status\": {'NC': 0, 'HT': 1, 'HM': 2, 0.0: 3}}\ndx2label = {\"AD\": 0, \"MCI\": 1, \"NL\": 2}\n\n\ndef one_hot_torch(index, classes):\n '''\n index: labels, batch_size * 1, index starts from 0\n classes: int, # of classes\n '''\n y = index.type(torch.LongTensor)\n # One hot encoding buffer that you create out of the loop and just keep reusing\n y_onehot = torch.FloatTensor(y.size()[0], classes)\n y_onehot.zero_()\n '''\n TypeError: scatter_ received an invalid combination of arguments - got (int, Variable, int), but expected one of:\n * (int dim, torch.LongTensor index, float value)\n didn't match because some of the arguments have invalid types: (int, Variable, int)\n * (int dim, torch.LongTensor index, torch.FloatTensor src)\n didn't match because some of the arguments have invalid types: (int, Variable, int)\n '''\n y_onehot.scatter_(1, y.data, 1)\n #return Variable(y_onehot).cuda()\n return Variable(y_onehot)\n\n\ndef focal_loss(input, y, weight=None, alpha=0.25, gamma=2, eps=1e-7, reduction='elementwise_mean', one_hot=True, reverse_weighting=False):\n # print(\"focal loss:\", input, target)\n y = y.view(-1, 1)\n\n ###############################\n if one_hot:\n y_hot = one_hot_torch(y, input.size(-1))\n else:\n #y_hot = Variable(torch.ones(y.size(0), 2)).cuda() * y # y is float tensor\n y_hot = Variable(torch.ones(y.size(0), 2).cuda()) * y\n y_hot[:, 0] = 1 - y_hot[:, 1]\n ###############################\n\n if weight is None:\n logit = F.softmax(input, dim=-1)\n else:\n logit = F.softmax(input, dim=-1) * weight\n logit = logit.clamp(eps, 1. - eps)\n\n loss = -1 * y_hot * torch.log(logit) # cross entropy\n if reverse_weighting:\n for i in range(loss.size()[0]):\n index = torch.argmax(y_hot)\n loss[i, index] = loss[i, index] * (1 - logit[i, 1 - index]) ** gamma\n loss *= alpha\n else:\n loss = alpha * loss * (1 - logit) ** gamma # focal loss\n\n if reduction == 'elementwise_mean':\n return None,loss.sum() / input.size()[0]\n elif reduction == 'sum':\n return None,loss.sum()\n elif reduction == 'elementwise_sum':\n return None,loss.sum(dim=1)\n else:\n return None,loss\n\n\nclass FocalLoss(nn.Module):\n\n def __init__(self, weight=None, alpha=0.25, gamma=2, eps=1e-7, one_hot=True):\n super(FocalLoss, self).__init__()\n self.alpha = alpha\n self.gamma = gamma\n self.eps = eps\n self.weight = weight\n self.one_hot = one_hot\n\n def forward(self, input, y):\n return focal_loss(input, y, weight=self.weight, alpha=self.alpha, gamma=self.gamma, eps=self.eps, one_hot=self.one_hot)\n\ndef crossentropy_loss(input):\n\n loss = -torch.log(torch.sigmoid(input))\n return loss\n\ndef sigmoid_loss(input, reduction='elementwise_mean'):\n # y must be -1/+1\n # NOTE: torch.nn.functional.sigmoid is 1 / (1 + exp(-x)). 
BUT sigmoid loss should be 1 / (1 + exp(x))\n loss = torch.sigmoid(-input)\n \n return loss\n\nclass SigmoidLoss(nn.Module):\n\n def __init__(self, reduction='elementwise_mean'):\n super(SigmoidLoss, self).__init__()\n self.reduction = reduction\n\n def forward(self, input, y):\n return sigmoid_loss(input, y, self.reduction)\n\n\ndef edge_weight(In_data): \n Rho = 1e-2\n # https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/utils/extmath.py\n X = extmath.row_norms(In_data, squared=True) # Row-wise (squared) Euclidean norm of X.\n X = X[:,np.newaxis]\n kernel = np.dot(In_data, In_data.T)\n XX = np.ones((len(X), 1))\n X = np.dot(X, XX.T)\n kernel *= -2\n kernel = X + kernel + X.T\n kernel = np.exp(-Rho * kernel)\n return kernel\n\n\ndef laplacian(In_data, normal=False):\n In_data = In_data.reshape(len(In_data), -1)\n # In_data = np.float128(In_data)/255.\n adj_mat = edge_weight(In_data)\n D = np.zeros((len(In_data), len(In_data)))\n for n in range(len(D)):\n D[n,n] = np.sum(adj_mat[n,:])\n if normal == True:\n sqrt_deg_matrix = np.mat(np.diag(np.diag(D)**(-0.5)))\n lap_matrix = sqrt_deg_matrix * np.mat(D - adj_mat) * sqrt_deg_matrix\n else:\n lap_matrix = D - adj_mat\n return (np.float32(lap_matrix))\n\ndef pu_risk_estimators_sigmoid(y_pred, y_true, prior):\n # y_true is -1/1\n #one_u = torch.ones(y_true.size()).cuda()\n one_u = torch.ones(y_true.size())\n prior_prime = 0.5\n positive = (y_true == 1).float()\n unlabeled = (y_true == -1).float()\n P_size = max(1., torch.sum(positive))\n u_size = max(1. ,torch.sum(unlabeled))\n y_positive = sigmoid_loss(y_pred).view(-1)\n y_unlabeled = sigmoid_loss(-y_pred).view(-1)\n\n positive_risk = (prior_prime * y_positive * positive / P_size).sum()\n negative_risk = (((1-prior_prime)/(1-prior)*unlabeled / u_size - (1-prior_prime)/(1-prior)*prior * positive / P_size) * y_unlabeled).sum()\n return positive_risk, negative_risk\ndef pu_risk_estimators_sigmoid_eps(y_pred, y_true, prior, eps):\n prior_prime = 0.5\n # y_true is -1/1\n #one_u = torch.ones(y_true.size()).cuda()\n one_u = torch.ones(y_true.size())\n positive = (y_true == 1).float()\n unlabeled = (y_true == -1).float()\n P_size = max(1., torch.sum(positive))\n u_size = max(1. ,torch.sum(unlabeled))\n y_positive = sigmoid_loss(y_pred).view(-1) * eps\n y_unlabeled = sigmoid_loss(-y_pred).view(-1) * eps\n positive_risk = ((prior_prime * y_positive * positive / P_size)).sum()\n negative_risk = ((((1-prior_prime)/(1-prior)*unlabeled / u_size - (1-prior_prime)/(1-prior)*prior * positive / P_size) * y_unlabeled)).sum()\n return positive_risk, negative_risk\n\n# def nu_risk_estimators_sigmoid(y_pred, y_true, prior):\n# # y_true is -1/1\n# #one_u = torch.ones(y_true.size()).cuda()\n# one_u = torch.ones(y_true.size())\n# positive = (y_true == 1).float()\n# unlabeled = (y_true == -1).float()\n# P_size = max(1., torch.sum(positive))\n# u_size = max(1. ,torch.sum(unlabeled))\n# y_positive = sigmoid_loss(y_pred).view(-1)\n# y_unlabeled = sigmoid_loss(-y_pred).view(-1)\n# positive_risk = (prior * y_positive * positive / P_size).sum()\n# negative_risk = ((unlabeled / u_size - prior * positive / P_size) * y_unlabeled).sum()\n# return positive_risk, negative_risk\n\ndef pu_risk_estimators_crossentropy(y_pred, y_true, prior):\n\n one_u = torch.ones(y_true.size())\n positive = (y_true == 1).float()\n unlabeled = (y_true == -1).float()\n P_size = max(1., torch.sum(positive))\n u_size = max(1. 
,torch.sum(unlabeled))\n y_positive = crossentropy_loss(y_pred).view(-1)\n y_unlabeled = crossentropy_loss(-y_pred).view(-1)\n positive_risk = (y_positive * positive / P_size).sum()\n negative_risk = ((unlabeled / u_size) * y_unlabeled).sum()\n #print(P_p, P_n, P_u)\n return positive_risk, negative_risk\ndef pu_risk_estimators_focal(y_pred, y_true):\n # y_pred is [score1, score2] before softmax logit, y_true is 0/1\n #one_u = torch.ones(y_true.size()).cuda()\n #zeros = torch.zeros(y_true.size()).cuda()\n one_u = torch.ones(y_true.size())\n zeros = torch.zeros(y_true.size())\n u_mask = torch.abs(y_true - one_u)\n \n #P_size = torch.max(torch.sum(y_true), torch.Tensor([1]).cuda())\n P_size = torch.max(torch.sum(y_true), torch.Tensor([1]))\n #u_size = torch.max(torch.sum(u_mask), torch.Tensor([1]).cuda())\n u_size = torch.max(torch.sum(u_mask), torch.Tensor([1]))\n P_p = (focal_loss(y_pred, one_u, gamma=3, reduction='elementwise_sum')).dot(y_true) / P_size # should go down\n P_n = (focal_loss(y_pred, zeros, gamma=3, reduction='elementwise_sum')).dot(y_true) / P_size # should go up\n P_u = (focal_loss(y_pred, zeros, gamma=3, reduction='elementwise_sum')).dot(u_mask) / u_size # should go down\n return P_p, P_n, P_u\n\n\ndef pu_loss(y_pred, y_true, loss_fn, Probility_P=0.25, BETA=0, gamma=1.0, Yi=1e-8, L=None, nnPU = True, eps = None):\n P_p, P_n, P_u = 0, 0, 0\n if loss_fn == \"sigmoid\":\n R_p, R_n = pu_risk_estimators_sigmoid(y_pred, y_true, Probility_P)\n elif loss_fn == \"focal\":\n P_p, P_n, P_u = pu_risk_estimators_focal(y_pred, y_true)\n elif loss_fn == 'Xent':\n R_p, R_n = pu_risk_estimators_crossentropy(y_pred, y_true, Probility_P)\n elif loss_fn == 'sigmoid_eps':\n R_p, R_n = pu_risk_estimators_sigmoid_eps(y_pred, y_true, Probility_P, eps)\n else: pass\n\n M_reg = torch.zeros(1)\n if L is not None:\n FL = torch.mm((2 * y_pred - 1).transpose(0, 1), L)\n R_manifold = torch.mm(FL, (2 * y_pred - 1))\n M_reg = Yi * R_manifold\n if (not nnPU) or (loss_fn == 'Xent'):\n return None, R_p + R_n \n\n if -BETA > R_n:\n #print(\"NEGATIVE\")\n #print(R_n)\n return R_p - BETA, -gamma*R_n#, Probility_P * P_p, P_u, Probility_P * P_\n # return -gamma * PU_2, torch.sum(M_reg), Probility_P * P_p, P_u, Probility_P * P_n\n # return Probility_P * P_p\n else:\n #print(\"POSITIVE\")\n #print(R_p, R_n, R_p + R_n)\n return R_p + R_n, R_p + R_n#, Probility_P * P_p, P_u, Probility_P * P_n\n # return PU_1, torch.sum(M_reg), Probility_P * P_p, P_u, Probility_P * P_n\n\n\nclass PULoss(nn.Module):\n '''\n only works for binary classification\n '''\n\n def __init__(self, loss_fn='sigmoid', Probability_P=0.25, BETA=0, gamma=1.0, Yi=1e-8, nnPU=True):\n super(PULoss, self).__init__()\n self.loss_fn = loss_fn\n self.Probability_P = Probability_P\n self.BETA = BETA\n self.gamma = gamma\n self.Yi = Yi\n self.nnPU = nnPU\n\n def update_p(self, p):\n self.Probability_P = p\n def forward(self, y_pred, y_true, L=None, eps = None):\n return pu_loss(y_pred, y_true, self.loss_fn, self.Probability_P, self.BETA, self.gamma, self.Yi, L, nnPU = self.nnPU, eps = eps)\n\n\n\ndef L1_reg(model):\n # https://discuss.pytorch.org/t/how-does-one-implement-weight-regularization-l1-or-l2-manually-without-optimum/7951\n l1_reg = None\n for W in model.parameters():\n if l1_reg is None:\n l1_reg = W.norm(1)\n else:\n l1_reg = l1_reg + W.norm(1)\n return l1_reg\n\n\ndef suvr2class(suvrs):\n labels = torch.round((suvrs - 0.8) * 10).type(torch.LongTensor)\n return labels\n\n\n\n\ndef show_slices(slices, lower = None, upper = None):\n fig, axes = 
plt.subplots(1, len(slices), figsize=(30,30))\n for i, slice in enumerate(slices):\n if lower != None and upper != None: axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\", vmin=lower, vmax=upper)\n elif lower != None: axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\", vmin=lower)\n elif upper != None: axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\", vmax=upper)\n else: axes[i].imshow(slice.T, cmap=\"gray\", origin=\"lower\")\n\n\n\ndef confusion_matrix(predictions, truths, classes):\n '''\n predictions, truths: list of integers\n classes: int, # of classes\n return confusion_matrix: x-axis target, y-axis predictions\n '''\n m = np.zeros((classes, classes))\n accuracy = np.zeros(classes)\n for i in range(len(predictions)):\n m[int(predictions[i]), int(truths[i])] += 1\n diagonal = 0\n for i in range(classes):\n accuracy[i] = m[i, i] / np.sum(m[:, i], axis=0)\n diagonal += m[i, i]\n return m, accuracy, float(diagonal) / len(predictions)\n\nclass ProbOhemCrossEntropy2d(nn.Module):\n def __init__(self, ignore_label, reduction='mean', thresh=0.6, min_kept=256,\n down_ratio=1, use_weight=False):\n super(ProbOhemCrossEntropy2d, self).__init__()\n self.ignore_label = ignore_label\n self.thresh = float(thresh)\n self.min_kept = int(min_kept)\n self.down_ratio = down_ratio\n if use_weight:\n weight = torch.FloatTensor(\n [0.8373, 0.918, 0.866, 1.0345, 1.0166, 0.9969, 0.9754, 1.0489,\n 0.8786, 1.0023, 0.9539, 0.9843, 1.1116, 0.9037, 1.0865, 1.0955,\n 1.0865, 1.1529, 1.0507])\n self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,\n weight=weight,\n ignore_index=ignore_label)\n else:\n self.criterion = torch.nn.CrossEntropyLoss(reduction=reduction,\n ignore_index=ignore_label)\n\n def forward(self, pred, target):\n b, c, h, w = pred.size()\n target = target.view(-1)\n valid_mask = target.ne(self.ignore_label)\n target = target * valid_mask.long()\n num_valid = valid_mask.sum()\n\n prob = F.softmax(pred, dim=1)\n prob = (prob.transpose(0, 1)).reshape(c, -1)\n\n if self.min_kept > num_valid:\n logger.info('Labels: {}'.format(num_valid))\n elif num_valid > 0:\n prob = prob.masked_fill_(1 - valid_mask, 1)\n mask_prob = prob[\n target, torch.arange(len(target), dtype=torch.long)]\n threshold = self.thresh\n if self.min_kept > 0:\n index = mask_prob.argsort()\n threshold_index = index[min(len(index), self.min_kept) - 1]\n if mask_prob[threshold_index] > self.thresh:\n threshold = mask_prob[threshold_index]\n kept_mask = mask_prob.le(threshold)\n target = target * kept_mask.long()\n valid_mask = valid_mask * kept_mask\n # logger.info('Valid Mask: {}'.format(valid_mask.sum()))\n\n target = target.masked_fill_(1 - valid_mask, self.ignore_label)\n target = target.view(b, h, w)\n\n return self.criterion(pred, target)\n\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"torch.abs",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.sum",
"torch.FloatTensor",
"numpy.exp",
"torch.autograd.Variable",
"torch.nn.CrossEntropyLoss",
"torch.mm",
"torch.round",
"numpy.float32",
"numpy.zeros",
"numpy.mat",
"sklearn.utils.extmath.row_norms",
"torch.sigmoid",
"torch.log",
"numpy.sum",
"torch.Tensor",
"torch.argmax"
]
] |
wuyuebupt/doubleheadsrcnn
|
[
"a744b4121d52935741f49d845bae7878270ea291"
] |
[
"maskrcnn_benchmark/modeling/poolers.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom maskrcnn_benchmark.layers import ROIAlign\n\nfrom .utils import cat\n\n\nclass LevelMapper(object):\n \"\"\"Determine which FPN level each RoI in a set of RoIs should map to based\n on the heuristic in the FPN paper.\n \"\"\"\n\n def __init__(self, k_min, k_max, canonical_scale=224, canonical_level=4, eps=1e-6):\n \"\"\"\n Arguments:\n k_min (int)\n k_max (int)\n canonical_scale (int)\n canonical_level (int)\n eps (float)\n \"\"\"\n self.k_min = k_min\n self.k_max = k_max\n self.s0 = canonical_scale\n self.lvl0 = canonical_level\n self.eps = eps\n\n def __call__(self, boxlists):\n \"\"\"\n Arguments:\n boxlists (list[BoxList])\n \"\"\"\n # Compute level ids\n s = torch.sqrt(cat([boxlist.area() for boxlist in boxlists]))\n\n # Eqn.(1) in FPN paper\n target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0 + self.eps))\n target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max)\n return target_lvls.to(torch.int64) - self.k_min\n\n\nclass Pooler(nn.Module):\n \"\"\"\n Pooler for Detection with or without FPN.\n It currently hard-code ROIAlign in the implementation,\n but that can be made more generic later on.\n Also, the requirement of passing the scales is not strictly necessary, as they\n can be inferred from the size of the feature map / size of original image,\n which is available thanks to the BoxList.\n \"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio):\n \"\"\"\n Arguments:\n output_size (list[tuple[int]] or list[int]): output size for the pooled region\n scales (list[float]): scales for each Pooler\n sampling_ratio (int): sampling ratio for ROIAlign\n \"\"\"\n super(Pooler, self).__init__()\n poolers = []\n for scale in scales:\n poolers.append(\n ROIAlign(\n output_size, spatial_scale=scale, sampling_ratio=sampling_ratio\n )\n )\n self.poolers = nn.ModuleList(poolers)\n self.output_size = output_size\n # get the levels in the feature map by leveraging the fact that the network always\n # downsamples by a factor of 2 at each level.\n lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()\n lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()\n self.map_levels = LevelMapper(lvl_min, lvl_max)\n\n def convert_to_roi_format(self, boxes):\n concat_boxes = cat([b.bbox for b in boxes], dim=0)\n device, dtype = concat_boxes.device, concat_boxes.dtype\n ids = cat(\n [\n torch.full((len(b), 1), i, dtype=dtype, device=device)\n for i, b in enumerate(boxes)\n ],\n dim=0,\n )\n rois = torch.cat([ids, concat_boxes], dim=1)\n return rois\n\n def forward(self, x, boxes):\n \"\"\"\n Arguments:\n x (list[Tensor]): feature maps for each level\n boxes (list[BoxList]): boxes to be used to perform the pooling operation.\n Returns:\n result (Tensor)\n \"\"\"\n num_levels = len(self.poolers)\n rois = self.convert_to_roi_format(boxes)\n if num_levels == 1:\n return self.poolers[0](x[0], rois)\n\n levels = self.map_levels(boxes)\n\n num_rois = len(rois)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n\n dtype, device = x[0].dtype, x[0].device\n result = torch.zeros(\n (num_rois, num_channels, output_size, output_size),\n dtype=dtype,\n device=device,\n )\n for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):\n idx_in_level = torch.nonzero(levels == level).squeeze(1)\n rois_per_level = rois[idx_in_level]\n result[idx_in_level] = 
pooler(per_level_feature, rois_per_level)\n\n return result\n\nclass PoolerNeighbor(nn.Module):\n \"\"\"\n Pooler for Detection with or without FPN.\n It currently hard-code ROIAlign in the implementation,\n but that can be made more generic later on.\n Also, the requirement of passing the scales is not strictly necessary, as they\n can be inferred from the size of the feature map / size of original image,\n which is available thanks to the BoxList.\n \"\"\"\n\n def __init__(self, neighbor_expand, roi_expand, output_size, scales, sampling_ratio):\n \"\"\"\n Arguments:\n neighbor_expand (float): scale for enlarged proposals\n roi_expand (bool): if the output size is expanded like the proposals\n output_size (list[tuple[int]] or list[int]): output size for the pooled region\n scales (list[float]): scales for each Pooler\n sampling_ratio (int): sampling ratio for ROIAlign\n \"\"\"\n super(PoolerNeighbor, self).__init__()\n\n self.neighbor_expand = neighbor_expand\n ## expand the output as well\n if roi_expand:\n output_size = tuple([ int(x * neighbor_expand) for x in output_size])\n\n poolers = []\n for scale in scales:\n poolers.append(\n ROIAlign(\n output_size, spatial_scale=scale, sampling_ratio=sampling_ratio\n )\n )\n self.poolers = nn.ModuleList(poolers)\n self.output_size = output_size\n\n # get the levels in the feature map by leveraging the fact that the network always\n # downsamples by a factor of 2 at each level.\n lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item()\n lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item()\n self.map_levels = LevelMapper(lvl_min, lvl_max)\n\n def boxes_expand(self, boxes):\n \"\"\"\n Arguments:\n boxes (Tensor):\n Returns:\n result (Tensor)\n \"\"\"\n neighbor_expand = self.neighbor_expand\n\n TO_REMOVE = 1 # TODO remove\n widths = boxes[:, 2] - boxes[:, 0] + TO_REMOVE\n heights = boxes[:, 3] - boxes[:, 1] + TO_REMOVE\n ctr_x = boxes[:, 0] + 0.5 * widths\n ctr_y = boxes[:, 1] + 0.5 * heights\n\n ## \n expand_widths = widths * neighbor_expand\n expand_heights = heights * neighbor_expand\n\n pred_boxes = torch.zeros_like(boxes)\n\n # x1\n pred_boxes[:, 0::4] = ctr_x[:, None] - 0.5 * expand_widths[:, None]\n # y1\n pred_boxes[:, 1::4] = ctr_y[:, None] - 0.5 * expand_heights[:, None]\n # x2 (note: \"- 1\" is correct; don't be fooled by the asymmetry)\n pred_boxes[:, 2::4] = ctr_x[:, None] + 0.5 * expand_widths[:, None] - 1\n # y2 (note: \"- 1\" is correct; don't be fooled by the asymmetry)\n pred_boxes[:, 3::4] = ctr_y[:, None] + 0.5 * expand_heights[:, None] - 1\n\n return pred_boxes\n\n\n def convert_to_roi_format(self, boxes):\n concat_boxes = cat([b.bbox for b in boxes], dim=0)\n ## expand the boxes\n concat_boxes = self.boxes_expand(concat_boxes)\n\n device, dtype = concat_boxes.device, concat_boxes.dtype\n ids = cat(\n [\n torch.full((len(b), 1), i, dtype=dtype, device=device)\n for i, b in enumerate(boxes)\n ],\n dim=0,\n )\n rois = torch.cat([ids, concat_boxes], dim=1)\n return rois\n\n def forward(self, x, boxes):\n \"\"\"\n Arguments:\n x (list[Tensor]): feature maps for each level\n boxes (list[BoxList]): boxes to be used to perform the pooling operation.\n Returns:\n result (Tensor)\n \"\"\"\n num_levels = len(self.poolers)\n\n rois = self.convert_to_roi_format(boxes)\n if num_levels == 1:\n return self.poolers[0](x[0], rois)\n\n levels = self.map_levels(boxes)\n\n num_rois = len(rois)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n\n dtype, device = x[0].dtype, x[0].device\n 
result = torch.zeros(\n (num_rois, num_channels, output_size, output_size),\n dtype=dtype,\n device=device,\n )\n for level, (per_level_feature, pooler) in enumerate(zip(x, self.poolers)):\n idx_in_level = torch.nonzero(levels == level).squeeze(1)\n rois_per_level = rois[idx_in_level]\n result[idx_in_level] = pooler(per_level_feature, rois_per_level)\n\n return result\n\n\n"
] |
[
[
"torch.zeros",
"torch.cat",
"torch.nn.ModuleList",
"torch.zeros_like",
"torch.tensor",
"torch.log2",
"torch.nonzero",
"torch.clamp"
]
] |
ssantos97/SyMo
|
[
"58d7b64f888fd78cc27d4c1092071ef35725f0d4"
] |
[
"Experiments_pendulum/train_noise.py"
] |
[
"import torch, argparse\nimport numpy as np\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch import optim\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nimport pickle\nimport scipy.integrate\nsolve_ivp = scipy.integrate.solve_ivp\n\nimport os, sys\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nPARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nsys.path.append(PARENT_DIR)\nDATA_DIR = THIS_DIR\nTHIS_DIR = THIS_DIR + \"/h=0.01_noise\"\nfrom Code.Trainer import train\nfrom Code.symo import SyMo_T\nfrom Code.Rigid_Body import MLP\nfrom Code.Utils import get_n_params, to_pickle, from_pickle\nfrom Code.integrate_models import integrate_ODE, implicit_integration_DEL\nfrom Code.models import pendulum\nfrom Code.Trainer import train\nfrom Code.root_find import rootfind\nfrom Code.NN import LODE_T, NODE_T, ODE\nfrom data import get_dataset, arrange_DEL_dataset, arrange_NODE_dataset\ndef get_args():\n parser = argparse.ArgumentParser(description=None)\n parser.add_argument('--model', type=str, default='E2E-SyMo', help=\"one of 'NODE', 'L-NODE', 'SyMo' and 'E2E-SyMo'\")\n parser.add_argument('--name', default='pendulum', type=str, help='only one option right now')\n parser.add_argument('--num_angles', default=1, type=int, help='number of rotational coordinates')\n parser.add_argument('--embedding', default=True, type=bool, help='If embedding is desired')\n parser.add_argument('--N_train', type=int, default='128', help=\"Number of different train trajectories\")\n parser.add_argument('--n_train', type=int, default='32', help=\"Number of train observations in each trajectory\")\n parser.add_argument('--N_test', type=int, default='128', help=\"Number of different test trajectories\")\n parser.add_argument('--n_test', type=int, default='32', help=\"Number of test observations in each trajectory\")\n parser.add_argument('--N_int', type=int, default='16', help=\"Number of different test trajectories\")\n parser.add_argument('--n_int', type=int, default='500', help=\"Number of test observations in each trajectory\")\n parser.add_argument('--noise_std', type=float, default='0.', help=\"Induced noise std.\")\n parser.add_argument('--train_seed', default=0, type=int, help=' training random seed')\n parser.add_argument('--int_seed', default=1, type=int, help=' integration random seed')\n parser.add_argument('--learning_rate', default=1e-4, type=float, help=' initial learning rate')\n parser.add_argument('--nonlinearity', default='tanh', type=str, help='neural net nonlinearity')\n parser.add_argument('--n_hidden_layers', default=2, type=int, help='number of hidden layers')\n parser.add_argument('--n_neurons', default=128, type=int, help='number of neurons')\n parser.add_argument('--n_epochs', default=2000, type=int, help='number of epochs')\n parser.add_argument('--patience', default=50, type=int, help='scheduler patience')\n parser.add_argument('--factor', default=0.7, type=float, help='scheduler patience')\n parser.add_argument('--batch_size', default=128, type=int, help='batch size')\n parser.add_argument('--weight_decay', default=0, type=int, help='weight decay')\n parser.add_argument('--time_step', default=0.01, type=float, help='time_step')\n parser.add_argument('--root_find', default='Newton', type=str, help=\"one of 'Newton' or 'Broyden'\")\n parser.add_argument('--odeint', default=\"rk4\", type=str, help=\"One of the solvers available at https://github.com/rtqichen/torchdiffeq\")\n parser.add_argument('--forward_tol', default=1e-5, type=float, 
help=\"If E2E-SyMo specify a forward tolerance\")\n parser.add_argument('--backward_tol', default=1e-8, type=float, help=\"If Broyden specify a backward tolerance\")\n parser.add_argument('--forward_maxiter', default=10, type=int, help=\"If E2E-SyMo specify the maximum number of iterations during forward pass\")\n parser.add_argument('--backward_maxiter', default=20, type=int, help=\"If Broyden specify the maximum number of iterations during backward pass\")\n parser.add_argument('--int_tol', default=1e-4, type=float, help=\"Integration tolerance\")\n parser.add_argument('--int_maxiter', default=10, type=int, help=\"Integration maxiter\")\n parser.add_argument('--save_dir', default=THIS_DIR, type=str, help='where to save the trained model')\n parser.add_argument('--gpu', type=int, default=2)\n parser.set_defaults(feature=True)\n return parser.parse_args() \n\ndef total_loss(args, model, x, target):\n model = model.eval()\n q_past, q, u_past, u, u_next = torch.split(x, args.num_angles, 1)\n q_next = model(x.to(args.device))\n v_next = (q_next - q.to(args.device))/args.time_step\n output = torch.cat((q_next, v_next),1)\n criterion = torch.nn.MSELoss()\n return criterion(output, target.to(args.device))\n\ndef get_model(args, nn, model, trainloader, testloader, criterion):\n optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay) \n scheduler = ReduceLROnPlateau(optimizer, 'min', patience = args.patience, verbose=True, factor=args.factor)\n return train(args.model, nn, model, criterion, optimizer, scheduler, args.device, trainloader, testloader, args.n_epochs)\n\ndef Train(args, x_train, y_train, u_train, x_test, y_test, u_test, data_int, controls_int):\n\n device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')\n # reproducibility: set random seed\n args.device = device\n torch.manual_seed(230)\n np.random.seed(230)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n\n n_angles = args.num_angles\n lr = args.learning_rate\n n_epochs = args.n_epochs\n activation = args.nonlinearity\n n_hidden_layers = args.n_hidden_layers\n n_neurons = args.n_neurons\n h = args.time_step\n embedding = args.embedding\n batch_size = args.batch_size\n weight_decay = args.weight_decay\n args.device = device\n args.d_f = args.num_angles\n \n stats = {}\n\n #Convert to tensors\n x_train = torch.tensor(x_train).float()\n y_train = torch.tensor(y_train).float()\n x_test = torch.tensor(x_test).float()\n y_test = torch.tensor(y_test).float()\n u_train = torch.tensor(u_train).float()\n u_test = torch.tensor(u_test).float()\n \n #flatten the data\n x_train = torch.flatten(x_train, 0,1)\n y_train = torch.flatten(y_train, 0,1)\n x_test = torch.flatten(x_test, 0,1)\n y_test = torch.flatten(y_test, 0,1)\n u_train = torch.flatten(u_train, 0,1)\n u_test = torch.flatten(u_test, 0,1)\n\n if (args.model == 'E2E-SyMo') and args.root_find == \"Newton\":\n args.input_dim = 1\n forward_tol = args.forward_tol\n forward_maxiter = args.forward_maxiter\n nn = SyMo_T(n_angles, n_hidden_layers, n_neurons, h, activation, embedding=embedding, ln=False).to(device)\n model = rootfind(nn, args.root_find, forward_tol, forward_maxiter, analyse=True)\n\n \n criterion = nn.implicit_loss\n train_data = TensorDataset(x_train, y_train[:, :args.num_angles])\n trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=False)\n test_data = TensorDataset(x_test, y_test[:, :args.num_angles])\n testloader = DataLoader(test_data, 
batch_size=len(x_test), shuffle=False)\n checkpoint = get_model(args, nn, model, trainloader, testloader, criterion)\n\n elif (args.model == 'E2E-SyMo') and args.root_find == \"Broyden\":\n args.input_dim = 1\n forward_tol = args.forward_tol\n forward_maxiter = args.forward_maxiter\n backward_tol = args.backward_tol\n backward_maxiter = args.backward_maxiter\n nn = SyMo_T(n_angles, n_hidden_layers, n_neurons, h, activation, embedding=embedding, ln=False).to(device)\n model = rootfind(nn, args.root_find, forward_tol, forward_maxiter, backward_tol, backward_maxiter)\n\n \n criterion = nn.implicit_loss\n train_data = TensorDataset(x_train, y_train[:, :args.num_angles])\n trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=False)\n test_data = TensorDataset(x_test, y_test[:, :args.num_angles])\n testloader = DataLoader(test_data, batch_size=len(x_test), shuffle=False)\n checkpoint = get_model(args, nn, model, trainloader, testloader, criterion)\n\n \n \n elif (args.model == 'SyMo'):\n args.input_dim = 1\n forward_tol = args.forward_tol\n forward_maxiter = args.forward_maxiter\n nn = SyMo_T(n_angles, n_hidden_layers, n_neurons, h, activation, embedding=embedding, ln=False).to(device)\n model = rootfind(nn, args.root_find, forward_tol, forward_maxiter)\n criterion = nn.loss\n d_f = args.d_f\n\n #add q_next to the input data\n x_train_wq = torch.cat((x_train[:, :int(2*d_f)], y_train[:, 0][:, None], x_train[:, int(2*d_f):]), dim=1)\n x_test_wq = torch.cat((x_test[:, :int(2*d_f)], y_test[:, 0][:, None], x_test[:, int(2*d_f):]), dim=1)\n \n train_data = TensorDataset(x_train_wq, y_train[:, :args.num_angles])\n trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=False)\n test_data = TensorDataset(x_test_wq, y_test[:, :args.num_angles])\n testloader = DataLoader(test_data, batch_size=len(x_test), shuffle=False)\n checkpoint = get_model(args, nn, model, trainloader, testloader, criterion)\n\n\n\n\n elif (args.model == 'L-NODE'):\n args.input_dim = 1\n nn = LODE_T(n_angles, n_hidden_layers, n_neurons, activation, embedding=embedding, ln=False).to(device)\n model = ODE(nn, args.odeint, h).to(device)\n criterion = nn.loss\n x_train = torch.cat((x_train, u_train), dim=1)\n train_data = TensorDataset(x_train, y_train)\n trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=False)\n x_test = torch.cat((x_test, u_test), dim=1)\n test_data = TensorDataset(x_test, y_test)\n testloader = DataLoader(test_data, batch_size=len(x_test), shuffle=False)\n checkpoint = get_model(args, nn, model, trainloader, testloader, criterion)\n \n \n elif (args.model == 'NODE'):\n args.input_dim = 3\n u_index = 0\n nn = NODE_T(n_angles, n_hidden_layers, n_neurons, activation, u_index, embedding=embedding, ln=False).to(device)\n model = ODE(nn, args.odeint, h).to(device)\n criterion = nn.loss\n x_train = torch.cat((x_train, u_train), dim=1)\n train_data = TensorDataset(x_train, y_train)\n trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=False)\n x_test = torch.cat((x_test, u_test), dim=1)\n test_data = TensorDataset(x_test, y_test)\n testloader = DataLoader(test_data, batch_size=len(x_test), shuffle=False)\n checkpoint = get_model(args, nn, model, trainloader, testloader, criterion)\n \n else:\n raise ValueError('model_type not supported')\n \n\n stats = checkpoint.copy()\n\n #Integrate, compute energies, masses and respective losses \n H_true = pendulum().H\n y_int_pred = torch.zeros(size=(args.N_int, args.n_int, int(2*args.d_f)))\n E_loss_per_traj = []\n Energies = 
torch.empty(size=(args.N_int, args.n_int, 1))\n Vs = torch.empty(size=(args.N_int, args.n_int, 1))\n Ts = torch.empty(size=(args.N_int, args.n_int, 1))\n Hs = torch.empty(size=(args.N_int, args.n_int, int(args.d_f**2)))\n H_loss_per_traj = []\n\n if args.model == 'SyMo' or args.model == 'E2E-SyMo':\n x_int, y_int_true, _ = arrange_DEL_dataset(data_int['test_x'], controls_int['test_u'])\n x_int = torch.tensor(x_int).float()\n y_int_true = torch.tensor(y_int_true).float()\n E_int_true = pendulum().energy(data_int['x0'])\n\n x0_int = x_int[:,0, :2]\n for i in range(args.N_int):\n y_pred = implicit_integration_DEL(args.root_find, args.n_int, args.time_step, nn.eval(), x0_int[i], args.int_tol, args.int_maxiter, device)\n y_int_pred[i] = y_pred.cpu()\n E = pendulum().energy(y_int_pred[i])\n Energies[i] = E.view(len(E), 1)\n H, V, T = nn.get_matrices(y_pred.to(args.device))\n Vs[i] = V\n Ts[i] = T\n Hs[i] = H.flatten(1,2)\n E_loss_per_traj.append(torch.mean((E_int_true[i] - E)**2))\n H_loss_per_traj.append(torch.mean((H_true - H)**2))\n \n elif args.model =='NODE' or args.model == 'L-NODE':\n x_int, y_int_true, _ = arrange_NODE_dataset(data_int['test_x'], controls_int['test_u'])\n x_int = torch.tensor(x_int).float()\n y_int_true = torch.tensor(y_int_true).float()\n E_int_true = pendulum().energy(data_int['x0'])\n x0_int = x_int[:,0]\n \n for i in range(args.N_int):\n y_pred = integrate_ODE(nn.eval(), args.odeint, x0_int[i], args.n_int, args.time_step, args.device)\n y_int_pred[i] = y_pred.cpu()\n E = pendulum().energy(y_int_pred[i])\n E_loss_per_traj.append(torch.mean((E_int_true[i] - E)**2))\n Energies[i] = E.view(len(E), 1)\n \n if args.model == 'L-NODE':\n H, V, T = nn.get_matrices(y_int_pred[i].to(args.device))\n Vs[i] = V\n Ts[i] = T\n Hs[i] = H.flatten(1,2)\n H_loss_per_traj.append(torch.mean((H_true - H)**2))\n \n E_loss_per_traj = torch.stack(E_loss_per_traj)\n if not args.model == 'NODE':\n H_loss_per_traj = torch.stack(H_loss_per_traj)\n stats['H_std'] = H_loss_per_traj.std().item()\n stats['H_loss'] = torch.mean(H_loss_per_traj).detach().cpu().numpy().item()\n stats['H_int'] = Hs.numpy()\n stats['T_int'] = Ts.numpy()\n stats['V_int'] = Vs.numpy()\n \n #get integration losses\n criterion = torch.nn.MSELoss(reduction='none')\n losses = criterion(y_int_pred, y_int_true)\n int_loss_per_traj = torch.mean(losses, [1,2])\n int_loss_per_traj_poses = torch.mean(losses, [1])[:, 0]\n stats['int_std'] = int_loss_per_traj.std().item()\n stats['int_loss'] = torch.mean(int_loss_per_traj).detach().cpu().numpy().item()\n \n stats['int_std_poses'] = int_loss_per_traj_poses.std().item()\n stats['int_loss_poses'] = torch.mean(int_loss_per_traj_poses).detach().cpu().numpy().item()\n \n #get energy losses- mean over MSE of each trajectory\n stats['E_std'] = E_loss_per_traj.std().item()\n stats['E_loss'] = torch.mean(E_loss_per_traj).detach().cpu().numpy().item()\n #Save Es for eventual plots\n stats['E_int'] = Energies.numpy()\n stats['E_int_true'] = E_int_true\n\n\n #Save data for reproducibility\n stats['int_losses'] = losses.numpy()\n stats['x_train'] = x_train.numpy()\n stats['y_train'] = y_train.numpy()\n stats['x_test'] = x_test.numpy()\n stats['y_test'] = y_test.numpy()\n stats['x0_int'] = x0_int.numpy()\n stats['y_int_pred'] = y_int_pred.numpy()\n stats['y_int_true'] = y_int_true.numpy()\n \n #just to check losses with reconstructed velocities\n if args.model == 'SyMo' or args.model == 'E2E-SyMo':\n stats['train_loss_vel'] = total_loss(args, model, x_train, y_train)\n stats['test_loss_vel'] = 
total_loss(args, model, x_test, y_test)\n return nn, stats\n\ndef save_model(args, model, stats):\n hyperparameters = vars(args)\n hyperparameters['total_n_params'] = get_n_params(model)\n stats['hyperparameters'] = hyperparameters\n os.makedirs(args.save_dir) if not os.path.exists(args.save_dir) else None\n if args.root_find == 'Broyden' and args.model == 'E2E-SyMo':\n label = '-B-SyMo'\n elif args.root_find == 'Newton' and args.model == 'E2E-SyMo':\n label = '-N-SyMo'\n elif args.model == 'SyMo':\n label = '-SyMo'\n elif args.model == 'L-NODE':\n label = '-L-NODE-' + args.odeint \n elif args.model == 'NODE':\n label = '-NODE-' + args.odeint\n\n path = '{}/{}{}-p-{}x{}_sigma_{}.tar'.format(args.save_dir, args.name, label, args.N_train, args.n_train, args.noise)\n torch.save(model.state_dict(), path)\n path = '{}/{}{}-p-{}x{}_sigma_{}-stats.pkl'.format(args.save_dir, args.name, label, args.N_train, args.n_train, args.noise)\n to_pickle(stats, path)\n\n\n\nif __name__ == \"__main__\":\n args = get_args()\n\n #Load noiseless data set\n dir = DATA_DIR + \"/h=0.01/dataset.pkl\"\n data = from_pickle(dir)\n n_trajs = 32\n noise_std = [0.01, 0.05, 0.1]\n\n import copy\n noisy_data_sigma_01 = copy.deepcopy(data)\n noisy_data_sigma_05 = copy.deepcopy(data)\n noisy_data_sigma_1 = copy.deepcopy(data)\n\n data_int, controls_int = data['int_data']['data'], data['int_data']['controls'] \n data, controls = data['data'], data['controls'] \n \n np.random.seed(seed=0)\n dummy_data_train = data['train_x'][:n_trajs] + np.random.randn(*data['train_x'][:n_trajs].shape)*noise_std[0]\n dummy_controls_train = controls['train_u'][:n_trajs] + np.random.randn(*controls['train_u'][:n_trajs].shape)*noise_std[0]\n dummy_data_test = data['test_x'][:n_trajs] + np.random.randn(*data['test_x'][:n_trajs].shape)*noise_std[0]\n dummy_controls_test = controls['test_u'][:n_trajs] + np.random.randn(*controls['test_u'][:n_trajs].shape)*noise_std[0]\n\n noisy_data_sigma_01['data']['train_x'] = dummy_data_train\n noisy_data_sigma_01['controls']['train_u'] = dummy_controls_train\n noisy_data_sigma_01['data']['test_x'] = dummy_data_test\n noisy_data_sigma_01['controls']['test_u'] = dummy_controls_test\n \n #save data for reproducibility\n path = '{}/{}.pkl'.format(args.save_dir, \"dataset\" + \"_sigma_\" + str(noise_std[0]))\n to_pickle(noisy_data_sigma_01, path)\n\n np.random.seed(seed=0)\n dummy_data_train = data['train_x'][:n_trajs] + np.random.randn(*data['train_x'][:n_trajs].shape)*noise_std[1]\n dummy_controls_train = controls['train_u'][:n_trajs] + np.random.randn(*controls['train_u'][:n_trajs].shape)*noise_std[1]\n dummy_data_test = data['test_x'][:n_trajs] + np.random.randn(*data['test_x'][:n_trajs].shape)*noise_std[1]\n dummy_controls_test = controls['test_u'][:n_trajs] + np.random.randn(*controls['test_u'][:n_trajs].shape)*noise_std[1]\n\n noisy_data_sigma_05['data']['train_x'] = dummy_data_train\n noisy_data_sigma_05['controls']['train_u'] = dummy_controls_train\n noisy_data_sigma_05['data']['test_x'] = dummy_data_test\n noisy_data_sigma_05['controls']['test_u'] = dummy_controls_test\n \n #save data for reproducibility\n path = '{}/{}.pkl'.format(args.save_dir, \"dataset\" + \"sigma_\" + str(noise_std[1]))\n to_pickle(noisy_data_sigma_05, path)\n \n np.random.seed(seed=0)\n dummy_data_train = data['train_x'][:n_trajs] + np.random.randn(*data['train_x'][:n_trajs].shape)*noise_std[2]\n dummy_controls_train = controls['train_u'][:n_trajs] + np.random.randn(*controls['train_u'][:n_trajs].shape)*noise_std[2]\n dummy_data_test = 
data['test_x'][:n_trajs] + np.random.randn(*data['test_x'][:n_trajs].shape)*noise_std[2]\n dummy_controls_test = controls['test_u'][:n_trajs] + np.random.randn(*controls['test_u'][:n_trajs].shape)*noise_std[2]\n\n noisy_data_sigma_1['data']['train_x'] = dummy_data_train\n noisy_data_sigma_1['controls']['train_u'] = dummy_controls_train\n noisy_data_sigma_1['data']['test_x'] = dummy_data_test\n noisy_data_sigma_1['controls']['test_u'] = dummy_controls_test\n \n #save data for reproducibility\n path = '{}/{}.pkl'.format(args.save_dir, \"dataset\" + \"sigma_\" + str(noise_std[2]))\n to_pickle(noisy_data_sigma_1, path)\n \n data_array = [noisy_data_sigma_01, noisy_data_sigma_05, noisy_data_sigma_1]\n\n for i in range(len(data_array)):\n #Create data for symos\n x_train_symo, y_train_symo, u_train_symo = arrange_DEL_dataset(data_array[i]['data']['train_x'], data_array[i]['controls']['train_u'])\n x_test_symo, y_test_symo, u_test_symo = arrange_DEL_dataset(data_array[i]['data']['test_x'], data_array[i]['controls']['test_u'])\n \n #Create data for odes\n x_train_ode, y_train_ode, u_train_ode = arrange_NODE_dataset(data_array[i]['data']['train_x'], data_array[i]['controls']['train_u'])\n x_test_ode, y_test_ode, u_test_ode = arrange_NODE_dataset(data_array[i]['data']['test_x'], data_array[i]['controls']['test_u'])\n \n args.noise = noise_std[i]\n methods = ['E2E-SyMo''', \"SyMo\", 'L-NODE', 'NODE']\n \n for method in methods:\n args.model = method\n print(method)\n args.N_train = 32\n args.batch_size = int(32*4)\n \n if method == 'E2E-SyMo':\n root_find = ['Newton']\n for rf in root_find:\n args.root_find = rf\n model, stats = Train(args, x_train_symo, y_train_symo, u_train_symo, x_test_symo, y_test_symo, u_test_symo, data_int, controls_int)\n save_model(args, model, stats)\n \n elif method == 'SyMo':\n model, stats = Train(args, x_train_symo, y_train_symo, u_train_symo, x_test_symo, y_test_symo, u_test_symo, data_int, controls_int)\n save_model(args, model, stats)\n else:\n odeint = ['midpoint', 'rk4']\n for ode in odeint:\n args.odeint = ode\n model, stats = Train(args, x_train_ode, y_train_ode, u_train_ode, x_test_ode, y_test_ode, u_test_ode, data_int, controls_int)\n save_model(args, model, stats)\n"
] |
[
[
"torch.mean",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.empty",
"numpy.random.seed",
"torch.cat",
"torch.manual_seed",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.flatten",
"numpy.random.randn",
"torch.cuda.is_available",
"torch.split",
"torch.stack",
"torch.nn.MSELoss"
]
] |
JuanPorrasl/AMSECovid19
|
[
"e8d754e04c301f164b19117e779ea82e79b79558"
] |
[
"dash/cleaning_datas_tests.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 18 16:03:41 2020\n\n@author: juanporras\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nimport json\nimport urllib.request\nfrom urllib.request import urlopen\n\nimport datetime\nimport time\n\nconfig = {'displayModeBar': False}\n\nfrom cleaning_datas import df\n\nOWData = pd.read_csv(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\")\n\n"
] |
[
[
"pandas.read_csv"
]
] |
pin-hsuan-wu/caw-quant-training2
|
[
"d3bb66390c2060d8dde41725f0d4da97a02d07cf"
] |
[
"section3/task2/sec3_task2.py"
] |
[
"import talib\nimport numpy as np\nfrom binance.websockets import BinanceSocketManager\nfrom binance.client import Client\nimport pandas as pd\n\nwith open(\"./binance_api.txt\") as f:\n file = f.read()\nkey = file.split(',')[0]\nsecret = file.split(',')[1]\n\nclient = Client(api_key=key, api_secret=secret)\n\nbm = BinanceSocketManager(client)\n\n# 1\nheaders = ['close', 'high', 'low', 'open', 'volume', 'baseVolume', 'datetime']\ndf = pd.DataFrame(columns=headers)\n\n\ndef append_df(msg):\n global df\n if msg['k']['x'] == True:\n data = pd.DataFrame({\"close\": [msg['k']['c']], \"high\": [msg['k']['h']], \"low\": [msg['k']['l']], \"open\": [\n msg['k']['o']], \"volume\": [msg['k']['v']], \"baseVolume\": [msg['k']['q']], \"datetime\": [msg['E']]}, dtype=np.float64)\n data['datetime'] = pd.to_datetime(data['datetime'], unit='ms')\n data[\"datetime\"] = data[\"datetime\"].dt.strftime('%Y-%m-%d %H:%M:%S')\n df = df.append(data, ignore_index=True)\n output = talib.SMA(df['close'].values, timeperiod=3)\n print(output)\n else:\n pass\n\n\nbm.start_kline_socket(\n 'BTCUSDT', interval=Client.KLINE_INTERVAL_1MINUTE, callback=append_df)\nbm.start()\n\n\n# 2\nclass Trading():\n\n def __init__(self):\n self.position = 0\n self.df = pd.DataFrame(columns=['close', 'high', 'low', 'open', 'volume', 'baseVolume', 'datetime'])\n\n def smacross(self, msg):\n if msg['k']['x'] == True:\n data = pd.DataFrame({\"close\": [msg['k']['c']], \"high\": [msg['k']['h']], \"low\": [msg['k']['l']], \"open\": [\n msg['k']['o']], \"volume\": [msg['k']['v']], \"baseVolume\": [msg['k']['q']], \"datetime\": [msg['E']]}, dtype=np.float64)\n data['datetime'] = pd.to_datetime(data['datetime'], unit='ms')\n data[\"datetime\"] = data[\"datetime\"].dt.strftime(\n '%Y-%m-%d %H:%M:%S')\n self.df = self.df.append(data, ignore_index=True)\n sma_f = talib.SMA(self.df['close'].values, timeperiod=3)\n sma_s = talib.SMA(self.df['close'].values, timeperiod=5)\n \n def cross_up(self, sma_f, sma_s):\n if sma_f[-1] > sma_s[-1]:\n return True\n else:\n return False\n\n def cross_down(self, sma_f, sma_s):\n if sma_f[-1] < sma_s[-1]:\n return True\n else:\n return False\n\n if cross_up == True:\n self.position = 1\n if (cross_down == True) & (self.position == 1):\n self.position = 0\n print(self.position)\n else:\n pass\n\n\nbm.start_kline_socket(\n 'BTCUSDT', interval=Client.KLINE_INTERVAL_1MINUTE, callback=Trading().smacross)\nbm.start()\n"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
ShiChenAI/ACCORD-tf
|
[
"1b07b133d55d702ddea0186ff8849da0346ec84f"
] |
[
"utils.py"
] |
[
"import numpy as np\nfrom tqdm import tqdm\nimport yaml\nimport glob\nfrom pathlib import Path\nfrom datasets import ACCORDDataloader\n\nclass Params:\n def __init__(self, project_file):\n self.params = yaml.safe_load(open(project_file).read())\n\n def __getattr__(self, item):\n return self.params.get(item, None)\n\ndef cal_acc(**kwargs):\n mode = kwargs.get('mode', 'train')\n threshold = kwargs.get('threshold', [0.5, 0.1])\n pred = kwargs.get('pred', None)\n if mode == 'train':\n batch_size = pred.shape[0]//2 \n pred = np.sum(np.squeeze(pred, axis=2), axis=1)\n pos_sum = np.sum(pred[:batch_size])\n neg_sum = np.sum(pred[batch_size:])\n pos_mask = np.where(pred < threshold[0], 0 ,1)\n neg_mask = np.where(pred < threshold[1], 0 ,1)\n #pred = np.where(pred < threshold, 0 ,1)\n\n return (np.sum(pos_mask[:batch_size]==1) + np.sum(neg_mask[batch_size:]==0)) / len(pred), pos_sum, neg_sum\n elif mode == 'eval':\n abnormal_flag = kwargs.get('abnormal_flag', None)\n tp = 0\n total = 0\n for fault_flag, p in pred.items():\n p = np.sum(np.squeeze(p, axis=2), axis=1)\n p = np.where(p < threshold, 0 ,1)\n if fault_flag == abnormal_flag:\n # Positive\n tp += np.sum(p==1)\n else:\n # Negative\n tp += np.sum(p==0)\n total += len(p)\n\n return tp / total\n\ndef cal_classifier_acc(fault_flags, faults_classifiers, threshold=0.6):\n accs = {}\n for fault_flag in fault_flags:\n data_loader = faults_classifiers[fault_flag]['test_loader']\n pos_model = faults_classifiers[fault_flag]['model']\n neg_models = []\n for k, v in faults_classifiers.items():\n neg_models.append(v['model'])\n\n tp = 0\n total = 0\n process = tqdm(enumerate(data_loader), total=data_loader.gen_len())\n for step, data in process:\n batch = data['pos_data']\n pred = pos_model(batch)\n pred = np.sum(np.squeeze(pred, axis=2), axis=1)\n for neg_model in neg_models:\n neg_pred = neg_model(batch)\n neg_pred = np.sum(np.squeeze(neg_pred, axis=2), axis=1)\n pred = np.vstack((pred, neg_pred))\n\n ab = np.where(pred>0, 1, 0)[0]\n max_idxs = np.argmax(pred, axis=0)\n tp += len(np.intersect1d(np.where(max_idxs==0)[0], np.where(ab==1)[0]))\n total += len(max_idxs)\n acc = len(np.intersect1d(np.where(max_idxs==0)[0], np.where(ab==1)[0])) / len(max_idxs)\n\n postfix = '[{0} samples evaluation] Step: {1:4d}, Val acc: {2:.4f}'.format(fault_flag, step+1, acc)\n process.set_postfix_str(postfix)\n\n fault_acc = tp / total\n accs[fault_flag] = fault_acc\n\n return accs\n\ndef generate_classifier(fault_flags, dataset, val_idx, batch_size):\n faults_classifiers = {}\n for fault_flag in fault_flags:\n abnormal_flags = [fault_flag]\n print('[fold: {0}] Generating data...[fault_flag: {1}]'.format(val_idx+1, fault_flag))\n datasets = dataset.generate_datasets(abnormal_flags, val_idx)\n faults_classifiers[fault_flag] = {'model': None,\n 'train_loader': ACCORDDataloader(datasets['train'], batch_size), \n 'test_loader': ACCORDDataloader(datasets['test'], batch_size)}\n\n return faults_classifiers\n\ndef increment_dir(dir, comment=''):\n # Increments a directory runs/exp1 --> runs/exp2_comment\n\n n = 0 # number\n dir = str(Path(dir)) # os-agnostic\n d = sorted(glob.glob(dir + '*')) # directories\n if len(d):\n n = max([int(x[len(dir):x.find('_') if '_' in x else None]) for x in d]) + 1 # increment\n return dir + str(n) + ('_' + comment if comment else '')"
] |
[
[
"numpy.squeeze",
"numpy.argmax",
"numpy.where",
"numpy.sum",
"numpy.vstack"
]
] |
ChenQingya/instagan-part
|
[
"1ff152a9340a88e0681d62a7250434ef50cede8a"
] |
[
"featuresimilarityloss/vgg_model.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torchvision.models import vgg19, vgg16\nfrom collections import OrderedDict\n\n# VGG 19\n# vgg_layer = {\n# 'conv_1_1': 0, 'conv_1_2': 2, 'pool_1': 4, 'conv_2_1': 5, 'conv_2_2': 7, 'pool_2': 9, 'conv_3_1': 10, 'conv_3_2': 12, 'conv_3_3': 14, 'conv_3_4': 16, 'pool_3': 18, 'conv_4_1': 19, 'conv_4_2': 21, 'conv_4_3': 23, 'conv_4_4': 25, 'pool_4': 27, 'conv_5_1': 28, 'conv_5_2': 30, 'conv_5_3': 32, 'conv_5_4': 34, 'pool_5': 36\n# }\n#\n# vgg_layer_inv = {\n# 0: 'conv_1_1', 2: 'conv_1_2', 4: 'pool_1', 5: 'conv_2_1', 7: 'conv_2_2', 9: 'pool_2', 10: 'conv_3_1', 12: 'conv_3_2', 14: 'conv_3_3', 16: 'conv_3_4', 18: 'pool_3', 19: 'conv_4_1', 21: 'conv_4_2', 23: 'conv_4_3', 25: 'conv_4_4', 27: 'pool_4', 28: 'conv_5_1', 30: 'conv_5_2', 32: 'conv_5_3', 34: 'conv_5_4', 36: 'pool_5'\n# }\n# VGG 16\nvgg_layer = {\n 'conv_1_1': 0, 'conv_1_2': 2, 'pool_1': 4, 'conv_2_1': 5, 'conv_2_2': 7, 'pool_2': 9, 'conv_3_1': 10, 'conv_3_2': 12, 'conv_3_3': 14, 'pool_3': 16, 'conv_4_1': 17, 'conv_4_2': 19, 'conv_4_3': 21, 'pool_4': 23, 'conv_5_1': 24, 'conv_5_2': 26, 'conv_5_3': 28, 'pool_5': 30\n}\n\nvgg_layer_inv = {\n 0: 'conv_1_1', 2: 'conv_1_2', 4: 'pool_1', 5: 'conv_2_1', 7: 'conv_2_2', 9: 'pool_2', 10: 'conv_3_1', 12: 'conv_3_2', 14: 'conv_3_3', 16: 'pool_3', 17: 'conv_4_1', 19: 'conv_4_2', 21: 'conv_4_3', 23: 'pool_4', 24: 'conv_5_1', 26: 'conv_5_2', 28: 'conv_5_3', 30: 'pool_5'\n}\n\nclass VGG_Model(nn.Module):\n def __init__(self, listen_list=None):\n super(VGG_Model, self).__init__()\n vgg = vgg16(pretrained=True)\n self.vgg_model = vgg.features\n vgg_dict = vgg.state_dict()\n vgg_f_dict = self.vgg_model.state_dict()\n vgg_dict = {k: v for k, v in vgg_dict.items() if k in vgg_f_dict}\n vgg_f_dict.update(vgg_dict)\n # no grad\n for p in self.vgg_model.parameters():\n p.requires_grad = False\n if listen_list == []:\n self.listen = []\n else:\n self.listen = set()\n for layer in listen_list:\n self.listen.add(vgg_layer[layer])\n self.features = OrderedDict()\n\n def forward(self, x):\n # if self.transform_input:\n if True:\n x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5\n x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5\n x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5\n x = torch.cat((x_ch0, x_ch1, x_ch2), 1)\n for index, layer in enumerate(self.vgg_model):\n x = layer(x)\n if index in self.listen:\n self.features[vgg_layer_inv[index]] = x\n return self.features\n\n\n\n"
] |
[
[
"torch.unsqueeze",
"torch.cat"
]
] |
ougx/swatResultReader
|
[
"554128883095ed9a2e0f2cae46831ff92aff7ea6"
] |
[
"save_flow.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nThis script read a SWAT output.rch file and plot against USGS observed flow.\n\nCreated on Fri Feb 14 00:21:31 2020\n\n@author: Michael Ou\n\nusage:\n python swat_plot.py \n\"\"\"\n\n\n\n\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom swat_reader import swat_reader\nfrom usgs_water_data_reader import read_usgs_flow\nimport argparse\n\nplt.style.use('ggplot')\n \nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Save streamflow from SWAT output.rch (against USGS measurements) to CSV files')\n parser.add_argument('TxtInOut', help='TxtInOut directory path, required.')\n parser.add_argument('-b', '--subbasin', default=[1], type=int, nargs='*', help='Desired subbasin index/indices to save (default: %(default)s).')\n parser.add_argument('-u', '--usgs', nargs='*', help='The USGS site numbers corresponding to the subbasins')\n parser.add_argument('-o', '--output', default='.', metavar='output_dir', help='Output directory for plots (default: %(default)s).')\n parser.add_argument('-s1', action='store_true', help='All the reaches will be saved in the same CSV. ')\n parser.add_argument('-l', '--lengthunit', default='m', choices=['m','f','af'], help='The unit for flow volume m:meters; f:feet; af:acre-feet')\n parser.add_argument('-t', '--timeunit', default='s', choices=['s','d'], help='The unit for flow volume s:second; d:day; m:month; y:year')\n parser.add_argument('-p', '--prefix', default='', help='Prefix for CSV file names')\n parser.add_argument('-n', type=int, default=100, metavar='minimum_number_record', help='The minimum observation record number of a gauge. \\\n If the number of record during the output period is smaller than this number, the usgs site will not included in the plot. 
(default: %(default)s).')\n\n \n \n # parser.print_help()\n # args = parser.parse_args('D:\\\\WorkSync\\\\CPNRD-UnSWAT\\\\ArcSWAT_2021\\\\Scenarios\\\\Default-Irrigation\\\\TxtInOut -b 8 14 15 22 53 72 84 91 92 93 99 118 119 123\\\n # --usgs 06774000 06794650 06772898 06773500 06772775 06772100 06772000 06771500 06769000 06769525 06770500 06770000 06770200 06767500 -p flow -n 100'.split())\n \n args = parser.parse_args()\n \n # print(args)\n if args.usgs is not None:\n assert len(args.subbasin) == len(args.usgs), 'The subbasin number need to the gaging station number.'\n \n # read the output.rch file:\n swatreader = swat_reader(args.TxtInOut)\n df_out = swatreader.read_rch()\n \n # filter the subbasins\n df_filter = swat_reader.filter(df_out, args.subbasin, [\"FLOW_OUTcms\"])\n \n # convert units if necessary\n time_factor = dict(s=1., d=86400.)\n time_label = dict(s='sec', d='day')\n length_factor = dict(m=1., f=35.3147, af=0.000810714)\n length_factor_usgs = dict(m=0.0283168, f=1, af=1/43560)\n length_label = dict(m='meter$^3$', f='feet$^3$', af='acre-feet')\n\n\n df_filter *= length_factor[args.unit] * time_factor[args.timeunit]\n df_filter.columns = ['Simulated']\n # print('df_fileter:\\n', df_filter)\n # download the USGS data\n start_date = df_filter.index.levels[1].min()\n end_date = df_filter.index.levels[1].max()\n print('The starting date of the SWAT output file is:', start_date)\n print('The ending date of the SWAT output file is :', end_date)\n \n output_dir = args.output\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n \n for s, u in zip(args.subbasin, args.usgs):\n \n # read flow\n \n # read flow\n flow, gauge_names = read_usgs_flow([u], '{:%Y-%m-%d}'.format(start_date), '{:%Y-%m-%d}'.format(end_date), 'D')\n flow = flow.iloc[:, 1]\n flow.index = pd.DatetimeIndex(flow.index)\n if swatreader.cio['IPRINT'] == '0': # monthly output\n flow = pd.to_numeric(flow).resample('M').mean()\n elif swatreader.cio['IPRINT'] == '1':\n pass\n else:\n raise NotImplementedError('IPRINT is {} for annual output.\\nPlotting annual output is not supported.'.format(args.iprint))\n \n\n \n \n df_filter.loc[s].plot(ax=ax, label='Simulated', legend=True, linewidth=args.slw, linestyle=args.sls)\n csv = df_filter.loc[s]\n csv.columns = ['Simulated']\n \n if flow.shape[0] > args.n:\n \n observed = flow\n \n observed = pd.to_numeric(observed, errors='coerce').dropna() * length_factor_usgs[args.unit]\n observed.index = pd.DatetimeIndex(observed.index) \n observed.plot( ax=ax, label='Observed', legend=True, linewidth=args.olw, linestyle=args.ols)\n observed.name = 'Observed'\n csv = pd.concat([csv, observed], axis=1)\n \n if args.log:\n ax.set_yscale('log')\n ax.set_title(gauge_names[u])\n ax.set_xlabel('Time')\n ax.set_ylabel('Streamflow ({}/{})'.format(length_label[args.unit], time_label[args.timeunit]))\n fig.savefig(os.path.join(args.output, '{}{}.png'.format(args.prefix, s)), dpi=300)\n csv.to_csv(os.path.join(args.output, '{}{}.csv'.format(args.prefix, s)))\n\n print('Plots are generated successfully at {}'.format(os.path.abspath(args.output)))\n\n"
] |
[
[
"pandas.concat",
"pandas.to_numeric",
"matplotlib.pyplot.style.use",
"pandas.DatetimeIndex"
]
] |
Davidnet/EmbedMask
|
[
"7f9082cdfc0ec41e4bdb86fff50008f99abd2c7e"
] |
[
"fcos_core/structures/bounding_box.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\n\n# transpose\nFLIP_LEFT_RIGHT = 0\nFLIP_TOP_BOTTOM = 1\n\n\nclass BoxList(object):\n \"\"\"\n This class represents a set of bounding boxes.\n The bounding boxes are represented as a Nx4 Tensor.\n In order to uniquely determine the bounding boxes with respect\n to an image, we also store the corresponding image dimensions.\n They can contain extra information that is specific to each bounding box, such as\n labels.\n \"\"\"\n\n def __init__(self, bbox, image_size, mode=\"xyxy\"):\n device = bbox.device if isinstance(bbox, torch.Tensor) else torch.device(\"cpu\")\n bbox = torch.as_tensor(bbox, dtype=torch.float32, device=device)\n if len(bbox) > 0:\n if bbox.ndimension() != 2:\n raise ValueError(\n \"bbox should have 2 dimensions, got {}\".format(bbox.ndimension())\n )\n if bbox.size(-1) != 4:\n raise ValueError(\n \"last dimension of bbox should have a \"\n \"size of 4, got {}\".format(bbox.size(-1))\n )\n if mode not in (\"xyxy\", \"xywh\"):\n raise ValueError(\"mode should be 'xyxy' or 'xywh'\")\n\n self.bbox = bbox\n self.size = image_size # (image_width, image_height)\n self.mode = mode\n self.extra_fields = {}\n\n def add_field(self, field, field_data):\n self.extra_fields[field] = field_data\n\n def get_field(self, field):\n return self.extra_fields[field]\n\n def has_field(self, field):\n return field in self.extra_fields\n\n def fields(self):\n return list(self.extra_fields.keys())\n\n def _copy_extra_fields(self, bbox):\n for k, v in bbox.extra_fields.items():\n self.extra_fields[k] = v\n\n def convert(self, mode):\n if self.__len__() == 0:\n return self\n if mode not in (\"xyxy\", \"xywh\"):\n raise ValueError(\"mode should be 'xyxy' or 'xywh'\")\n if mode == self.mode:\n return self\n # we only have two modes, so don't need to check\n # self.mode\n xmin, ymin, xmax, ymax = self._split_into_xyxy()\n if mode == \"xyxy\":\n bbox = torch.cat((xmin, ymin, xmax, ymax), dim=-1)\n bbox = BoxList(bbox, self.size, mode=mode)\n else:\n TO_REMOVE = 1\n bbox = torch.cat(\n (xmin, ymin, xmax - xmin + TO_REMOVE, ymax - ymin + TO_REMOVE), dim=-1\n )\n bbox = BoxList(bbox, self.size, mode=mode)\n bbox._copy_extra_fields(self)\n return bbox\n\n def _split_into_xyxy(self):\n if self.mode == \"xyxy\":\n xmin, ymin, xmax, ymax = self.bbox.split(1, dim=-1)\n return xmin, ymin, xmax, ymax\n elif self.mode == \"xywh\":\n TO_REMOVE = 1\n xmin, ymin, w, h = self.bbox.split(1, dim=-1)\n return (\n xmin,\n ymin,\n xmin + (w - TO_REMOVE).clamp(min=0),\n ymin + (h - TO_REMOVE).clamp(min=0),\n )\n else:\n raise RuntimeError(\"Should not be here\")\n\n def resize(self, size, *args, **kwargs):\n \"\"\"\n Returns a resized copy of this bounding box\n\n :param size: The requested size in pixels, as a 2-tuple:\n (width, height).\n \"\"\"\n\n ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))\n if ratios[0] == ratios[1]:\n ratio = ratios[0]\n scaled_box = self.bbox * ratio\n bbox = BoxList(scaled_box, size, mode=self.mode)\n # bbox._copy_extra_fields(self)\n for k, v in self.extra_fields.items():\n if not (isinstance(v, torch.Tensor) or isinstance(v, list)):\n v = v.resize(size, *args, **kwargs)\n bbox.add_field(k, v)\n return bbox\n\n ratio_width, ratio_height = ratios\n xmin, ymin, xmax, ymax = self._split_into_xyxy()\n scaled_xmin = xmin * ratio_width\n scaled_xmax = xmax * ratio_width\n scaled_ymin = ymin * ratio_height\n scaled_ymax = ymax * ratio_height\n scaled_box = torch.cat(\n (scaled_xmin, 
scaled_ymin, scaled_xmax, scaled_ymax), dim=-1\n )\n bbox = BoxList(scaled_box, size, mode=\"xyxy\")\n # bbox._copy_extra_fields(self)\n for k, v in self.extra_fields.items():\n if not (isinstance(v, torch.Tensor) or isinstance(v, list)):\n v = v.resize(size, *args, **kwargs)\n bbox.add_field(k, v)\n\n return bbox.convert(self.mode)\n\n def transpose(self, method):\n \"\"\"\n Transpose bounding box (flip or rotate in 90 degree steps)\n :param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,\n :py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,\n :py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,\n :py:attr:`PIL.Image.TRANSPOSE` or :py:attr:`PIL.Image.TRANSVERSE`.\n \"\"\"\n if method not in (FLIP_LEFT_RIGHT, FLIP_TOP_BOTTOM):\n raise NotImplementedError(\n \"Only FLIP_LEFT_RIGHT and FLIP_TOP_BOTTOM implemented\"\n )\n\n image_width, image_height = self.size\n xmin, ymin, xmax, ymax = self._split_into_xyxy()\n if method == FLIP_LEFT_RIGHT:\n TO_REMOVE = 1\n transposed_xmin = image_width - xmax - TO_REMOVE\n transposed_xmax = image_width - xmin - TO_REMOVE\n transposed_ymin = ymin\n transposed_ymax = ymax\n elif method == FLIP_TOP_BOTTOM:\n transposed_xmin = xmin\n transposed_xmax = xmax\n transposed_ymin = image_height - ymax\n transposed_ymax = image_height - ymin\n\n transposed_boxes = torch.cat(\n (transposed_xmin, transposed_ymin, transposed_xmax, transposed_ymax), dim=-1\n )\n bbox = BoxList(transposed_boxes, self.size, mode=\"xyxy\")\n # bbox._copy_extra_fields(self)\n for k, v in self.extra_fields.items():\n if not isinstance(v, torch.Tensor):\n v = v.transpose(method)\n bbox.add_field(k, v)\n return bbox.convert(self.mode)\n\n def crop(self, box):\n \"\"\"\n Cropss a rectangular region from this bounding box. 
The box is a\n 4-tuple defining the left, upper, right, and lower pixel\n coordinate.\n \"\"\"\n xmin, ymin, xmax, ymax = self._split_into_xyxy()\n w, h = box[2] - box[0], box[3] - box[1]\n cropped_xmin = (xmin - box[0]).clamp(min=0, max=w)\n cropped_ymin = (ymin - box[1]).clamp(min=0, max=h)\n cropped_xmax = (xmax - box[0]).clamp(min=0, max=w)\n cropped_ymax = (ymax - box[1]).clamp(min=0, max=h)\n\n # TODO should I filter empty boxes here?\n if False:\n is_empty = (cropped_xmin == cropped_xmax) | (cropped_ymin == cropped_ymax)\n\n cropped_box = torch.cat(\n (cropped_xmin, cropped_ymin, cropped_xmax, cropped_ymax), dim=-1\n )\n bbox = BoxList(cropped_box, (w, h), mode=\"xyxy\")\n # bbox._copy_extra_fields(self)\n for k, v in self.extra_fields.items():\n if not isinstance(v, torch.Tensor):\n v = v.crop(box)\n bbox.add_field(k, v)\n return bbox.convert(self.mode)\n\n # Tensor-like methods\n\n def to(self, device):\n bbox = BoxList(self.bbox.to(device), self.size, self.mode)\n for k, v in self.extra_fields.items():\n if hasattr(v, \"to\"):\n v = v.to(device)\n bbox.add_field(k, v)\n return bbox\n\n def __getitem__(self, item):\n bbox = BoxList(self.bbox[item], self.size, self.mode)\n for k, v in self.extra_fields.items():\n if not (isinstance(v, torch.Tensor) and v.dim() == 0):\n bbox.add_field(k, v[item])\n return bbox\n\n def __len__(self):\n return self.bbox.shape[0]\n\n def clip_to_image(self, remove_empty=True):\n if self.__len__() == 0:\n return self\n TO_REMOVE = 1\n self.bbox[:, 0].clamp_(min=0, max=self.size[0] - TO_REMOVE)\n self.bbox[:, 1].clamp_(min=0, max=self.size[1] - TO_REMOVE)\n self.bbox[:, 2].clamp_(min=0, max=self.size[0] - TO_REMOVE)\n self.bbox[:, 3].clamp_(min=0, max=self.size[1] - TO_REMOVE)\n if remove_empty:\n box = self.bbox\n keep = (box[:, 3] > box[:, 1]) & (box[:, 2] > box[:, 0])\n return self[keep]\n return self\n\n def area(self):\n box = self.bbox\n if self.mode == \"xyxy\":\n TO_REMOVE = 1\n area = (box[:, 2] - box[:, 0] + TO_REMOVE) * (box[:, 3] - box[:, 1] + TO_REMOVE)\n elif self.mode == \"xywh\":\n area = box[:, 2] * box[:, 3]\n else:\n raise RuntimeError(\"Should not be here\")\n\n return area\n\n def copy_with_fields(self, fields, skip_missing=False):\n bbox = BoxList(self.bbox, self.size, self.mode)\n if not isinstance(fields, (list, tuple)):\n fields = [fields]\n for field in fields:\n if self.has_field(field):\n bbox.add_field(field, self.get_field(field))\n elif not skip_missing:\n raise KeyError(\"Field '{}' not found in {}\".format(field, self))\n return bbox\n\n def __repr__(self):\n s = self.__class__.__name__ + \"(\"\n s += \"num_boxes={}, \".format(len(self))\n s += \"image_width={}, \".format(self.size[0])\n s += \"image_height={}, \".format(self.size[1])\n s += \"mode={})\".format(self.mode)\n return s\n\n\nif __name__ == \"__main__\":\n bbox = BoxList([[0, 0, 10, 10], [0, 0, 5, 5]], (10, 10))\n s_bbox = bbox.resize((5, 5))\n print(s_bbox)\n print(s_bbox.bbox)\n\n t_bbox = bbox.transpose(0)\n print(t_bbox)\n print(t_bbox.bbox)\n"
] |
[
[
"torch.device",
"torch.cat",
"torch.as_tensor"
]
] |
s-akanksha/DialoGraph_ICLR21
|
[
"d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc"
] |
[
"src/bot/cocoa/src/model/graph_embedder.py"
] |
[
"import tensorflow as tf\nfrom tensorflow.python.ops.math_ops import tanh\n#from tensorflow.python.ops.rnn_cell_impl import _linear as linear\nfrom tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear as linear \n# from tensorflow.python.ops.rnn_cell import _linear as linear\nfrom src.model.util import batch_embedding_lookup, batch_linear, EPS\n\ndef add_graph_embed_arguments(parser):\n parser.add_argument('--node-embed-size', type=int, default=10, help='Knowledge graph node/subgraph embedding size')\n parser.add_argument('--edge-embed-size', type=int, default=10, help='Knowledge graph edge label embedding size')\n parser.add_argument('--entity-embed-size', type=int, default=10, help='Knowledge graph entity embedding size')\n parser.add_argument('--entity-cache-size', type=int, default=2, help='Number of entities to remember (this is more of a performance concern; ideally we can remember all entities within the history)')\n parser.add_argument('--use-entity-embedding', action='store_true', default=False, help='Whether to use entity embedding when compute node embeddings')\n parser.add_argument('--mp-iters', type=int, default=2, help='Number of iterations of message passing on the graph')\n parser.add_argument('--utterance-decay', type=float, default=1, help='Decay of old utterance embedding over time')\n parser.add_argument('--learned-utterance-decay', default=False, action='store_true', help='Learning weight to combine old and new utterances')\n parser.add_argument('--msg-aggregation', default='sum', choices=['sum', 'max', 'avg'], help='How to aggregate messages from neighbors')\n\nactivation = tf.tanh\n\nclass GraphEmbedder(object):\n '''\n Graph embedding model.\n '''\n def __init__(self, config, scope=None):\n self.config = config\n self.scope = scope\n self.context_initialized = False\n self.update_initialized = False\n self.build_model(scope)\n\n def build_model(self, scope=None):\n with tf.variable_scope(scope or type(self).__name__):\n with tf.variable_scope('EdgeEmbedding'):\n self.edge_embedding = tf.get_variable('edge', [self.config.num_edge_labels, self.config.edge_embed_size])\n\n if self.config.use_entity_embedding:\n with tf.variable_scope('EntityEmbedding'):\n self.entity_embedding = tf.get_variable('entity', [self.config.num_entities, self.config.entity_embed_size])\n\n with tf.name_scope('Inputs'):\n # Nodes in the Graph, id is row index in utterances.\n # The number of nodes can vary in each batch.\n node_ids = tf.placeholder(tf.int32, shape=[None, None], name='node_ids')\n mask = tf.placeholder(tf.bool, shape=[None, None], name='mask')\n\n # Entity ids used for look up in entity_embedding when use_entity_embedding.\n # NOTE: node_ids is local; it's essentially range(number of nodes). entity_ids\n # use the global entity mapping.\n entity_ids = tf.placeholder(tf.int32, shape=[None, None], name='entity_ids')\n\n # A path is a tuple of (node_id, edge_label, node_id)\n # NOTE: we assume the first path is always a padding path (NODE_PAD, EDGE_PAD,\n # NODE_PAD) when computing mask in pass_message\n # The number of paths can vary in each batch.\n paths = tf.placeholder(tf.int32, shape=[None, None, 3], name='paths')\n\n # Each node has a list of paths starting from that node. path id is row index\n # in paths. Paths of padded nodes are PATH_PAD.\n node_paths = tf.placeholder(tf.int32, shape=[None, None, None], name='node_paths')\n\n # Node features. 
NOTE: feats[i] must corresponds to node_ids[i]\n node_feats = tf.placeholder(tf.float32, shape=[None, None, self.config.feat_size], name='node_feats')\n\n self.input_data = (node_ids, mask, entity_ids, paths, node_paths, node_feats)\n # TODO:\n self.node_ids, self.mask, self.entity_ids, self.paths, self.node_paths, self.node_feats = self.input_data\n\n # This will be used by GraphDecoder to figure out the shape of the output attention scores\n self.node_ids = self.input_data[0]\n\n def get_feed_dict(self, **kwargs):\n feed_dict = kwargs.pop('feed_dict', {})\n feed_dict[self.node_ids] = kwargs.pop('node_ids')\n feed_dict[self.mask] = kwargs.pop('mask')\n feed_dict[self.entity_ids] = kwargs.pop('entity_ids')\n feed_dict[self.paths] = kwargs.pop('paths')\n feed_dict[self.node_paths] = kwargs.pop('node_paths')\n feed_dict[self.node_feats] = kwargs.pop('node_feats')\n return feed_dict\n\n def get_context(self, utterances):\n '''\n Compute embedding of each node as context for the attention model.\n utterances: current utterance embeddings from the dialogue history\n '''\n node_ids, mask, entity_ids, paths, node_paths, node_feats = self.input_data\n with tf.variable_scope(self.scope or type(self).__name__):\n with tf.variable_scope('NodeEmbedding'):\n with tf.variable_scope('InitNodeEmbedding') as scope:\n # It saves some reshapes to do batch_linear and batch_embedding_lookup\n # together, but this way is clearer.\n if self.config.use_entity_embedding:\n initial_node_embed = tf.concat(2,\n [tf.nn.embedding_lookup(self.entity_embedding, entity_ids),\n batch_embedding_lookup(utterances[0], node_ids),\n batch_embedding_lookup(utterances[1], node_ids),\n node_feats])\n else:\n initial_node_embed = tf.concat(2,\n [batch_embedding_lookup(utterances[0], node_ids),\n batch_embedding_lookup(utterances[1], node_ids),\n node_feats])\n scope.reuse_variables()\n\n # Message passing\n def mp(curr_node_embedding):\n messages = self.embed_path(curr_node_embedding, self.edge_embedding, paths)\n return self.pass_message(messages, node_paths, self.config.pad_path_id)\n\n node_embeds = [initial_node_embed]\n if self.config.mp_iters > 0:\n # NOTE: initial MP uses different parameters because the node_embed_size is different\n with tf.variable_scope('InitialMP'):\n node_embeds.append(mp(node_embeds[-1]))\n for i in xrange(self.config.mp_iters-1):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n node_embeds.append(mp(node_embeds[-1]))\n\n context = tf.concat(2, node_embeds)\n\n self.context_initialized = True\n return context, mask\n\n def embed_path(self, node_embedding, edge_embedding, paths):\n '''\n Compute embedding of a path (edge_label, node_id).\n node_embedding: (batch_size, num_nodes, node_embed_size)\n edge_embedding: (num_edge_label, edge_embed_size)\n paths: each path is a tuple of (node_id, edge_label, node_id).\n (batch_size, num_paths, 3)\n '''\n edge_embeds = tf.nn.embedding_lookup(edge_embedding, paths[:, :, 1])\n node_embeds = batch_embedding_lookup(node_embedding, paths[:, :, 2])\n path_embed_size = self.config.node_embed_size\n path_embeds = activation(batch_linear([edge_embeds, node_embeds], path_embed_size, True))\n return path_embeds\n\n def pass_message(self, path_embeds, neighbors, padded_path=0):\n '''\n Compute new node embeddings by summing path embeddings (message) of neighboring nodes.\n neighbors: ids of neighboring paths of each node where id is row index in path_embeds\n (batch_size, num_nodes, num_neighbors)\n path_embeds: (batch_size, num_paths, path_embed_size)\n PATH_PAD: 
if a node is not incident to any edge, its path ids in neighbors are PATH_PAD\n '''\n # Mask padded nodes in neighbors\n # NOTE: although we mask padded nodes in get_context, we still need to mask neighbors\n # for entities not in the KB but mentioned by the partner. These are dangling nodes\n # and should not have messages passed in.\n mask = tf.to_float(tf.not_equal(neighbors, tf.constant(padded_path))) # (batch_size, num_nodes, num_neighbors)\n num_neighbors = tf.reduce_sum(tf.cast(mask, tf.float32), 2, keep_dims=True) + EPS\n\n # Use static shape when possible\n shape = tf.shape(neighbors)\n batch_size, num_nodes, _ = neighbors.get_shape().as_list()\n batch_size = batch_size or shape[0]\n num_nodes = num_nodes or shape[1]\n path_embed_size = path_embeds.get_shape().as_list()[-1]\n\n # Gather neighboring path embeddings\n neighbors = tf.reshape(neighbors, [batch_size, -1]) # (batch_size, num_nodes x num_neighbors)\n embeds = batch_embedding_lookup(path_embeds, neighbors) # (batch_size, num_nodes x num_neighbors, path_embed_size)\n embeds = tf.reshape(embeds, [batch_size, num_nodes, -1, path_embed_size])\n mask = tf.expand_dims(mask, 3) # (batch_size, num_nodes, num_neighbors, 1)\n embeds = embeds * mask\n\n # (batch_size, num_nodes, path_embed_size)\n if self.config.msg_agg == 'sum':\n new_node_embeds = tf.reduce_sum(embeds, 2)\n elif self.config.msg_agg == 'avg':\n new_node_embeds = tf.reduce_sum(embeds, 2) / num_neighbors\n elif self.config.msg_agg == 'max':\n new_node_embeds = tf.reduce_max(embeds, 2)\n else:\n raise ValueError('Unknown message aggregation method')\n\n return new_node_embeds\n\n def update_utterance(self, entity_indices, utterance, curr_utterances, utterance_id):\n new_utterances = []\n for i, u in enumerate(curr_utterances):\n if i == utterance_id:\n new_utterances.append(self._update_utterance(entity_indices, utterance, u))\n else:\n new_utterances.append(u)\n return tuple(new_utterances)\n\n def _update_utterance(self, entity_indices, utterance, curr_utterances):\n '''\n We first transform utterance into a dense matrix of the same size as curr_utterances,\n then return their sum.\n entity_indices: entity ids correponding to rows to be updated in the curr_utterances\n (batch_size, entity_cache_size)\n utterance: hidden states from the RNN\n (batch_size, utterance_size)\n NOTE: each curr_utterance matrix should have a row (e.g. the last one) as padded utterance.\n Padded entities in entity_indices corresponds to the padded utterance. 
This is handled\n by GraphBatch during construnction of the input data.\n '''\n entity_inds_shape = tf.shape(entity_indices)\n B = entity_inds_shape[0] # batch_size is a variable\n E = entity_inds_shape[1] # number of entities to be updated\n U = self.config.utterance_size\n # Construct indices corresponding to each entry to be updated in self.utterances\n # self.utterance has shape (batch_size, num_nodes, utterance_size)\n # Therefore each row in the indices matrix specifies (batch_id, node_id, utterance_dim)\n batch_inds = tf.reshape(tf.tile(tf.reshape(tf.range(B), [-1, 1]), [1, E*U]), [-1, 1])\n node_inds = tf.reshape(tf.tile(tf.reshape(entity_indices, [-1, 1]), [1, U]), [-1, 1])\n utterance_inds = tf.reshape(tf.tile(tf.range(U), [E*B]), [-1, 1])\n inds = tf.concat(1, [batch_inds, node_inds, utterance_inds])\n\n # Repeat utterance for each entity\n utterance = tf.reshape(tf.tile(utterance, [1, E]), [-1])\n new_utterance = tf.sparse_to_dense(inds, tf.shape(curr_utterances), utterance, validate_indices=False)\n\n if self.config.learned_decay:\n with tf.variable_scope('UpdateUtterance', reuse=self.update_initialized):\n weight = tf.sigmoid(batch_linear(tf.concat(2, [curr_utterances, new_utterance]), 1, True)) # (batch_size, num_nodes, 1)\n if not self.update_initialized:\n self.update_initialized = True\n\n\n if self.config.learned_decay:\n return tf.mul(1 - weight, curr_utterances) + tf.mul(weight, new_utterance)\n else:\n return curr_utterances * self.config.decay + new_utterance\n"
] |
[
[
"tensorflow.get_variable",
"tensorflow.reduce_max",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.placeholder",
"tensorflow.mul",
"tensorflow.tile",
"tensorflow.name_scope",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope",
"tensorflow.nn.embedding_lookup"
]
] |
avanoene/risk_dash
|
[
"0459c8d04e9586022799585a86f79cc51c390b93"
] |
[
"pages/single_ticker.py"
] |
[
"import json\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_bootstrap_components as dbc\nimport numpy as np\nimport pandas as pd\nfrom pandas.tseries.offsets import BDay\nimport plotly.graph_objs as go\nfrom dash.dependencies import Input, Output, State\n\nfrom app import app\nfrom risk_dash import market_data as md, simgen as mc\nfrom apiconfig import quandl_apikey as apikey\n\n\ntime_options =['5D',\n'10D',\n'20D',\n'3M',\n'6M',\n'1Y',\n'5Y',\n'10Y'\n]\n\nlayout = dbc.Container([\n html.H2('Individual Equity Analysis'),\n dbc.Row(\n [\n html.H5('Enter ticker symbol and hit Run to run simulation'),\n html.Label('Ticker'),\n dcc.Input(id='stock', value='AAPL', type='text'),\n html.Button(id='submit', n_clicks=0, children='Run')\n ],\n ),\n html.Div(id='querydata', style={'display':'none'}),\n html.Div(id='simdata', style={'display':'none'}),\n dbc.Row(\n [\n dbc.Col(dcc.Graph(id='equityline', config=dict( autosizeable=True)), lg=12)\n ],\n align='center'\n ),\n dbc.Row(\n [\n dbc.Col(\n [\n html.H5('Number of Simulations'),\n dcc.Input(\n id='obs',\n type='number',\n placeholder='Number of MC Obs',\n value=1000\n )\n ],\n md=2\n ),\n dbc.Col(\n [\n html.H5('Lookback Period for Volatility Parameterization'),\n dcc.RadioItems(\n id='lookback',\n options=[\n {'label': 20, 'value': 20},\n {'label': 80, 'value': 80},\n {'label': 100, 'value': 100}\n ],\n value = 80,\n labelStyle={'display': 'inline-block'}\n )\n ],\n md=2\n ),\n dbc.Col(\n [\n html.H5('Number of Days Forward'),\n dcc.Input(\n id='periods_forward',\n type='number',\n placeholder = 'Number of Days Forward',\n value = 10\n )\n ],\n md=2\n )\n ],\n align='center'\n ),\n dbc.Row([\n dbc.Col(\n dcc.Graph(id='montecarlo'),\n md=6\n ),\n dbc.Col(\n id='metrics_table',\n md=6\n )\n ],\n )\n ],\n fluid=True\n)\n\[email protected](\n Output('querydata', 'children'),\n [Input('submit', 'n_clicks')],\n [\n State('stock', 'value'),\n State('obs', 'value'),\n State('lookback', 'value'),\n State('periods_forward', 'value')\n ]\n )\n\ndef get_data(n_clicks,stock, obs, lookback, forward):\n if n_clicks != 0:\n data = md.QuandlStockData(apikey, stock, days=lookback)\n gen = mc.NormalDistribution(\n location = data.currentexmean,\n scale = data.currentexvol\n )\n sim = mc.NaiveMonteCarlo(\n gen,\n obs=obs\n )\n sim.simulate(forward, obs)\n dist = sim.simulated_distribution * data.current_price()\n hist_dist = (data.market_data['percentchange'] * data.current_price()).dropna()\n return json.dumps(\n {\n 'data': data.market_data.to_json(date_format='iso', orient='records'),\n 'dist': list(dist),\n 'hist_dist': list(hist_dist),\n 'currentvol': data.currentexvol,\n 'currentexvol': data.currentexvol * np.sqrt(252),\n 'currentswvol': data.currentswvol * np.sqrt(252),\n 'currentexr': (1 + data.currentexmean) ** (252) - 1,\n 'currentswr': (1 + data.currentswmean) ** (252) - 1,\n 'percentVaR': np.nanpercentile(sim.simulated_distribution, 2.5),\n 'forwardSimulationMean': list(np.exp(sim.simulation_mean) * data.current_price()),\n 'forwardSimulationLower': list(np.exp(sim.simulation_mean - 2 * sim.simulation_std) * data.current_price()),\n 'forwardSimulationUpper' : list(np.exp(sim.simulation_mean + 2 * sim.simulation_std) * data.current_price()),\n 'currentprice': data.current_price()\n }\n )\n\n\[email protected](\n Output('equityline', 'figure'),\n [\n Input('querydata', 'children'),\n Input('stock', 'value')\n ]\n)\ndef chart(data, stock):\n data = json.loads(data)\n tempdata = 
pd.DataFrame(json.loads(data['data']))\n tempdata['date'] = pd.to_datetime(tempdata['date'])\n forward_dates = pd.bdate_range(\n tempdata['date'].max() + BDay(1) ,\n tempdata['date'].max() + BDay(len(data['forwardSimulationMean']))\n )\n line = go.Scatter(\n x = tempdata['date'],\n y = tempdata['adj_close'],\n name = stock + ' Adjusted Close Price'\n )\n average = go.Scatter(\n x = forward_dates,\n y = data['forwardSimulationMean'],\n name = 'Simulation Average'\n )\n lower_std = go.Scatter(\n x = forward_dates,\n y = data['forwardSimulationLower'],\n name = 'Simulation Lower Bound'\n )\n upper_std = go.Scatter(\n x = forward_dates,\n y = data['forwardSimulationUpper'],\n name = 'Simulation Upper Bound'\n )\n\n line_candle = go.Candlestick(x = tempdata['date'],\n open=tempdata['adj_open'],\n close = tempdata['adj_close'],\n low=tempdata['adj_low'],\n high=tempdata['adj_high'],\n increasing=dict(\n line=dict(\n color='black'\n )\n ),\n decreasing=dict(\n line=dict(\n color='red'\n )\n ),\n name= stock + ' Adjusted Price'\n )\n\n outlayout = dict(\n title = stock + ' Adjusted Prices',\n xaxis = dict(\n rangeselector=dict(\n buttons=list([\n dict(\n count=5,\n label='5D',\n step='day',\n stepmode='backward'\n ),\n dict(\n count=20,\n label='20D',\n step='day',\n stepmode='backward'\n ),\n dict(\n count=3,\n label='3M',\n step='month',\n stepmode='backward'\n ),\n dict(\n count=6,\n label='6M',\n step='month',\n stepmode='backward'\n ),\n dict(\n count=1,\n label='YTD',\n step='year',\n stepmode='todate'\n ),\n dict(\n count=1,\n label='1Y',\n step='year',\n stepmode='backward'\n ),\n dict(\n count=5,\n label='5Y',\n step='year',\n stepmode='backward'\n ),\n dict(\n step='all'\n )\n ]\n ),\n visible=True\n )\n )\n )\n fig = go.Figure(data=[line, upper_std, average, lower_std], layout=outlayout)\n return fig\n\n\[email protected](\n Output('montecarlo', 'figure'),\n [\n Input('querydata', 'children'),\n Input('periods_forward', 'value')\n ]\n)\ndef monte_carlo_histogram(simdata, forward):\n simdata = json.loads(simdata)\n tempdata = pd.DataFrame(json.loads(simdata['data']))\n fig_data = [\n go.Histogram(\n x=simdata['dist'],\n histnorm='probability',\n opacity=.75,\n name='MC {}D Simulation'.format(forward)\n )\n ]\n fig_data.append(\n go.Histogram(\n x = tempdata['percentchange'].rolling(forward).sum() * simdata['currentprice'],\n histnorm='probability',\n opacity=.75,\n name='Historic {}D Distribution'.format(forward)\n )\n )\n layout = go.Layout(barmode='overlay')\n fig = go.Figure(data=fig_data, layout=layout)\n return fig\n\n\[email protected](Output('metrics_table', 'children'),\n [Input('querydata', 'children'),\n Input('periods_forward', 'value')])\ndef summary_table(sim, forward):\n simdata = json.loads(sim)\n var = np.nanpercentile(simdata['dist'], 2.5)\n percentvar = simdata['percentVaR']\n percentvol = simdata['currentvol'] * 2\n actvar = np.nanpercentile(simdata['hist_dist'], 2.5)\n simvol = np.nanstd(simdata['dist'])\n exvol = simdata['currentexvol']\n swvol = simdata['currentswvol']\n exreturn = simdata['currentswr']\n ewexreturn = simdata['currentexr']\n metrics = [var, percentvar, actvar, percentvol, simvol, exvol, swvol, exreturn, ewexreturn]\n names = ['Simulated Dollar {}D Value at Risk'.format(forward),\n 'Simulated Percent {}D VaR'.format(forward),\n 'Historic Dist {}D VaR'.format(forward),\n 'Historic {}D Percent VaR @ 2 SD'.format(forward),\n 'Dollar SD Move',\n 'Historic Exponentially Weighted Annualized Vol',\n 'Historic Simply Weighted Annualized Vol',\n 'Expected SW 
Annualized Return',\n 'Expected EW Annualized Return'\n ]\n tbl_out = [html.Tr([\n html.Th(\n 'Distribution Metric'\n ),\n html.Th(\n 'Value'\n )\n ]\n )\n ]\n tbl_out = tbl_out + [html.Tr([html.Td(i), html.Td('{:.3f}'.format(j))])\n for i , j in zip(names, metrics)]\n return html.Table(tbl_out, className='table')\n"
] |
[
[
"numpy.nanpercentile",
"pandas.to_datetime",
"numpy.sqrt",
"numpy.nanstd",
"numpy.exp",
"pandas.tseries.offsets.BDay"
]
] |