repo_name (string, lengths 6–130) | hexsha (list) | file_path (list) | code (list) | apis (list) |
---|---|---|---|---|
NunoEdgarGFlowHub/datasets
|
[
"c3351cdd59eedf8193d670334672ff75020f82b6"
] |
[
"tensorflow_datasets/core/features/image_feature_test.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.features.image_feature.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_datasets.core import features as features_lib\nfrom tensorflow_datasets.core import test_utils\n\n\nclass ImageFeatureTest(test_utils.FeatureExpectationsTestCase):\n\n @property\n def expectations(self):\n randint = np.random.randint\n\n img = randint(256, size=(128, 100, 3), dtype=np.uint8)\n img_other_shape = randint(256, size=(64, 200, 3), dtype=np.uint8)\n img_file_path = os.path.join(os.path.dirname(__file__),\n '../../testing/test_data/6pixels.png')\n img_file_expected_content = [ # see tests_data/README.md\n [[0, 255, 0], [255, 0, 0], [255, 0, 255]],\n [[0, 0, 255], [255, 255, 0], [126, 127, 128]],\n ]\n\n img_shaped = randint(256, size=(32, 64, 3), dtype=np.uint8)\n\n return [\n test_utils.FeatureExpectation(\n name='image',\n feature=features_lib.Image(),\n shape=(None, None, 3),\n dtype=tf.uint8,\n tests=[\n # Numpy array\n test_utils.FeatureExpectationItem(\n value=img,\n expected=img,\n ),\n # File path\n test_utils.FeatureExpectationItem(\n value=img_file_path,\n expected=img_file_expected_content,\n ),\n # 'img' shape can be dynamic\n test_utils.FeatureExpectationItem(\n value=img_other_shape,\n expected=img_other_shape,\n ),\n # Invalid type\n test_utils.FeatureExpectationItem(\n value=randint(256, size=(128, 128, 3), dtype=np.uint32),\n raise_cls=ValueError,\n raise_msg='should be uint8',\n ),\n # Invalid number of dimensions\n test_utils.FeatureExpectationItem(\n value=randint(256, size=(128, 128), dtype=np.uint8),\n raise_cls=ValueError,\n raise_msg='must have the same rank',\n ),\n # Invalid number of channels\n test_utils.FeatureExpectationItem(\n value=randint(256, size=(128, 128, 1), dtype=np.uint8),\n raise_cls=ValueError,\n raise_msg='are incompatible',\n ),\n ],\n ),\n # Image with statically defined shape\n test_utils.FeatureExpectation(\n name='image_shaped',\n feature=features_lib.Image(shape=(32, 64, 3)),\n shape=(32, 64, 3),\n dtype=tf.uint8,\n tests=[\n test_utils.FeatureExpectationItem(\n value=img_shaped,\n expected=img_shaped,\n ),\n # 'img_shaped' shape should be static\n test_utils.FeatureExpectationItem(\n value=randint(256, size=(31, 64, 3), dtype=np.uint8),\n raise_cls=ValueError,\n raise_msg='are incompatible',\n ),\n ],\n ),\n ]\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.test.main"
]
] |
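The only API recorded for the row above is `tensorflow.test.main`, the entry point the test file invokes. A minimal sketch of that pattern, assuming TensorFlow 2.x; the `UintImageTest` case below is illustrative and not part of tensorflow_datasets:

```python
import numpy as np
import tensorflow as tf


class UintImageTest(tf.test.TestCase):
    def test_uint8_roundtrip(self):
        # Images fed to a tfds Image feature must be uint8, matching the
        # 'should be uint8' ValueError exercised in the row above.
        img = np.random.randint(256, size=(4, 4, 3), dtype=np.uint8)
        self.assertEqual(img.dtype, np.uint8)
        self.assertAllEqual(tf.convert_to_tensor(img).numpy(), img)


if __name__ == '__main__':
    tf.test.main()
```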
brando90/Does-MAML-Only-Work-via-Feature-Re-use-A-Data-Set-Centric-Perspective
|
[
"45c4fabf35d6d8d19e49092e84e8ac9fa55aee8d",
"45c4fabf35d6d8d19e49092e84e8ac9fa55aee8d",
"45c4fabf35d6d8d19e49092e84e8ac9fa55aee8d"
] |
[
"maml_vs_adapted_maml_src/models/resnet_rfs.py",
"gpu_tests/pytorch_cuda_test.py",
"maml_vs_adapted_maml_src/dataloaders/union_sl_dataloaders.py"
] |
[
"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom torch.distributions import Bernoulli\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y\n\n\nclass DropBlock(nn.Module):\n def __init__(self, block_size):\n super(DropBlock, self).__init__()\n\n self.block_size = block_size\n # self.gamma = gamma\n # self.bernouli = Bernoulli(gamma)\n\n def forward(self, x, gamma):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # shape: (bsize, channels, height, width)\n\n if self.training:\n batch_size, channels, height, width = x.shape\n\n bernoulli = Bernoulli(gamma)\n mask = bernoulli.sample(\n (batch_size, channels, height - (self.block_size - 1), width - (self.block_size - 1))).to(device)\n block_mask = self._compute_block_mask(mask)\n countM = block_mask.size()[0] * block_mask.size()[1] * block_mask.size()[2] * block_mask.size()[3]\n count_ones = block_mask.sum()\n\n return block_mask * x * (countM / count_ones)\n else:\n return x\n\n def _compute_block_mask(self, mask):\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n left_padding = int((self.block_size - 1) / 2)\n right_padding = int(self.block_size / 2)\n\n batch_size, channels, height, width = mask.shape\n # print (\"mask\", mask[0][0])\n non_zero_idxs = mask.nonzero()\n nr_blocks = non_zero_idxs.shape[0]\n\n offsets = torch.stack(\n [\n torch.arange(self.block_size).view(-1, 1).expand(self.block_size, self.block_size).reshape(-1),\n # - left_padding,\n torch.arange(self.block_size).repeat(self.block_size), # - left_padding\n ]\n ).t().to(device)\n offsets = torch.cat((torch.zeros(self.block_size ** 2, 2).to(device).long(), offsets.long()), 1)\n\n if nr_blocks > 0:\n non_zero_idxs = non_zero_idxs.repeat(self.block_size ** 2, 1)\n offsets = offsets.repeat(nr_blocks, 1).view(-1, 4)\n offsets = offsets.long()\n\n block_idxs = non_zero_idxs + offsets\n # block_idxs += left_padding\n padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))\n padded_mask[block_idxs[:, 0], block_idxs[:, 1], block_idxs[:, 2], block_idxs[:, 3]] = 1.\n else:\n padded_mask = F.pad(mask, (left_padding, right_padding, left_padding, right_padding))\n\n block_mask = 1 - padded_mask # [:height, :width]\n return block_mask\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, drop_rate=0.0, drop_block=False,\n block_size=1, use_se=False):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.LeakyReLU(0.1)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = conv3x3(planes, planes)\n self.bn3 = nn.BatchNorm2d(planes)\n self.maxpool = nn.MaxPool2d(stride)\n self.downsample = downsample\n self.stride = stride\n self.drop_rate = drop_rate\n self.num_batches_tracked = 0\n self.drop_block = drop_block\n self.block_size = 
block_size\n        self.DropBlock = DropBlock(block_size=self.block_size)\n        self.use_se = use_se\n        if self.use_se:\n            self.se = SELayer(planes, 4)\n\n    def forward(self, x):\n        self.num_batches_tracked += 1\n\n        residual = x\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n\n        out = self.conv3(out)\n        out = self.bn3(out)\n        if self.use_se:\n            out = self.se(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n        out += residual\n        out = self.relu(out)\n        out = self.maxpool(out)\n\n        if self.drop_rate > 0:\n            if self.drop_block:\n                feat_size = out.size()[2]\n                keep_rate = max(1.0 - self.drop_rate / (20 * 2000) * (self.num_batches_tracked), 1.0 - self.drop_rate)\n                gamma = (1 - keep_rate) / self.block_size ** 2 * feat_size ** 2 / (feat_size - self.block_size + 1) ** 2\n                out = self.DropBlock(out, gamma=gamma)\n            else:\n                out = F.dropout(out, p=self.drop_rate, training=self.training, inplace=True)\n\n        return out\n\n\nclass ResNet(nn.Module):\n\n    def __init__(self, block, n_blocks, keep_prob=1.0, avg_pool=False, drop_rate=0.0,\n                 dropblock_size=5, num_classes=-1, use_se=False):\n        super(ResNet, self).__init__()\n\n        self.inplanes = 3\n        self.use_se = use_se\n        self.layer1 = self._make_layer(block, n_blocks[0], 64,\n                                       stride=2, drop_rate=drop_rate)\n        self.layer2 = self._make_layer(block, n_blocks[1], 160,\n                                       stride=2, drop_rate=drop_rate)\n        self.layer3 = self._make_layer(block, n_blocks[2], 320,\n                                       stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)\n        self.layer4 = self._make_layer(block, n_blocks[3], 640,\n                                       stride=2, drop_rate=drop_rate, drop_block=True, block_size=dropblock_size)\n        if avg_pool:\n            # self.avgpool = nn.AvgPool2d(5, stride=1)\n            self.avgpool = nn.AdaptiveAvgPool2d(1)\n        self.keep_prob = keep_prob\n        self.keep_avg_pool = avg_pool\n        self.dropout = nn.Dropout(p=1 - self.keep_prob, inplace=False)\n        self.drop_rate = drop_rate\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')\n            elif isinstance(m, nn.BatchNorm2d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n\n        self.num_classes = num_classes\n        if self.num_classes > 0:\n            self.classifier = nn.Linear(640, self.num_classes)\n\n    def _make_layer(self, block, n_block, planes, stride=1, drop_rate=0.0, drop_block=False, block_size=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion,\n                          kernel_size=1, stride=1, bias=False),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        if n_block == 1:\n            layer = block(self.inplanes, planes, stride, downsample, drop_rate, drop_block, block_size, self.use_se)\n        else:\n            # use_se passed by keyword: positionally it would land in the drop_block slot\n            layer = block(self.inplanes, planes, stride, downsample, drop_rate, use_se=self.use_se)\n        layers.append(layer)\n        self.inplanes = planes * block.expansion\n\n        for i in range(1, n_block):\n            if i == n_block - 1:\n                layer = block(self.inplanes, planes, drop_rate=drop_rate, drop_block=drop_block,\n                              block_size=block_size, use_se=self.use_se)\n            else:\n                layer = block(self.inplanes, planes, drop_rate=drop_rate, use_se=self.use_se)\n            layers.append(layer)\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x, is_feat=False):\n        x = self.layer1(x)\n        f0 = x\n        x = self.layer2(x)\n        f1 = x\n        x = self.layer3(x)\n        f2 = x\n        x = self.layer4(x)\n        f3 = x\n        if self.keep_avg_pool:\n            x = self.avgpool(x)\n        x = x.view(x.size(0), -1)\n        
feat = x\n if self.num_classes > 0:\n x = self.classifier(x)\n\n if is_feat:\n return [f0, f1, f2, f3, feat], x\n else:\n return x\n\n def get_embedding(self, x):\n [f0, f1, f2, f3, feat], x = self.forward(x, is_feat=True)\n return feat\n\n\ndef resnet12(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-12 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef resnet18(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef resnet24(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-24 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef resnet50(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n indeed, only (3 + 4 + 6 + 3) * 3 + 1 = 49 layers\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef resnet101(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n indeed, only (3 + 4 + 23 + 3) * 3 + 1 = 100 layers\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 23, 3], keep_prob=keep_prob, avg_pool=avg_pool, **kwargs)\n return model\n\n\ndef seresnet12(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-12 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 1, 1], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\ndef seresnet18(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-18 model.\n \"\"\"\n model = ResNet(BasicBlock, [1, 1, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\ndef seresnet24(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-24 model.\n \"\"\"\n model = ResNet(BasicBlock, [2, 2, 2, 2], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\ndef seresnet50(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-50 model.\n indeed, only (3 + 4 + 6 + 3) * 3 + 1 = 49 layers\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 6, 3], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\ndef seresnet101(keep_prob=1.0, avg_pool=False, **kwargs):\n \"\"\"Constructs a ResNet-101 model.\n indeed, only (3 + 4 + 23 + 3) * 3 + 1 = 100 layers\n \"\"\"\n model = ResNet(BasicBlock, [3, 4, 23, 3], keep_prob=keep_prob, avg_pool=avg_pool, use_se=True, **kwargs)\n return model\n\n\nif __name__ == '__main__':\n from types import SimpleNamespace\n # import argparse\n #\n # parser = argparse.ArgumentParser('argument for training')\n # parser.add_argument('--model', type=str, choices=['resnet12', 'resnet18', 'resnet24', 'resnet50', 'resnet101',\n # 'seresnet12', 'seresnet18', 'seresnet24', 'seresnet50',\n # 'seresnet101'])\n # args = parser.parse_args()\n args = SimpleNamespace(model='resnet12')\n args.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n model_dict = {\n 'resnet12': resnet12,\n 'resnet18': resnet18,\n 'resnet24': resnet24,\n 'resnet50': resnet50,\n 'resnet101': resnet101,\n 'seresnet12': seresnet12,\n 'seresnet18': seresnet18,\n 'seresnet24': seresnet24,\n 'seresnet50': seresnet50,\n 'seresnet101': seresnet101,\n }\n\n model = model_dict[args.model](avg_pool=True, drop_rate=0.1, dropblock_size=5, 
num_classes=64).to(args.device)\n data = torch.randn(2, 3, 84, 84)\n model = model.to(args.device)\n data = data.to(args.device)\n feat, logit = model(data, is_feat=True)\n print(feat[-1].shape)\n print(logit.shape)\n\n print(\"DONE\")\n",
"import torch\nimport torch.nn as nn\n\nfrom collections import OrderedDict\n\n# import torchvision.transforms as transforms\n# from torch.models.custom_layers import Flatten\n\nclass Flatten(nn.Module):\n def forward(self, input):\n batch_size = input.size(0)\n out = input.view(batch_size,-1)\n return out # (batch_size, *size)\n\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nprint(f'{torch.cuda.is_available()}')\nprint(f'Device = {device}')\nassert(torch.cuda.is_available())\n\ndata = torch.randn(2, 3, 84, 84).to(device)\n\nout_features = 64\nmodel = nn.Sequential(OrderedDict([\n ('features', nn.Sequential(OrderedDict([('flatten', Flatten())]))),\n ('cls', torch.nn.Linear(in_features=84 * 84 * 3, out_features=out_features, bias=True))\n]))\nmodel = nn.Sequential(OrderedDict([('model', model)])).to(device)\n\nout = model(data)\n\nprint(out.sum())\nprint('Success! Your code works with gpu')\n\n\n\n\n\n",
"\"\"\"\nUnion of data sets for SL training.\n\"\"\"\nfrom typing import Union\n\nimport torchvision\nfrom torch import Tensor\nfrom torch.utils.data import Dataset\nfrom pathlib import Path\n\nimport torch\n\nfrom task2vec import Task2Vec\nfrom models import get_model\nimport datasets\nimport task_similarity\n\ndef get_datasets(root: Union[str, Path], dataset_names: list[str]) -> list[torchvision.datasets]:\n import datasets\n root: Path = Path(root).expanduser() if isinstance(root, str) else root.expanduser()\n data_sets: list[torchvision.datasets] = [datasets.__dict__[name](root=root)[0] for name in dataset_names]\n return data_sets\n\nclass UnionDatasets(Dataset):\n \"\"\"\n todo:\n - bisect into the right data set\n - make sure we are using the right split\n \"\"\"\n\n def __init__(self, root: Union[str, Path], dataset_names: list[str], split: str):\n root: Path = Path(root).expanduser() if isinstance(root, str) else root.expanduser()\n # - set fields\n self.root: Path = root\n self.dataset_names: list[str] = dataset_names\n self.split\n # - get data sets\n self.data_sets: list[torchvision.datasets] = get_datasets(dataset_names, root)\n\n def __len__(self):\n total_numer_of_data_examples: int = sum([len(dataset) for dataset in self.data_sets])\n return total_numer_of_data_examples\n\n def __getitem__(self, idx: int):\n pass\n\n# - tests\n\ndef go_through_hdml1_test():\n # - get data set list\n # dataset_names = ('stl10', 'mnist', 'cifar10', 'cifar100', 'letters', 'kmnist')\n # dataset_names = ('mnist',)\n dataset_names = ('stl10', 'letters', 'kmnist')\n root: Path = Path('~/data').expanduser()\n print(f'{root=}')\n dataset_list: list[torchvision.datasets] = [datasets.__dict__[name](root=root)[0] for name in dataset_names]\n print(f'{dataset_list=}')\n device = torch.device(f\"cuda:{0}\" if torch.cuda.is_available() else \"cpu\")\n print(f'{device=}')\n\n # - get union data loader\n union_datasets: UnionDatasets = UnionDatasets(root, dataset_names)\n\n # - go through the union data loader\n\n\nif __name__ == '__main__':\n go_through_hdml1_test()\n print('Done!\\n\\a')\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.BatchNorm2d",
"torch.nn.LeakyReLU",
"torch.nn.init.kaiming_normal_",
"torch.distributions.Bernoulli",
"torch.cuda.is_available",
"torch.nn.functional.pad",
"torch.nn.MaxPool2d",
"torch.nn.init.constant_",
"torch.zeros",
"torch.nn.Sequential",
"torch.nn.functional.dropout",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Dropout",
"torch.arange",
"torch.nn.Sigmoid",
"torch.nn.AdaptiveAvgPool2d",
"torch.randn"
],
[
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.randn"
],
[
"torch.cuda.is_available"
]
] |
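Among the APIs listed for this row, `torch.distributions.Bernoulli` drives the mask sampling inside `DropBlock` in `resnet_rfs.py`. A minimal sketch of that sampling step, with illustrative (hypothetical) sizes and gamma:

```python
import torch
from torch.distributions import Bernoulli

# Illustrative values; gamma is the per-location drop probability DropBlock derives.
block_size, gamma = 3, 0.1
batch, channels, height, width = 2, 4, 8, 8
x = torch.randn(batch, channels, height, width)

# Sample mask centers over the grid of valid block positions, as in DropBlock.forward.
mask = Bernoulli(gamma).sample(
    (batch, channels, height - (block_size - 1), width - (block_size - 1)))
print(mask.shape)  # torch.Size([2, 4, 6, 6])
```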
jwmueller/autogluon-benchmarking
|
[
"28f35188a65c5fb37d4950fa9657ea84c9163049"
] |
[
"autogluon_utils/benchmarking/evaluation/tex_table.py"
] |
[
"import pandas as pd\nimport numpy as np\n\n# Example usage: x = tex_table(df, textable_file=\"testlatextable.txt\", bold='min')\n\ndef tex_table(df, textable_file, bold = None, nan_char = \" x \", max_digits = 4):\n \"\"\" This function is only intended for fully numerical tables (dataset x frameworks comparison).\n Datasets should be row indices of df rather than a column.\n Args:\n df = DataFrame\n textable_file = path to output file\n bold = 'min' or = 'max' (if df only contains numbers), or = None for no bolding.\n nan_char replaces NaN in LaTex table\n max_digits = Maximum number of digits to show in each cell. \n \"\"\"\n if bold is not None:\n if bold == 'min':\n best_row_vals = df.min(axis=1)\n # best_cols = df.idxmin(axis=1)\n elif bold == 'max':\n best_row_vals = df.max(axis=1)\n # best_cols = df.idxmax(axis=0)\n else:\n raise ValueError(\"unknown bold option\")\n best_cols = []\n for i in df.index:\n row_best_cols = list(df.columns[np.abs(df.loc[i] - best_row_vals[i]) < 1e-5])\n best_cols.append(row_best_cols)\n if len(row_best_cols) <= 0:\n raise ValueError(\"no row value matches best row value\")\n \n # SHIFT_FACTOR = 100\n # df = df * SHIFT_FACTOR\n # df = df.round(num_decimals)\n # df = df / SHIFT_FACTOR\n max_int = int(df.max(numeric_only=True).max())\n max_digits = max(max_digits, len(str(max_int))) # make sure we don't truncate values before decimal\n df = df.astype('str')\n df = df.replace(\"nan\", nan_char)\n df = df.applymap(lambda x: x[:max_digits])\n \n print(df.columns)\n if bold is not None:\n ind = 0\n for i in df.index: # bold best value:\n if len(best_cols[ind]) > 0:\n for col_name in best_cols[ind]:\n df.at[i,col_name] = \"\\\\textbf{\" + df.at[i,col_name] + \"}\"\n ind += 1\n \n df.reset_index(inplace=True) # set dataset indices as first column\n df.rename(columns={'dataset':'Dataset'}, inplace=True)\n cols = list(df.columns)\n df.columns = ['\\\\textbf{'+col+'}' for col in cols]\n textab = df.to_latex(escape=True, index=False, column_format = 'l'+'c'*(len(df.columns)-1))\n textab = textab.replace(\"\\\\textbackslash textbf\", \"\\\\textbf\")\n textab = textab.replace(\"\\\\{\", \"{\")\n textab = textab.replace(\"\\\\}\", \"}\")\n \n with open(textable_file,'w') as tf:\n tf.write(textab)\n print(\"saved tex table to: %s\" % textable_file)\n return textab\n\n"
] |
[
[
"numpy.abs"
]
] |
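The single API recorded above, `numpy.abs`, implements the tolerance test `tex_table` uses to pick which column(s) to bold per row. A standalone sketch of that selection logic, with made-up framework names and scores:

```python
import numpy as np
import pandas as pd

# Hypothetical dataset-by-framework scores; rows are datasets, as tex_table expects.
df = pd.DataFrame({'fw_a': [0.91, 0.72], 'fw_b': [0.88, 0.74]},
                  index=['dataset1', 'dataset2'])
best_row_vals = df.max(axis=1)  # bold='max' branch
for i in df.index:
    # Same 1e-5 tolerance as the snippet above; ties bold every matching column.
    row_best_cols = list(df.columns[np.abs(df.loc[i] - best_row_vals[i]) < 1e-5])
    print(i, row_best_cols)  # ['fw_a'] then ['fw_b']
```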
peiyong86/FATE
|
[
"efae2b1add20d9f98ac05a669298e36369f91497"
] |
[
"federatedml/linear_model/linear_model_weight.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#\n# Copyright 2019 The FATE Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\n\nfrom federatedml.framework.weights import ListWeights, TransferableWeights\n\n\nclass LinearModelWeights(ListWeights):\n def __init__(self, l, fit_intercept):\n super().__init__(l)\n self.fit_intercept = fit_intercept\n\n def for_remote(self):\n return TransferableWeights(self._weights, self.__class__, self.fit_intercept)\n\n @property\n def coef_(self):\n if self.fit_intercept:\n return np.array(self._weights[:-1])\n return np.array(self._weights)\n\n @property\n def intercept_(self):\n if self.fit_intercept:\n return self._weights[-1]\n return 0.0\n"
] |
[
[
"numpy.array"
]
] |
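A minimal worked example of the `coef_`/`intercept_` split implemented by `LinearModelWeights` above; the raw weight list is illustrative, and with `fit_intercept` the intercept sits last:

```python
import numpy as np

weights = [0.5, -1.2, 3.0]     # illustrative; last entry is the intercept
coef = np.array(weights[:-1])  # -> array([ 0.5, -1.2])
intercept = weights[-1]        # -> 3.0
print(coef, intercept)
```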
NoldAndreas/FINDER
|
[
"a3d947c5d59a7cd6e54400b0e9aeb9e111689976"
] |
[
"Code/Geometry_Base.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 20 14:51:07 2020\n\n@author: andreas\n\"\"\"\n\nimport numpy as np\nfrom abc import abstractmethod\n#import pickle\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.spatial.distance as dist\nimport glob\n\n#Geometry base class\n\nclass Geometry_Base:\n \n @abstractmethod\n def __init__(self,basefolder,unitCluster_Library):\n \n #Values that have to be set in child classes:\n self.basefolder = basefolder;\n self.geometry_name = [];\n self.XC = [];\n self.N_clusters = [];\n self.N_Noise = [];\n self.labels_groundtruth = [];\n self.parameters = [];\n self.unitCluster_Library = unitCluster_Library;\n self.seed = [];\n\n self.__loadUnitClusters();\n \n #Methods that need to be set in child classes:\n @abstractmethod \n def GeneratePoints(self):\n yield None\n \n def __loadUnitClusters(self): \n data_template_clusters = [];\n folder_ = self.unitCluster_Library;\n\n filenamesList = glob.glob(self.basefolder +'TemplateClusters/'+ folder_+'/cluster_*.txt');\n for fn in filenamesList:\n XS_i = np.loadtxt(fn, comments=\"#\", delimiter=\" \", unpack=False);\n data_template_clusters.append(XS_i); \n print(\"Loaded \"+str(len(data_template_clusters))+\" template clusters..\");\n\n #In the input data, \n #1 unit corresponds to 158nm. We normalize such that 1 unit = 1 nanometer\n datascale = 158;\n for i,X_cl in enumerate(data_template_clusters):\n data_template_clusters[i] = datascale*(X_cl - np.mean(X_cl,axis=0));\n \n self.template_clusters = data_template_clusters;\n \n def PlotScatter(self,filename):\n \n labels = self.labels_groundtruth;\n XC = self.XC;\n \n plt.figure();\n mark = (labels==-1);\n sns.scatterplot(x=XC[mark,0],y=XC[mark,1],color='grey',alpha=0.2);\n mark = (labels>=0);\n sns.scatterplot(x=XC[mark,0],y=XC[mark,1],hue=labels[mark],palette='bright',legend=False);\n plt.axis('equal')\n plt.savefig(filename);\n \n def GetTypicalDiameter_of_templateClusters(self):\n D_ = 0; \n for cl in self.template_clusters:\n d = np.max(dist.pdist(cl));\n if(d>D_):\n D_ = d;\n return D_\n \n def GetTypical_Number_of_points_templateClusters(self):\n Ns = [len(cl) for cl in self.template_clusters]; \n \n return np.mean(np.asarray(Ns));"
] |
[
[
"scipy.spatial.distance.pdist",
"numpy.asarray",
"matplotlib.pyplot.savefig",
"numpy.mean",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"matplotlib.pyplot.axis"
]
] |
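`scipy.spatial.distance.pdist` in the row above yields each template cluster's pairwise distances; the typical diameter is their maximum. A self-contained sketch with random stand-in clusters:

```python
import numpy as np
import scipy.spatial.distance as dist

# Two illustrative point clouds standing in for loaded template clusters.
clusters = [np.random.rand(20, 2), np.random.rand(30, 2)]
# Mirrors GetTypicalDiameter_of_templateClusters: max pairwise distance per cluster.
diameter = max(np.max(dist.pdist(cl)) for cl in clusters)
print(diameter)
```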
fzfs/Multi-view-Chest-X-ray-Classification
|
[
"156149829629586d5a8d7946fc710b3b2dec1020"
] |
[
"Resnet.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, layers, view=4, num_classes=13, img_size = 224):\n\n self.inplanes = 64\n super(ResNet, self).__init__()\n c = 1\n if view == 4:\n c = 2\n \n self.conv1 = nn.Conv2d(c, 64, kernel_size=7, stride=2, padding=3,bias=False) \n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n \n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1) \n x = self.fc(x)\n\n return x\n \nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16, k_size=9):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out"
] |
[
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.AdaptiveAvgPool2d"
]
] |
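A hedged usage sketch for the row above, assuming `Resnet.py` is importable from the working directory; with the default `view=4` the stem takes 2-channel input and the head emits 13 logits:

```python
import torch
from Resnet import ResNet, Bottleneck

model = ResNet(Bottleneck, [3, 4, 6, 3])     # ResNet-50-style layout
model.eval()                                 # avoid batch-norm stats on a batch of 1
logits = model(torch.randn(1, 2, 224, 224))  # (batch, c=2, H, W)
print(logits.shape)                          # torch.Size([1, 13])
```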
INF800/CLIP-rsicd
|
[
"80eb09a71b4ab8a4566eeaa72ec8890630c0d7ee"
] |
[
"utils/data.py"
] |
[
"import torch\nfrom torchvision.datasets import VisionDataset\nfrom torchvision.io import ImageReadMode, read_image\nfrom torchvision.transforms import (\n # added for image augmentation\n ToPILImage,\n RandomCrop,\n ColorJitter,\n RandomHorizontalFlip,\n RandomVerticalFlip,\n RandomResizedCrop,\n ToTensor,\n # /added for image augmentation\n CenterCrop, \n ConvertImageDtype, \n Normalize, \n Resize\n)\nfrom torchvision.transforms.functional import InterpolationMode\n\nimport jsonlines\nfrom pathlib import Path\nfrom typing import Optional, Callable\n\n\n# adopted form https://github.com/huggingface/transformers/blob/master/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py\nclass Transform(torch.nn.Module):\n def __init__(self, image_size, augment_images, augmentation_args):\n super().__init__()\n if augment_images:\n crop_size = int(image_size * 0.8)\n self.transforms = torch.nn.Sequential(\n # image augmentation transforms\n RandomCrop(crop_size),\n ColorJitter(),\n RandomHorizontalFlip(augmentation_args.random_horizontal_flip),\n RandomVerticalFlip(augmentation_args.random_vertical_flip),\n RandomResizedCrop(crop_size, scale=(0.8, 1.2), ratio=(1.0, 1.0)),\n # /image augmentation transforms\n Resize([image_size], interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n ConvertImageDtype(torch.float),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n )\n else:\n self.transforms = torch.nn.Sequential(\n Resize([image_size], interpolation=InterpolationMode.BICUBIC),\n CenterCrop(image_size),\n ConvertImageDtype(torch.float),\n Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),\n )\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n x = self.transforms(x)\n return x\n\nclass ImageTextDataset(VisionDataset):\n \"\"\"\n Dtaset for loading image-text data for tasks like CLIP training, Image Captioning.\n\n Args:\n root: (string): The root path where the dataset is stored.\n The expected format is jsonlines where each line is a json object containing to keys.\n `filename`: The path to the image.\n `captions`: An `array` of captions.\n split: (string): Dataset split name. Is used for parsing jsonl files from `root` folder.\n captions_per_image: (int): number of captions per image to use. Defaults to 5.\n augment_captions: (bool): If true the jsonl files with `textaug_` prefix are selected from root\n folder. \n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.ToTensor``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n transforms (callable, optional): A function/transform that takes input sample and its target as entry\n and returns a transformed version.\n \"\"\"\n\n def __init__(\n self,\n root: str,\n split: str, \n captions_per_image:int = 5,\n augment_captions:bool = True,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n transforms: Optional[Callable] = None,\n ):\n super().__init__(root, transforms, transform, target_transform)\n self.root = root\n if augment_captions:\n prefix = \"textaug_\"\n else:\n prefix = \"\"\n filepaths = list(Path(root).glob(f\"{prefix}{split}*.jsonl\"))\n fps_empty_msg = f\"\"\"\\\n The `filepaths` is empty. 
Please make sure that `root` folder contains jsonl files\n named properly: [textaug_]{split}*.jsonl.\n `textaug_` prefix is expected if `augment_captions` is `True`.\n \"\"\"\n assert len(filepaths) > 0, fps_empty_msg\n \n self.captions = []\n self.image_paths = []\n for count, filepath in enumerate(filepaths):\n with jsonlines.open(filepath, \"r\") as reader:\n for example in reader:\n self.captions.extend(example[\"captions\"][:captions_per_image])\n self.image_paths.extend([example[\"filename\"]] * captions_per_image)\n print(f\"{count+1} input files for {split} split found\")\n \n def _load_image(self, idx: int):\n path = f\"{self.root}/{self.image_paths[idx]}\"\n return read_image(path, mode=ImageReadMode.RGB)\n\n def _load_target(self, idx):\n return self.captions[idx]\n\n def __getitem__(self, index: int):\n image = self._load_image(index)\n target = self._load_target(index)\n\n if self.transforms is not None:\n image, target = self.transforms(image, target)\n\n return image, target\n\n def __len__(self) -> int:\n return len(self.captions)"
] |
[
[
"torch.no_grad"
]
] |
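A hedged sketch of the non-augmenting `Transform` path above, assuming `utils/data.py` is importable and a recent torchvision where these transforms accept uint8 tensors; `augmentation_args` is unused on this branch, so `None` is passed:

```python
import torch
from utils.data import Transform

t = Transform(image_size=224, augment_images=False, augmentation_args=None)
img = torch.randint(0, 256, (3, 300, 400), dtype=torch.uint8)  # fake CHW image
out = t(img)  # resize -> center-crop -> float conversion -> normalize
print(out.shape, out.dtype)  # torch.Size([3, 224, 224]) torch.float32
```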
google-research/DBAP-algorithm
|
[
"545a4e780f9d9d480c96b67e7a8ae590a983db6b",
"545a4e780f9d9d480c96b67e7a8ae590a983db6b",
"545a4e780f9d9d480c96b67e7a8ae590a983db6b",
"545a4e780f9d9d480c96b67e7a8ae590a983db6b"
] |
[
"third_party/rlkit_library/rlkit/torch/skewfit/video_gen.py",
"third_party/rlkit_library/rlkit/demos/spacemouse/input_server.py",
"third_party/rlkit_library/rlkit/envs/wrappers/discretize_env.py",
"third_party/rlkit_library/generate_video.py"
] |
[
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport os.path as osp\n\nimport uuid\nfrom rlkit.envs.vae_wrapper import VAEWrappedEnv\n\nfilename = str(uuid.uuid4())\n\nimport skvideo.io\nimport numpy as np\nimport time\n\nimport scipy.misc\n\ndef add_border(img, pad_length, pad_color, imsize=84):\n H = 3*imsize\n W = imsize\n img = img.reshape((3*imsize, imsize, -1))\n img2 = np.ones((H + 2 * pad_length, W + 2 * pad_length, img.shape[2]), dtype=np.uint8) * pad_color\n img2[pad_length:-pad_length, pad_length:-pad_length, :] = img\n return img2\n\n\ndef get_image(goal, obs, recon_obs, imsize=84, pad_length=1, pad_color=255):\n if len(goal.shape) == 1:\n goal = goal.reshape(-1, imsize, imsize).transpose(2, 1, 0)\n obs = obs.reshape(-1, imsize, imsize).transpose(2,1,0)\n recon_obs = recon_obs.reshape(-1, imsize, imsize).transpose(2,1,0)\n img = np.concatenate((goal, obs, recon_obs))\n img = np.uint8(255 * img)\n if pad_length > 0:\n img = add_border(img, pad_length, pad_color)\n return img\n\n\ndef dump_video(\n env,\n policy,\n filename,\n rollout_function,\n rows=3,\n columns=6,\n pad_length=0,\n pad_color=255,\n do_timer=True,\n horizon=100,\n dirname_to_save_images=None,\n subdirname=\"rollouts\",\n imsize=84,\n):\n # num_channels = env.vae.input_channels\n num_channels = 1 if env.grayscale else 3\n frames = []\n H = 3*imsize\n W=imsize\n N = rows * columns\n for i in range(N):\n start = time.time()\n path = rollout_function(\n env,\n policy,\n max_path_length=horizon,\n render=False,\n )\n is_vae_env = isinstance(env, VAEWrappedEnv)\n l = []\n for d in path['full_observations']:\n if is_vae_env:\n recon = np.clip(env._reconstruct_img(d['image_observation']), 0, 1)\n else:\n recon = d['image_observation']\n l.append(\n get_image(\n d['image_desired_goal'],\n d['image_observation'],\n recon,\n pad_length=pad_length,\n pad_color=pad_color,\n imsize=imsize,\n )\n )\n frames += l\n\n if dirname_to_save_images:\n rollout_dir = osp.join(dirname_to_save_images, subdirname, str(i))\n os.makedirs(rollout_dir, exist_ok=True)\n rollout_frames = frames[-101:]\n goal_img = np.flip(rollout_frames[0][:imsize, :imsize, :], 0)\n scipy.misc.imsave(rollout_dir+\"/goal.png\", goal_img)\n goal_img = np.flip(rollout_frames[1][:imsize, :imsize, :], 0)\n scipy.misc.imsave(rollout_dir+\"/z_goal.png\", goal_img)\n for j in range(0, 101, 1):\n img = np.flip(rollout_frames[j][imsize:, :imsize, :], 0)\n scipy.misc.imsave(rollout_dir+\"/\"+str(j)+\".png\", img)\n if do_timer:\n print(i, time.time() - start)\n\n frames = np.array(frames, dtype=np.uint8)\n path_length = frames.size // (\n N * (H + 2*pad_length) * (W + 2*pad_length) * num_channels\n )\n frames = np.array(frames, dtype=np.uint8).reshape(\n (N, path_length, H + 2 * pad_length, W + 2 * pad_length, num_channels)\n )\n f1 = []\n for k1 in range(columns):\n f2 = []\n for k2 in range(rows):\n k = k1 * rows + k2\n f2.append(frames[k:k+1, :, :, :, :].reshape(\n (path_length, H + 2 * pad_length, W + 2 * pad_length, 
num_channels)\n ))\n f1.append(np.concatenate(f2, axis=1))\n outputdata = np.concatenate(f1, axis=2)\n skvideo.io.vwrite(filename, outputdata)\n print(\"Saved video to \", filename)\n",
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# first to start the nameserver start: python -m Pyro4.naming\n\nimport Pyro4\nfrom threading import Thread\nimport time\nimport numpy as np\nfrom rlkit.launchers import conf as config\n\nPyro4.config.SERIALIZERS_ACCEPTED = set(['pickle','json', 'marshal', 'serpent'])\nPyro4.config.SERIALIZER='pickle'\n\ndevice_state = None\n\[email protected]\nclass DeviceState(object):\n state = None\n\n def get_state(self):\n return device_state\n\n def set_state(self, state):\n global device_state\n device_state = state\n\nclass SpaceMouseExpert:\n def __init__(\n self,\n xyz_dims=3,\n xyz_remap=[0, 1, 2],\n xyz_scale=[1, 1, 1],\n xyz_abs_threshold=0.0,\n rot_dims=3,\n rot_remap=[0, 1, 2],\n rot_scale=[1, 1, 1],\n rot_abs_threshold=0.0,\n rot_discrete=False,\n min_clip=-np.inf,\n max_clip=np.inf\n ):\n \"\"\"TODO: fill in other params\"\"\"\n self.xyz_dims = xyz_dims\n self.xyz_remap = np.array(xyz_remap)\n self.xyz_scale = np.array(xyz_scale)\n self.xyz_abs_threshold = xyz_abs_threshold\n self.rot_dims = rot_dims\n self.rot_remap = rot_remap\n self.rot_scale = rot_scale\n self.rot_abs_threshold = rot_abs_threshold\n self.rot_discrete = rot_discrete\n self.min_clip = min_clip\n self.max_clip = max_clip\n self.thread = Thread(target = start_server)\n self.thread.daemon = True\n self.thread.start()\n self.device_state = DeviceState()\n\n def get_action(self, obs):\n \"\"\"Must return (action, valid, reset, accept)\"\"\"\n state = self.device_state.get_state()\n # time.sleep(0.1)\n if state is None:\n return None, False, False, False\n\n dpos, rotation, roll, pitch, yaw, accept, reset = (\n state[\"dpos\"],\n state[\"rotation\"],\n state[\"roll\"],\n state[\"pitch\"],\n state[\"yaw\"],\n state[\"grasp\"], #[\"left_click\"],\n state[\"reset\"], #[\"right_click\"],\n )\n\n xyz = dpos[self.xyz_remap]\n xyz[np.abs(xyz) < self.xyz_abs_threshold] = 0.0\n xyz = xyz * self.xyz_scale\n xyz = np.clip(xyz, self.min_clip, self.max_clip)\n\n rot = np.array([roll, pitch, yaw])\n rot[np.abs(rot) < self.rot_abs_threshold] = 0.0\n if self.rot_discrete:\n max_i = np.argmax(np.abs(rot))\n for i in range(len(rot)):\n if i != max_i:\n rot[i] = 0.0\n rot = rot * self.rot_scale\n rot = np.clip(rot, self.min_clip, self.max_clip)\n\n a = np.concatenate([xyz[:self.xyz_dims], rot[:self.rot_dims]])\n\n valid = not np.all(np.isclose(a, 0))\n\n # print(a, roll, pitch, yaw, valid)\n\n return (a, valid, reset, accept)\n\n\ndef start_server():\n daemon = Pyro4.Daemon(config.SPACEMOUSE_HOSTNAME)\n ns = Pyro4.locateNS() # find the name server\n uri = daemon.register(DeviceState) # register the greeting maker as a Pyro object\n ns.register(\"example.greeting\", uri) # register the object with a name in the name server\n print(\"uri:\", uri)\n print(\"Server ready.\")\n daemon.requestLoop() # start the event loop of the server to wait for calls\n\nif __name__ == \"__main__\":\n expert = SpaceMouseExpert()\n\n for i in range(100):\n time.sleep(1)\n 
print(expert.get_action(None))\n",
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport itertools\n\nimport numpy as np\nfrom gym import Env\nfrom gym.spaces import Discrete\n\nfrom rlkit.envs.proxy_env import ProxyEnv\n\n\nclass DiscretizeEnv(ProxyEnv, Env):\n def __init__(self, wrapped_env, num_bins):\n super().__init__(wrapped_env)\n low = self.wrapped_env.action_space.low\n high = self.wrapped_env.action_space.high\n action_ranges = [\n np.linspace(low[i], high[i], num_bins)\n for i in range(len(low))\n ]\n self.idx_to_continuous_action = [\n np.array(x) for x in itertools.product(*action_ranges)\n ]\n self.action_space = Discrete(len(self.idx_to_continuous_action))\n\n def step(self, action):\n continuous_action = self.idx_to_continuous_action[action]\n return super().step(continuous_action)\n\n\n",
"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom PIL import Image\nimport glob\nimport os\nimport numpy as np\nimport skvideo.io\nimage_list_expl = []\nimage_list_eval = []\nimport glob\nimport os\n# path = \"/usr/local/google/home/abhishekunique/sim_franka/rlkit/20201230-07h36m-lowervariance-targeteddata-backforth/test-4/test-4/12-30-dev-example-awac-script/12-30-dev-example-awac-script_2020_12_30_15_38_43_0000--s-76665/*.png\"\npath = \"/usr/local/google/home/abhishekunique/sim_franka/rlkit/20201230-07h36m-lowervariance-targeteddata-backforth/test-36/test-36/12-30-dev-example-awac-script/12-30-dev-example-awac-script_2020_12_30_15_38_42_0000--s-7314/*.png\"\nfiles = glob.glob(path)\nfiles.sort(key=os.path.getmtime)\nfor filename in files: #assuming gif\n im=Image.open(filename)\n if 'expl' in filename:\n image_list_expl.append(np.array(im))\n elif 'eval' in filename:\n image_list_eval.append(np.array(im))\nskvideo.io.vwrite('eval_vid.mp4', np.array(image_list_eval))\nskvideo.io.vwrite('expl_vid.mp4', np.array(image_list_expl))\n\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.uint8",
"numpy.ones",
"numpy.flip"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.isclose",
"numpy.abs",
"numpy.clip"
],
[
"numpy.array",
"numpy.linspace"
],
[
"numpy.array"
]
] |
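The `numpy.linspace` entry for `discretize_env.py` above builds a per-dimension action grid whose Cartesian product becomes the discrete action set. A standalone sketch of that construction, with a hypothetical 2-D continuous action space:

```python
import itertools

import numpy as np

# Hypothetical Box bounds; DiscretizeEnv reads these from the wrapped env.
low, high, num_bins = np.array([-1.0, 0.0]), np.array([1.0, 2.0]), 3
action_ranges = [np.linspace(low[i], high[i], num_bins) for i in range(len(low))]
idx_to_continuous_action = [np.array(x) for x in itertools.product(*action_ranges)]
print(len(idx_to_continuous_action))  # 9 discrete actions (3 bins per dimension)
```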
Ennosigaeon/scipy
|
[
"2d872f7cf2098031b9be863ec25e366a550b229c",
"2d872f7cf2098031b9be863ec25e366a550b229c",
"2d872f7cf2098031b9be863ec25e366a550b229c",
"2d872f7cf2098031b9be863ec25e366a550b229c",
"2d872f7cf2098031b9be863ec25e366a550b229c",
"2d872f7cf2098031b9be863ec25e366a550b229c"
] |
[
"scipy/sparse/bsr.py",
"scipy/fftpack/tests/gendata.py",
"scipy/linalg/decomp_cholesky.py",
"scipy/optimize/_trustregion.py",
"scipy/signal/tests/test_spectral.py",
"scipy/fftpack/helper.py"
] |
[
"\"\"\"Compressed Block Sparse Row matrix format\"\"\"\n\n__docformat__ = \"restructuredtext en\"\n\n__all__ = ['bsr_matrix', 'isspmatrix_bsr']\n\nfrom warnings import warn\n\nimport numpy as np\n\nfrom .data import _data_matrix, _minmax_mixin\nfrom .compressed import _cs_matrix\nfrom .base import isspmatrix, _formats, spmatrix\nfrom .sputils import (isshape, getdtype, getdata, to_native, upcast,\n get_index_dtype, check_shape)\nfrom . import _sparsetools\nfrom ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz,\n bsr_matmat, bsr_transpose, bsr_sort_indices,\n bsr_tocsr)\n\n\nclass bsr_matrix(_cs_matrix, _minmax_mixin):\n \"\"\"Block Sparse Row matrix\n\n This can be instantiated in several ways:\n bsr_matrix(D, [blocksize=(R,C)])\n where D is a dense matrix or 2-D ndarray.\n\n bsr_matrix(S, [blocksize=(R,C)])\n with another sparse matrix S (equivalent to S.tobsr())\n\n bsr_matrix((M, N), [blocksize=(R,C), dtype])\n to construct an empty matrix with shape (M, N)\n dtype is optional, defaulting to dtype='d'.\n\n bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])\n where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``\n\n bsr_matrix((data, indices, indptr), [shape=(M, N)])\n is the standard BSR representation where the block column\n indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``\n and their corresponding block values are stored in\n ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not\n supplied, the matrix dimensions are inferred from the index arrays.\n\n Attributes\n ----------\n dtype : dtype\n Data type of the matrix\n shape : 2-tuple\n Shape of the matrix\n ndim : int\n Number of dimensions (this is always 2)\n nnz\n Number of stored values, including explicit zeros\n data\n Data array of the matrix\n indices\n BSR format index array\n indptr\n BSR format index pointer array\n blocksize\n Block size of the matrix\n has_sorted_indices\n Whether indices are sorted\n\n Notes\n -----\n Sparse matrices can be used in arithmetic operations: they support\n addition, subtraction, multiplication, division, and matrix power.\n\n **Summary of BSR format**\n\n The Block Compressed Row (BSR) format is very similar to the Compressed\n Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense\n sub matrices like the last example below. Block matrices often arise in\n vector-valued finite element discretizations. 
In such cases, BSR is\n considerably more efficient than CSR and CSC for many sparse arithmetic\n operations.\n\n **Blocksize**\n\n The blocksize (R,C) must evenly divide the shape of the matrix (M,N).\n That is, R and C must satisfy the relationship ``M % R = 0`` and\n ``N % C = 0``.\n\n If no blocksize is specified, a simple heuristic is applied to determine\n an appropriate blocksize.\n\n Examples\n --------\n >>> from scipy.sparse import bsr_matrix\n >>> bsr_matrix((3, 4), dtype=np.int8).toarray()\n array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0]], dtype=int8)\n\n >>> row = np.array([0, 0, 1, 2, 2, 2])\n >>> col = np.array([0, 2, 2, 0, 1, 2])\n >>> data = np.array([1, 2, 3 ,4, 5, 6])\n >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()\n array([[1, 0, 2],\n [0, 0, 3],\n [4, 5, 6]])\n\n >>> indptr = np.array([0, 2, 3, 6])\n >>> indices = np.array([0, 2, 2, 0, 1, 2])\n >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)\n >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()\n array([[1, 1, 0, 0, 2, 2],\n [1, 1, 0, 0, 2, 2],\n [0, 0, 0, 0, 3, 3],\n [0, 0, 0, 0, 3, 3],\n [4, 4, 5, 5, 6, 6],\n [4, 4, 5, 5, 6, 6]])\n\n \"\"\"\n format = 'bsr'\n\n def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):\n _data_matrix.__init__(self)\n\n if isspmatrix(arg1):\n if isspmatrix_bsr(arg1) and copy:\n arg1 = arg1.copy()\n else:\n arg1 = arg1.tobsr(blocksize=blocksize)\n self._set_self(arg1)\n\n elif isinstance(arg1,tuple):\n if isshape(arg1):\n # it's a tuple of matrix dimensions (M,N)\n self._shape = check_shape(arg1)\n M,N = self.shape\n # process blocksize\n if blocksize is None:\n blocksize = (1,1)\n else:\n if not isshape(blocksize):\n raise ValueError('invalid blocksize=%s' % blocksize)\n blocksize = tuple(blocksize)\n self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))\n\n R,C = blocksize\n if (M % R) != 0 or (N % C) != 0:\n raise ValueError('shape must be multiple of blocksize')\n\n # Select index dtype large enough to pass array and\n # scalar parameters to sparsetools\n idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))\n self.indices = np.zeros(0, dtype=idx_dtype)\n self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)\n\n elif len(arg1) == 2:\n # (data,(row,col)) format\n from .coo import coo_matrix\n self._set_self(\n coo_matrix(arg1, dtype=dtype, shape=shape).tobsr(\n blocksize=blocksize\n )\n )\n\n elif len(arg1) == 3:\n # (data,indices,indptr) format\n (data, indices, indptr) = arg1\n\n # Select index dtype large enough to pass array and\n # scalar parameters to sparsetools\n maxval = 1\n if shape is not None:\n maxval = max(shape)\n if blocksize is not None:\n maxval = max(maxval, max(blocksize))\n idx_dtype = get_index_dtype((indices, indptr), maxval=maxval,\n check_contents=True)\n self.indices = np.array(indices, copy=copy, dtype=idx_dtype)\n self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)\n self.data = getdata(data, copy=copy, dtype=dtype)\n if self.data.ndim != 3:\n raise ValueError(\n 'BSR data must be 3-dimensional, got shape=%s' % (\n self.data.shape,))\n if blocksize is not None:\n if not isshape(blocksize):\n raise ValueError('invalid blocksize=%s' % (blocksize,))\n if tuple(blocksize) != self.data.shape[1:]:\n raise ValueError('mismatching blocksize=%s vs %s' % (\n blocksize, self.data.shape[1:]))\n else:\n raise ValueError('unrecognized bsr_matrix constructor usage')\n else:\n # must be dense\n try:\n arg1 = np.asarray(arg1)\n except Exception as e:\n raise ValueError(\"unrecognized 
form for\"\n \" %s_matrix constructor\" % self.format) from e\n from .coo import coo_matrix\n arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)\n self._set_self(arg1)\n\n if shape is not None:\n self._shape = check_shape(shape)\n else:\n if self.shape is None:\n # shape not already set, try to infer dimensions\n try:\n M = len(self.indptr) - 1\n N = self.indices.max() + 1\n except Exception as e:\n raise ValueError('unable to infer matrix dimensions') from e\n else:\n R,C = self.blocksize\n self._shape = check_shape((M*R,N*C))\n\n if self.shape is None:\n if shape is None:\n # TODO infer shape here\n raise ValueError('need to infer shape')\n else:\n self._shape = check_shape(shape)\n\n if dtype is not None:\n self.data = self.data.astype(dtype, copy=False)\n\n self.check_format(full_check=False)\n\n def check_format(self, full_check=True):\n \"\"\"check whether the matrix format is valid\n\n *Parameters*:\n full_check:\n True - rigorous check, O(N) operations : default\n False - basic check, O(1) operations\n\n \"\"\"\n M,N = self.shape\n R,C = self.blocksize\n\n # index arrays should have integer data types\n if self.indptr.dtype.kind != 'i':\n warn(\"indptr array has non-integer dtype (%s)\"\n % self.indptr.dtype.name)\n if self.indices.dtype.kind != 'i':\n warn(\"indices array has non-integer dtype (%s)\"\n % self.indices.dtype.name)\n\n idx_dtype = get_index_dtype((self.indices, self.indptr))\n self.indptr = np.asarray(self.indptr, dtype=idx_dtype)\n self.indices = np.asarray(self.indices, dtype=idx_dtype)\n self.data = to_native(self.data)\n\n # check array shapes\n if self.indices.ndim != 1 or self.indptr.ndim != 1:\n raise ValueError(\"indices, and indptr should be 1-D\")\n if self.data.ndim != 3:\n raise ValueError(\"data should be 3-D\")\n\n # check index pointer\n if (len(self.indptr) != M//R + 1):\n raise ValueError(\"index pointer size (%d) should be (%d)\" %\n (len(self.indptr), M//R + 1))\n if (self.indptr[0] != 0):\n raise ValueError(\"index pointer should start with 0\")\n\n # check index and data arrays\n if (len(self.indices) != len(self.data)):\n raise ValueError(\"indices and data should have the same size\")\n if (self.indptr[-1] > len(self.indices)):\n raise ValueError(\"Last value of index pointer should be less than \"\n \"the size of index and data arrays\")\n\n self.prune()\n\n if full_check:\n # check format validity (more expensive)\n if self.nnz > 0:\n if self.indices.max() >= N//C:\n raise ValueError(\"column index values must be < %d (now max %d)\" % (N//C, self.indices.max()))\n if self.indices.min() < 0:\n raise ValueError(\"column index values must be >= 0\")\n if np.diff(self.indptr).min() < 0:\n raise ValueError(\"index pointer values must form a \"\n \"non-decreasing sequence\")\n\n # if not self.has_sorted_indices():\n # warn('Indices were not in sorted order. 
Sorting indices.')\n # self.sort_indices(check_first=False)\n\n def _get_blocksize(self):\n return self.data.shape[1:]\n blocksize = property(fget=_get_blocksize)\n\n def getnnz(self, axis=None):\n if axis is not None:\n raise NotImplementedError(\"getnnz over an axis is not implemented \"\n \"for BSR format\")\n R,C = self.blocksize\n return int(self.indptr[-1] * R * C)\n\n getnnz.__doc__ = spmatrix.getnnz.__doc__\n\n def __repr__(self):\n format = _formats[self.getformat()][1]\n return (\"<%dx%d sparse matrix of type '%s'\\n\"\n \"\\twith %d stored elements (blocksize = %dx%d) in %s format>\" %\n (self.shape + (self.dtype.type, self.nnz) + self.blocksize +\n (format,)))\n\n def diagonal(self, k=0):\n rows, cols = self.shape\n if k <= -rows or k >= cols:\n return np.empty(0, dtype=self.data.dtype)\n R, C = self.blocksize\n y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),\n dtype=upcast(self.dtype))\n _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,\n self.indptr, self.indices,\n np.ravel(self.data), y)\n return y\n\n diagonal.__doc__ = spmatrix.diagonal.__doc__\n\n ##########################\n # NotImplemented methods #\n ##########################\n\n def __getitem__(self,key):\n raise NotImplementedError\n\n def __setitem__(self,key,val):\n raise NotImplementedError\n\n ######################\n # Arithmetic methods #\n ######################\n\n @np.deprecate(message=\"BSR matvec is deprecated in SciPy 0.19.0. \"\n \"Use * operator instead.\")\n def matvec(self, other):\n \"\"\"Multiply matrix by vector.\"\"\"\n return self * other\n\n @np.deprecate(message=\"BSR matmat is deprecated in SciPy 0.19.0. \"\n \"Use * operator instead.\")\n def matmat(self, other):\n \"\"\"Multiply this sparse matrix by other matrix.\"\"\"\n return self * other\n\n def _add_dense(self, other):\n return self.tocoo(copy=False)._add_dense(other)\n\n def _mul_vector(self, other):\n M,N = self.shape\n R,C = self.blocksize\n\n result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))\n\n bsr_matvec(M//R, N//C, R, C,\n self.indptr, self.indices, self.data.ravel(),\n other, result)\n\n return result\n\n def _mul_multivector(self,other):\n R,C = self.blocksize\n M,N = self.shape\n n_vecs = other.shape[1] # number of column vectors\n\n result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))\n\n bsr_matvecs(M//R, N//C, n_vecs, R, C,\n self.indptr, self.indices, self.data.ravel(),\n other.ravel(), result.ravel())\n\n return result\n\n def _mul_sparse_matrix(self, other):\n M, K1 = self.shape\n K2, N = other.shape\n\n R,n = self.blocksize\n\n # convert to this format\n if isspmatrix_bsr(other):\n C = other.blocksize[1]\n else:\n C = 1\n\n from .csr import isspmatrix_csr\n\n if isspmatrix_csr(other) and n == 1:\n other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion\n else:\n other = other.tobsr(blocksize=(n,C))\n\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices))\n\n bnnz = csr_matmat_maxnnz(M//R, N//C,\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n other.indptr.astype(idx_dtype),\n other.indices.astype(idx_dtype))\n\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=bnnz)\n indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n indices = np.empty(bnnz, dtype=idx_dtype)\n data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))\n\n bsr_matmat(bnnz, M//R, N//C, R, C, n,\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n 
np.ravel(self.data),\n other.indptr.astype(idx_dtype),\n other.indices.astype(idx_dtype),\n np.ravel(other.data),\n indptr,\n indices,\n data)\n\n data = data.reshape(-1,R,C)\n\n # TODO eliminate zeros\n\n return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))\n\n ######################\n # Conversion methods #\n ######################\n\n def tobsr(self, blocksize=None, copy=False):\n \"\"\"Convert this matrix into Block Sparse Row Format.\n\n With copy=False, the data/indices may be shared between this\n matrix and the resultant bsr_matrix.\n\n If blocksize=(R, C) is provided, it will be used for determining\n block size of the bsr_matrix.\n \"\"\"\n if blocksize not in [None, self.blocksize]:\n return self.tocsr().tobsr(blocksize=blocksize)\n if copy:\n return self.copy()\n else:\n return self\n\n def tocsr(self, copy=False):\n M, N = self.shape\n R, C = self.blocksize\n nnz = self.nnz\n idx_dtype = get_index_dtype((self.indptr, self.indices),\n maxval=max(nnz, N))\n indptr = np.empty(M + 1, dtype=idx_dtype)\n indices = np.empty(nnz, dtype=idx_dtype)\n data = np.empty(nnz, dtype=upcast(self.dtype))\n\n bsr_tocsr(M // R, # n_brow\n N // C, # n_bcol\n R, C,\n self.indptr.astype(idx_dtype, copy=False),\n self.indices.astype(idx_dtype, copy=False),\n self.data,\n indptr,\n indices,\n data)\n from .csr import csr_matrix\n return csr_matrix((data, indices, indptr), shape=self.shape)\n\n tocsr.__doc__ = spmatrix.tocsr.__doc__\n\n def tocsc(self, copy=False):\n return self.tocsr(copy=False).tocsc(copy=copy)\n\n tocsc.__doc__ = spmatrix.tocsc.__doc__\n\n def tocoo(self, copy=True):\n \"\"\"Convert this matrix to COOrdinate format.\n\n When copy=False the data array will be shared between\n this matrix and the resultant coo_matrix.\n \"\"\"\n\n M,N = self.shape\n R,C = self.blocksize\n\n indptr_diff = np.diff(self.indptr)\n if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:\n # Check for potential overflow\n indptr_diff_limited = indptr_diff.astype(np.intp)\n if np.any(indptr_diff_limited != indptr_diff):\n raise ValueError(\"Matrix too big to convert\")\n indptr_diff = indptr_diff_limited\n\n row = (R * np.arange(M//R)).repeat(indptr_diff)\n row = row.repeat(R*C).reshape(-1,R,C)\n row += np.tile(np.arange(R).reshape(-1,1), (1,C))\n row = row.reshape(-1)\n\n col = (C * self.indices).repeat(R*C).reshape(-1,R,C)\n col += np.tile(np.arange(C), (R,1))\n col = col.reshape(-1)\n\n data = self.data.reshape(-1)\n\n if copy:\n data = data.copy()\n\n from .coo import coo_matrix\n return coo_matrix((data,(row,col)), shape=self.shape)\n\n def toarray(self, order=None, out=None):\n return self.tocoo(copy=False).toarray(order=order, out=out)\n\n toarray.__doc__ = spmatrix.toarray.__doc__\n\n def transpose(self, axes=None, copy=False):\n if axes is not None:\n raise ValueError((\"Sparse matrices do not support \"\n \"an 'axes' parameter because swapping \"\n \"dimensions is the only logical permutation.\"))\n\n R, C = self.blocksize\n M, N = self.shape\n NBLK = self.nnz//(R*C)\n\n if self.nnz == 0:\n return bsr_matrix((N, M), blocksize=(C, R),\n dtype=self.dtype, copy=copy)\n\n indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)\n indices = np.empty(NBLK, dtype=self.indices.dtype)\n data = np.empty((NBLK, C, R), dtype=self.data.dtype)\n\n bsr_transpose(M//R, N//C, R, C,\n self.indptr, self.indices, self.data.ravel(),\n indptr, indices, data.ravel())\n\n return bsr_matrix((data, indices, indptr),\n shape=(N, M), copy=copy)\n\n transpose.__doc__ = spmatrix.transpose.__doc__\n\n 
##############################################################\n # methods that examine or modify the internal data structure #\n ##############################################################\n\n def eliminate_zeros(self):\n \"\"\"Remove zero elements in-place.\"\"\"\n\n if not self.nnz:\n return # nothing to do\n\n R,C = self.blocksize\n M,N = self.shape\n\n mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks\n\n nonzero_blocks = mask.nonzero()[0]\n\n self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]\n\n # modifies self.indptr and self.indices *in place*\n _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,\n self.indices, mask)\n self.prune()\n\n def sum_duplicates(self):\n \"\"\"Eliminate duplicate matrix entries by adding them together\n\n This is an *in place* operation\n \"\"\"\n if self.has_canonical_format:\n return\n self.sort_indices()\n R, C = self.blocksize\n M, N = self.shape\n\n # port of _sparsetools.csr_sum_duplicates\n n_row = M // R\n nnz = 0\n row_end = 0\n for i in range(n_row):\n jj = row_end\n row_end = self.indptr[i+1]\n while jj < row_end:\n j = self.indices[jj]\n x = self.data[jj]\n jj += 1\n while jj < row_end and self.indices[jj] == j:\n x += self.data[jj]\n jj += 1\n self.indices[nnz] = j\n self.data[nnz] = x\n nnz += 1\n self.indptr[i+1] = nnz\n\n self.prune() # nnz may have changed\n self.has_canonical_format = True\n\n def sort_indices(self):\n \"\"\"Sort the indices of this matrix *in place*\n \"\"\"\n if self.has_sorted_indices:\n return\n\n R,C = self.blocksize\n M,N = self.shape\n\n bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())\n\n self.has_sorted_indices = True\n\n def prune(self):\n \"\"\" Remove empty space after all non-zero elements.\n \"\"\"\n\n R,C = self.blocksize\n M,N = self.shape\n\n if len(self.indptr) != M//R + 1:\n raise ValueError(\"index pointer has invalid length\")\n\n bnnz = self.indptr[-1]\n\n if len(self.indices) < bnnz:\n raise ValueError(\"indices array has too few elements\")\n if len(self.data) < bnnz:\n raise ValueError(\"data array has too few elements\")\n\n self.data = self.data[:bnnz]\n self.indices = self.indices[:bnnz]\n\n # utility functions\n def _binopt(self, other, op, in_shape=None, out_shape=None):\n \"\"\"Apply the binary operation fn to two sparse matrices.\"\"\"\n\n # Ideally we'd take the GCDs of the blocksize dimensions\n # and explode self and other to match.\n other = self.__class__(other, blocksize=self.blocksize)\n\n # e.g. 
bsr_plus_bsr, etc.\n fn = getattr(_sparsetools, self.format + op + self.format)\n\n R,C = self.blocksize\n\n max_bnnz = len(self.data) + len(other.data)\n idx_dtype = get_index_dtype((self.indptr, self.indices,\n other.indptr, other.indices),\n maxval=max_bnnz)\n indptr = np.empty(self.indptr.shape, dtype=idx_dtype)\n indices = np.empty(max_bnnz, dtype=idx_dtype)\n\n bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']\n if op in bool_ops:\n data = np.empty(R*C*max_bnnz, dtype=np.bool_)\n else:\n data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))\n\n fn(self.shape[0]//R, self.shape[1]//C, R, C,\n self.indptr.astype(idx_dtype),\n self.indices.astype(idx_dtype),\n self.data,\n other.indptr.astype(idx_dtype),\n other.indices.astype(idx_dtype),\n np.ravel(other.data),\n indptr,\n indices,\n data)\n\n actual_bnnz = indptr[-1]\n indices = indices[:actual_bnnz]\n data = data[:R*C*actual_bnnz]\n\n if actual_bnnz < max_bnnz/2:\n indices = indices.copy()\n data = data.copy()\n\n data = data.reshape(-1,R,C)\n\n return self.__class__((data, indices, indptr), shape=self.shape)\n\n # needed by _data_matrix\n def _with_data(self,data,copy=True):\n \"\"\"Returns a matrix with the same sparsity structure as self,\n but with different data. By default the structure arrays\n (i.e. .indptr and .indices) are copied.\n \"\"\"\n if copy:\n return self.__class__((data,self.indices.copy(),self.indptr.copy()),\n shape=self.shape,dtype=data.dtype)\n else:\n return self.__class__((data,self.indices,self.indptr),\n shape=self.shape,dtype=data.dtype)\n\n# # these functions are used by the parent class\n# # to remove redundancy between bsc_matrix and bsr_matrix\n# def _swap(self,x):\n# \"\"\"swap the members of x if this is a column-oriented matrix\n# \"\"\"\n# return (x[0],x[1])\n\n\ndef isspmatrix_bsr(x):\n \"\"\"Is x of a bsr_matrix type?\n\n Parameters\n ----------\n x\n object to check for being a bsr matrix\n\n Returns\n -------\n bool\n True if x is a bsr matrix, False otherwise\n\n Examples\n --------\n >>> from scipy.sparse import bsr_matrix, isspmatrix_bsr\n >>> isspmatrix_bsr(bsr_matrix([[5]]))\n True\n\n >>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr\n >>> isspmatrix_bsr(csr_matrix([[5]]))\n False\n \"\"\"\n return isinstance(x, bsr_matrix)\n",
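A hedged usage sketch (not part of the SciPy sources above) exercising the bsr_matrix paths defined in this file: construction from a (data, indices, indptr) triple, the blocksize/getnnz properties, matrix-vector multiplication via _mul_vector, and the transpose/tocsr conversions. The array values are illustrative only.
"# Hedged usage sketch for the bsr_matrix methods above; values are illustrative.\nimport numpy as np\nfrom scipy.sparse import bsr_matrix\n\nindptr = np.array([0, 2, 3, 6])\nindices = np.array([0, 2, 2, 0, 1, 2])\ndata = np.arange(1, 7).repeat(4).reshape(6, 2, 2) # six 2x2 blocks\nA = bsr_matrix((data, indices, indptr), shape=(6, 6))\n\nprint(A.blocksize) # (2, 2)\nprint(A.getnnz()) # 24 == indptr[-1] * R * C\nprint(A @ np.ones(6)) # exercises the _mul_vector path\nprint(A.transpose().tocsr().toarray()) # transpose -> tocsr -> toarray\n",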
"import numpy as np\nfrom scipy.io import loadmat\n\nm = loadmat('test.mat', squeeze_me=True, struct_as_record=True,\n mat_dtype=True)\nnp.savez('test.npz', **m)\n",
"\"\"\"Cholesky decomposition functions.\"\"\"\n\nfrom numpy import asarray_chkfinite, asarray, atleast_2d\n\n# Local imports\nfrom .misc import LinAlgError, _datacopied\nfrom .lapack import get_lapack_funcs\n\n__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded',\n 'cho_solve_banded']\n\n\ndef _cholesky(a, lower=False, overwrite_a=False, clean=True,\n check_finite=True):\n \"\"\"Common code for cholesky() and cho_factor().\"\"\"\n\n a1 = asarray_chkfinite(a) if check_finite else asarray(a)\n a1 = atleast_2d(a1)\n\n # Dimension check\n if a1.ndim != 2:\n raise ValueError('Input array needs to be 2D but received '\n 'a {}d-array.'.format(a1.ndim))\n # Squareness check\n if a1.shape[0] != a1.shape[1]:\n raise ValueError('Input array is expected to be square but has '\n 'the shape: {}.'.format(a1.shape))\n\n # Quick return for square empty array\n if a1.size == 0:\n return a1.copy(), lower\n\n overwrite_a = overwrite_a or _datacopied(a1, a)\n potrf, = get_lapack_funcs(('potrf',), (a1,))\n c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean)\n if info > 0:\n raise LinAlgError(\"%d-th leading minor of the array is not positive \"\n \"definite\" % info)\n if info < 0:\n raise ValueError('LAPACK reported an illegal value in {}-th argument'\n 'on entry to \"POTRF\".'.format(-info))\n return c, lower\n\n\ndef cholesky(a, lower=False, overwrite_a=False, check_finite=True):\n \"\"\"\n Compute the Cholesky decomposition of a matrix.\n\n Returns the Cholesky decomposition, :math:`A = L L^*` or\n :math:`A = U^* U` of a Hermitian positive-definite matrix A.\n\n Parameters\n ----------\n a : (M, M) array_like\n Matrix to be decomposed\n lower : bool, optional\n Whether to compute the upper- or lower-triangular Cholesky\n factorization. Default is upper-triangular.\n overwrite_a : bool, optional\n Whether to overwrite data in `a` (may improve performance).\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n c : (M, M) ndarray\n Upper- or lower-triangular Cholesky factor of `a`.\n\n Raises\n ------\n LinAlgError : if decomposition fails.\n\n Examples\n --------\n >>> from scipy.linalg import cholesky\n >>> a = np.array([[1,-2j],[2j,5]])\n >>> L = cholesky(a, lower=True)\n >>> L\n array([[ 1.+0.j, 0.+0.j],\n [ 0.+2.j, 1.+0.j]])\n >>> L @ L.T.conj()\n array([[ 1.+0.j, 0.-2.j],\n [ 0.+2.j, 5.+0.j]])\n\n \"\"\"\n c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True,\n check_finite=check_finite)\n return c\n\n\ndef cho_factor(a, lower=False, overwrite_a=False, check_finite=True):\n \"\"\"\n Compute the Cholesky decomposition of a matrix, to use in cho_solve\n\n Returns a matrix containing the Cholesky decomposition,\n ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`.\n The return value can be directly used as the first parameter to cho_solve.\n\n .. warning::\n The returned matrix also contains random data in the entries not\n used by the Cholesky decomposition. 
If you need to zero these\n entries, use the function `cholesky` instead.\n\n Parameters\n ----------\n a : (M, M) array_like\n Matrix to be decomposed\n lower : bool, optional\n Whether to compute the upper or lower triangular Cholesky factorization\n (Default: upper-triangular)\n overwrite_a : bool, optional\n Whether to overwrite data in a (may improve performance)\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n c : (M, M) ndarray\n Matrix whose upper or lower triangle contains the Cholesky factor\n of `a`. Other parts of the matrix contain random data.\n lower : bool\n Flag indicating whether the factor is in the lower or upper triangle\n\n Raises\n ------\n LinAlgError\n Raised if decomposition fails.\n\n See also\n --------\n cho_solve : Solve a linear set of equations using the Cholesky factorization\n of a matrix.\n\n Examples\n --------\n >>> from scipy.linalg import cho_factor\n >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])\n >>> c, low = cho_factor(A)\n >>> c\n array([[3. , 1. , 0.33333333, 1.66666667],\n [3. , 2.44948974, 1.90515869, -0.27216553],\n [1. , 5. , 2.29330749, 0.8559528 ],\n [5. , 1. , 2. , 1.55418563]])\n >>> np.allclose(np.triu(c).T @ np.triu(c) - A, np.zeros((4, 4)))\n True\n\n \"\"\"\n c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False,\n check_finite=check_finite)\n return c, lower\n\n\ndef cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True):\n \"\"\"Solve the linear equations A x = b, given the Cholesky factorization of A.\n\n Parameters\n ----------\n (c, lower) : tuple, (array, bool)\n Cholesky factorization of a, as given by cho_factor\n b : array\n Right-hand side\n overwrite_b : bool, optional\n Whether to overwrite data in b (may improve performance)\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n x : array\n The solution to the system A x = b\n\n See also\n --------\n cho_factor : Cholesky factorization of a matrix\n\n Examples\n --------\n >>> from scipy.linalg import cho_factor, cho_solve\n >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]])\n >>> c, low = cho_factor(A)\n >>> x = cho_solve((c, low), [1, 1, 1, 1])\n >>> np.allclose(A @ x - [1, 1, 1, 1], np.zeros(4))\n True\n\n \"\"\"\n (c, lower) = c_and_lower\n if check_finite:\n b1 = asarray_chkfinite(b)\n c = asarray_chkfinite(c)\n else:\n b1 = asarray(b)\n c = asarray(c)\n if c.ndim != 2 or c.shape[0] != c.shape[1]:\n raise ValueError(\"The factored matrix c is not square.\")\n if c.shape[1] != b1.shape[0]:\n raise ValueError(\"incompatible dimensions ({} and {})\"\n .format(c.shape, b1.shape))\n\n overwrite_b = overwrite_b or _datacopied(b1, b)\n\n potrs, = get_lapack_funcs(('potrs',), (c, b1))\n x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b)\n if info != 0:\n raise ValueError('illegal value in %dth argument of internal potrs'\n % -info)\n return x\n\n\ndef cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True):\n \"\"\"\n Cholesky decompose a banded Hermitian positive-definite matrix\n\n The matrix a is stored in ab either in lower-diagonal or 
upper-\n diagonal ordered form::\n\n ab[u + i - j, j] == a[i,j] (if upper form; i <= j)\n ab[ i - j, j] == a[i,j] (if lower form; i >= j)\n\n Example of ab (shape of a is (6,6), u=2)::\n\n upper form:\n * * a02 a13 a24 a35\n * a01 a12 a23 a34 a45\n a00 a11 a22 a33 a44 a55\n\n lower form:\n a00 a11 a22 a33 a44 a55\n a10 a21 a32 a43 a54 *\n a20 a31 a42 a53 * *\n\n Parameters\n ----------\n ab : (u + 1, M) array_like\n Banded matrix\n overwrite_ab : bool, optional\n Discard data in ab (may enhance performance)\n lower : bool, optional\n Is the matrix in the lower form. (Default is upper form)\n check_finite : bool, optional\n Whether to check that the input matrix contains only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n c : (u + 1, M) ndarray\n Cholesky factorization of a, in the same banded format as ab\n\n See also\n --------\n cho_solve_banded : Solve a linear set of equations, given the Cholesky factorization\n of a banded Hermitian matrix.\n\n Examples\n --------\n >>> from scipy.linalg import cholesky_banded\n >>> from numpy import allclose, zeros, diag\n >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])\n >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)\n >>> A = A + A.conj().T + np.diag(Ab[2, :])\n >>> c = cholesky_banded(Ab)\n >>> C = np.diag(c[0, 2:], k=2) + np.diag(c[1, 1:], k=1) + np.diag(c[2, :])\n >>> np.allclose(C.conj().T @ C - A, np.zeros((5, 5)))\n True\n\n \"\"\"\n if check_finite:\n ab = asarray_chkfinite(ab)\n else:\n ab = asarray(ab)\n\n pbtrf, = get_lapack_funcs(('pbtrf',), (ab,))\n c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab)\n if info > 0:\n raise LinAlgError(\"%d-th leading minor not positive definite\" % info)\n if info < 0:\n raise ValueError('illegal value in %d-th argument of internal pbtrf'\n % -info)\n return c\n\n\ndef cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True):\n \"\"\"\n Solve the linear equations ``A x = b``, given the Cholesky factorization of\n the banded Hermitian ``A``.\n\n Parameters\n ----------\n (cb, lower) : tuple, (ndarray, bool)\n `cb` is the Cholesky factorization of A, as given by cholesky_banded.\n `lower` must be the same value that was given to cholesky_banded.\n b : array_like\n Right-hand side\n overwrite_b : bool, optional\n If True, the function will overwrite the values in `b`.\n check_finite : bool, optional\n Whether to check that the input matrices contain only finite numbers.\n Disabling may give a performance gain, but may result in problems\n (crashes, non-termination) if the inputs do contain infinities or NaNs.\n\n Returns\n -------\n x : array\n The solution to the system A x = b\n\n See also\n --------\n cholesky_banded : Cholesky factorization of a banded matrix\n\n Notes\n -----\n\n .. 
versionadded:: 0.8.0\n\n Examples\n --------\n >>> from scipy.linalg import cholesky_banded, cho_solve_banded\n >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]])\n >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1)\n >>> A = A + A.conj().T + np.diag(Ab[2, :])\n >>> c = cholesky_banded(Ab)\n >>> x = cho_solve_banded((c, False), np.ones(5))\n >>> np.allclose(A @ x - np.ones(5), np.zeros(5))\n True\n\n \"\"\"\n (cb, lower) = cb_and_lower\n if check_finite:\n cb = asarray_chkfinite(cb)\n b = asarray_chkfinite(b)\n else:\n cb = asarray(cb)\n b = asarray(b)\n\n # Validate shapes.\n if cb.shape[-1] != b.shape[0]:\n raise ValueError(\"shapes of cb and b are not compatible.\")\n\n pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b))\n x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b)\n if info > 0:\n raise LinAlgError(\"%dth leading minor not positive definite\" % info)\n if info < 0:\n raise ValueError('illegal value in %dth argument of internal pbtrs'\n % -info)\n return x\n",
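A short, hedged demonstration of the factor/solve pair defined above: factor a symmetric positive-definite matrix once with cho_factor, then reuse the factorization in cho_solve. The matrix construction (a Gram matrix plus a diagonal shift) is illustrative only.
"# Hedged sketch: factor once with cho_factor, then solve with cho_solve.\nimport numpy as np\nfrom scipy.linalg import cho_factor, cho_solve\n\nrng = np.random.default_rng(0)\nM = rng.standard_normal((5, 5))\nA = M @ M.T + 5 * np.eye(5) # symmetric positive definite by construction\nb = rng.standard_normal(5)\n\nc, low = cho_factor(A) # factor once ...\nx = cho_solve((c, low), b) # ... then solve cheaply\nassert np.allclose(A @ x, b)\n",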
"\"\"\"Trust-region optimization.\"\"\"\nimport math\n\nimport numpy as np\nimport scipy.linalg\nfrom .optimize import (_check_unknown_options, _wrap_function, _status_message,\n OptimizeResult, _prepare_scalar_function)\nfrom scipy.optimize._hessian_update_strategy import HessianUpdateStrategy\nfrom scipy.optimize._differentiable_functions import FD_METHODS\n__all__ = []\n\n\nclass BaseQuadraticSubproblem:\n \"\"\"\n Base/abstract class defining the quadratic model for trust-region\n minimization. Child classes must implement the ``solve`` method.\n\n Values of the objective function, Jacobian and Hessian (if provided) at\n the current iterate ``x`` are evaluated on demand and then stored as\n attributes ``fun``, ``jac``, ``hess``.\n \"\"\"\n\n def __init__(self, x, fun, jac, hess=None, hessp=None):\n self._x = x\n self._f = None\n self._g = None\n self._h = None\n self._g_mag = None\n self._cauchy_point = None\n self._newton_point = None\n self._fun = fun\n self._jac = jac\n self._hess = hess\n self._hessp = hessp\n\n def __call__(self, p):\n return self.fun + np.dot(self.jac, p) + 0.5 * np.dot(p, self.hessp(p))\n\n @property\n def fun(self):\n \"\"\"Value of objective function at current iteration.\"\"\"\n if self._f is None:\n self._f = self._fun(self._x)\n return self._f\n\n @property\n def jac(self):\n \"\"\"Value of Jacobian of objective function at current iteration.\"\"\"\n if self._g is None:\n self._g = self._jac(self._x)\n return self._g\n\n @property\n def hess(self):\n \"\"\"Value of Hessian of objective function at current iteration.\"\"\"\n if self._h is None:\n self._h = self._hess(self._x)\n return self._h\n\n def hessp(self, p):\n if self._hessp is not None:\n return self._hessp(self._x, p)\n else:\n return np.dot(self.hess, p)\n\n @property\n def jac_mag(self):\n \"\"\"Magnitude of jacobian of objective function at current iteration.\"\"\"\n if self._g_mag is None:\n self._g_mag = scipy.linalg.norm(self.jac)\n return self._g_mag\n\n def get_boundaries_intersections(self, z, d, trust_radius):\n \"\"\"\n Solve the scalar quadratic equation ||z + t d|| == trust_radius.\n This is like a line-sphere intersection.\n Return the two values of t, sorted from low to high.\n \"\"\"\n a = np.dot(d, d)\n b = 2 * np.dot(z, d)\n c = np.dot(z, z) - trust_radius**2\n sqrt_discriminant = math.sqrt(b*b - 4*a*c)\n\n # The following calculation is mathematically\n # equivalent to:\n # ta = (-b - sqrt_discriminant) / (2*a)\n # tb = (-b + sqrt_discriminant) / (2*a)\n # but produce smaller round off errors.\n # Look at Matrix Computation p.97\n # for a better justification.\n aux = b + math.copysign(sqrt_discriminant, b)\n ta = -aux / (2*a)\n tb = -2*c / aux\n return sorted([ta, tb])\n\n def solve(self, trust_radius):\n raise NotImplementedError('The solve method should be implemented by '\n 'the child class')\n\n\ndef _minimize_trust_region(fun, x0, args=(), jac=None, hess=None, hessp=None,\n subproblem=None, initial_trust_radius=1.0,\n max_trust_radius=1000.0, eta=0.15, gtol=1e-4,\n maxiter=None, disp=False, return_all=False,\n callback=None, inexact=True, **unknown_options):\n \"\"\"\n Minimization of scalar function of one or more variables using a\n trust-region algorithm.\n\n Options for the trust-region algorithm are:\n initial_trust_radius : float\n Initial trust radius.\n max_trust_radius : float\n Never propose steps that are longer than this value.\n eta : float\n Trust region related acceptance stringency for proposed steps.\n gtol : float\n Gradient norm must be less than `gtol`\n 
before successful termination.\n maxiter : int\n Maximum number of iterations to perform.\n disp : bool\n If True, print convergence message.\n inexact : bool\n Accuracy to solve subproblems. If True, requires fewer nonlinear\n iterations, but more vector products. Only effective for method\n trust-krylov.\n\n This function is called by the `minimize` function.\n It is not supposed to be called directly.\n \"\"\"\n _check_unknown_options(unknown_options)\n\n if jac is None:\n raise ValueError('Jacobian is currently required for trust-region '\n 'methods')\n if hess is None and hessp is None:\n raise ValueError('Either the Hessian or the Hessian-vector product '\n 'is currently required for trust-region methods')\n if subproblem is None:\n raise ValueError('A subproblem solving strategy is required for '\n 'trust-region methods')\n if not (0 <= eta < 0.25):\n raise ValueError('invalid acceptance stringency')\n if max_trust_radius <= 0:\n raise ValueError('the max trust radius must be positive')\n if initial_trust_radius <= 0:\n raise ValueError('the initial trust radius must be positive')\n if initial_trust_radius >= max_trust_radius:\n raise ValueError('the initial trust radius must be less than the '\n 'max trust radius')\n\n # force the initial guess into a nice format\n x0 = np.asarray(x0).flatten()\n\n # A ScalarFunction representing the problem. This caches calls to fun, jac,\n # hess.\n sf = _prepare_scalar_function(fun, x0, jac=jac, hess=hess, args=args)\n fun = sf.fun\n jac = sf.grad\n if callable(hess):\n hess = sf.hess\n elif callable(hessp):\n # this elif statement must come before examining whether hess\n # is estimated by FD methods or a HessianUpdateStrategy\n pass\n elif (hess in FD_METHODS or isinstance(hess, HessianUpdateStrategy)):\n # If the Hessian is being estimated by finite differences or a\n # Hessian update strategy then ScalarFunction.hess returns a\n # LinearOperator or a HessianUpdateStrategy. This enables the\n # calculation/creation of a hessp. 
BUT you only want to do this\n # if the user *hasn't* provided a callable(hessp) function.\n hess = None\n def hessp(x, p, *args):\n return sf.hess(x).dot(p)\n else:\n raise ValueError('Either the Hessian or the Hessian-vector product '\n 'is currently required for trust-region methods')\n\n # ScalarFunction doesn't represent hessp\n nhessp, hessp = _wrap_function(hessp, args)\n\n # limit the number of iterations\n if maxiter is None:\n maxiter = len(x0)*200\n\n # init the search status\n warnflag = 0\n\n # initialize the search\n trust_radius = initial_trust_radius\n x = x0\n if return_all:\n allvecs = [x]\n m = subproblem(x, fun, jac, hess, hessp)\n k = 0\n\n # search for the function min\n # do not even start if the gradient is small enough\n while m.jac_mag >= gtol:\n\n # Solve the sub-problem.\n # This gives us the proposed step relative to the current position\n # and it tells us whether the proposed step\n # has reached the trust region boundary or not.\n try:\n p, hits_boundary = m.solve(trust_radius)\n except np.linalg.LinAlgError:\n warnflag = 3\n break\n\n # calculate the predicted value at the proposed point\n predicted_value = m(p)\n\n # define the local approximation at the proposed point\n x_proposed = x + p\n m_proposed = subproblem(x_proposed, fun, jac, hess, hessp)\n\n # evaluate the ratio defined in equation (4.4)\n actual_reduction = m.fun - m_proposed.fun\n predicted_reduction = m.fun - predicted_value\n if predicted_reduction <= 0:\n warnflag = 2\n break\n rho = actual_reduction / predicted_reduction\n\n # update the trust radius according to the actual/predicted ratio\n if rho < 0.25:\n trust_radius *= 0.25\n elif rho > 0.75 and hits_boundary:\n trust_radius = min(2*trust_radius, max_trust_radius)\n\n # if the ratio is high enough then accept the proposed step\n if rho > eta:\n x = x_proposed\n m = m_proposed\n\n # append the best guess, call back, increment the iteration count\n if return_all:\n allvecs.append(np.copy(x))\n if callback is not None:\n callback(np.copy(x))\n k += 1\n\n # check if the gradient is small enough to stop\n if m.jac_mag < gtol:\n warnflag = 0\n break\n\n # check if we have looked at enough iterations\n if k >= maxiter:\n warnflag = 1\n break\n\n # print some stuff if requested\n status_messages = (\n _status_message['success'],\n _status_message['maxiter'],\n 'A bad approximation caused failure to predict improvement.',\n 'A linalg error occurred, such as a non-psd Hessian.',\n )\n if disp:\n if warnflag == 0:\n print(status_messages[warnflag])\n else:\n print('Warning: ' + status_messages[warnflag])\n print(\" Current function value: %f\" % m.fun)\n print(\" Iterations: %d\" % k)\n print(\" Function evaluations: %d\" % sf.nfev)\n print(\" Gradient evaluations: %d\" % sf.ngev)\n print(\" Hessian evaluations: %d\" % (sf.nhev + nhessp[0]))\n\n result = OptimizeResult(x=x, success=(warnflag == 0), status=warnflag,\n fun=m.fun, jac=m.jac, nfev=sf.nfev, njev=sf.ngev,\n nhev=sf.nhev + nhessp[0], nit=k,\n message=status_messages[warnflag])\n\n if hess is not None:\n result['hess'] = m.hess\n\n if return_all:\n result['allvecs'] = allvecs\n\n return result\n",
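The driver above is reached through scipy.optimize.minimize rather than called directly. A hedged sketch minimizing the Rosenbrock function with 'trust-ncg', one of the methods that dispatches to _minimize_trust_region, using the exact derivatives SciPy ships for that test function.
"# Hedged sketch: _minimize_trust_region is reached via minimize(method='trust-ncg').\nimport numpy as np\nfrom scipy.optimize import minimize, rosen, rosen_der, rosen_hess\n\nx0 = np.array([1.3, 0.7, 0.8, 1.9, 1.2])\nres = minimize(rosen, x0, method='trust-ncg', jac=rosen_der, hess=rosen_hess,\n options={'gtol': 1e-8})\nprint(res.x) # should approach np.ones(5)\n",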
"import numpy as np\nfrom numpy.testing import (assert_, assert_approx_equal,\n assert_allclose, assert_array_equal, assert_equal,\n assert_array_almost_equal_nulp, suppress_warnings)\nimport pytest\nfrom pytest import raises as assert_raises\n\nfrom scipy import signal\nfrom scipy.fft import fftfreq\nfrom scipy.signal import (periodogram, welch, lombscargle, csd, coherence,\n spectrogram, stft, istft, check_COLA, check_NOLA)\nfrom scipy.signal.spectral import _spectral_helper\n\n\nclass TestPeriodogram:\n def test_real_onesided_even(self):\n x = np.zeros(16)\n x[0] = 1\n f, p = periodogram(x)\n assert_allclose(f, np.linspace(0, 0.5, 9))\n q = np.ones(9)\n q[0] = 0\n q[-1] /= 2.0\n q /= 8\n assert_allclose(p, q)\n\n def test_real_onesided_odd(self):\n x = np.zeros(15)\n x[0] = 1\n f, p = periodogram(x)\n assert_allclose(f, np.arange(8.0)/15.0)\n q = np.ones(8)\n q[0] = 0\n q *= 2.0/15.0\n assert_allclose(p, q, atol=1e-15)\n\n def test_real_twosided(self):\n x = np.zeros(16)\n x[0] = 1\n f, p = periodogram(x, return_onesided=False)\n assert_allclose(f, fftfreq(16, 1.0))\n q = np.full(16, 1/16.0)\n q[0] = 0\n assert_allclose(p, q)\n\n def test_real_spectrum(self):\n x = np.zeros(16)\n x[0] = 1\n f, p = periodogram(x, scaling='spectrum')\n g, q = periodogram(x, scaling='density')\n assert_allclose(f, np.linspace(0, 0.5, 9))\n assert_allclose(p, q/16.0)\n\n def test_integer_even(self):\n x = np.zeros(16, dtype=int)\n x[0] = 1\n f, p = periodogram(x)\n assert_allclose(f, np.linspace(0, 0.5, 9))\n q = np.ones(9)\n q[0] = 0\n q[-1] /= 2.0\n q /= 8\n assert_allclose(p, q)\n\n def test_integer_odd(self):\n x = np.zeros(15, dtype=int)\n x[0] = 1\n f, p = periodogram(x)\n assert_allclose(f, np.arange(8.0)/15.0)\n q = np.ones(8)\n q[0] = 0\n q *= 2.0/15.0\n assert_allclose(p, q, atol=1e-15)\n\n def test_integer_twosided(self):\n x = np.zeros(16, dtype=int)\n x[0] = 1\n f, p = periodogram(x, return_onesided=False)\n assert_allclose(f, fftfreq(16, 1.0))\n q = np.full(16, 1/16.0)\n q[0] = 0\n assert_allclose(p, q)\n\n def test_complex(self):\n x = np.zeros(16, np.complex128)\n x[0] = 1.0 + 2.0j\n f, p = periodogram(x, return_onesided=False)\n assert_allclose(f, fftfreq(16, 1.0))\n q = np.full(16, 5.0/16.0)\n q[0] = 0\n assert_allclose(p, q)\n\n def test_unk_scaling(self):\n assert_raises(ValueError, periodogram, np.zeros(4, np.complex128),\n scaling='foo')\n\n def test_nd_axis_m1(self):\n x = np.zeros(20, dtype=np.float64)\n x = x.reshape((2,1,10))\n x[:,:,0] = 1.0\n f, p = periodogram(x)\n assert_array_equal(p.shape, (2, 1, 6))\n assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60)\n f0, p0 = periodogram(x[0,0,:])\n assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60)\n\n def test_nd_axis_0(self):\n x = np.zeros(20, dtype=np.float64)\n x = x.reshape((10,2,1))\n x[0,:,:] = 1.0\n f, p = periodogram(x, axis=0)\n assert_array_equal(p.shape, (6,2,1))\n assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60)\n f0, p0 = periodogram(x[:,0,0])\n assert_array_almost_equal_nulp(p0, p[:,1,0])\n\n def test_window_external(self):\n x = np.zeros(16)\n x[0] = 1\n f, p = periodogram(x, 10, 'hann')\n win = signal.get_window('hann', 16)\n fe, pe = periodogram(x, 10, win)\n assert_array_almost_equal_nulp(p, pe)\n assert_array_almost_equal_nulp(f, fe)\n win_err = signal.get_window('hann', 32)\n assert_raises(ValueError, periodogram, x,\n 10, win_err) # win longer than signal\n\n def test_padded_fft(self):\n x = np.zeros(16)\n x[0] = 1\n f, p = periodogram(x)\n fp, pp = periodogram(x, nfft=32)\n assert_allclose(f, 
fp[::2])\n assert_allclose(p, pp[::2])\n assert_array_equal(pp.shape, (17,))\n\n def test_empty_input(self):\n f, p = periodogram([])\n assert_array_equal(f.shape, (0,))\n assert_array_equal(p.shape, (0,))\n for shape in [(0,), (3,0), (0,5,2)]:\n f, p = periodogram(np.empty(shape))\n assert_array_equal(f.shape, shape)\n assert_array_equal(p.shape, shape)\n\n def test_empty_input_other_axis(self):\n for shape in [(3,0), (0,5,2)]:\n f, p = periodogram(np.empty(shape), axis=1)\n assert_array_equal(f.shape, shape)\n assert_array_equal(p.shape, shape)\n\n def test_short_nfft(self):\n x = np.zeros(18)\n x[0] = 1\n f, p = periodogram(x, nfft=16)\n assert_allclose(f, np.linspace(0, 0.5, 9))\n q = np.ones(9)\n q[0] = 0\n q[-1] /= 2.0\n q /= 8\n assert_allclose(p, q)\n\n def test_nfft_is_xshape(self):\n x = np.zeros(16)\n x[0] = 1\n f, p = periodogram(x, nfft=16)\n assert_allclose(f, np.linspace(0, 0.5, 9))\n q = np.ones(9)\n q[0] = 0\n q[-1] /= 2.0\n q /= 8\n assert_allclose(p, q)\n\n def test_real_onesided_even_32(self):\n x = np.zeros(16, 'f')\n x[0] = 1\n f, p = periodogram(x)\n assert_allclose(f, np.linspace(0, 0.5, 9))\n q = np.ones(9, 'f')\n q[0] = 0\n q[-1] /= 2.0\n q /= 8\n assert_allclose(p, q)\n assert_(p.dtype == q.dtype)\n\n def test_real_onesided_odd_32(self):\n x = np.zeros(15, 'f')\n x[0] = 1\n f, p = periodogram(x)\n assert_allclose(f, np.arange(8.0)/15.0)\n q = np.ones(8, 'f')\n q[0] = 0\n q *= 2.0/15.0\n assert_allclose(p, q, atol=1e-7)\n assert_(p.dtype == q.dtype)\n\n def test_real_twosided_32(self):\n x = np.zeros(16, 'f')\n x[0] = 1\n f, p = periodogram(x, return_onesided=False)\n assert_allclose(f, fftfreq(16, 1.0))\n q = np.full(16, 1/16.0, 'f')\n q[0] = 0\n assert_allclose(p, q)\n assert_(p.dtype == q.dtype)\n\n def test_complex_32(self):\n x = np.zeros(16, 'F')\n x[0] = 1.0 + 2.0j\n f, p = periodogram(x, return_onesided=False)\n assert_allclose(f, fftfreq(16, 1.0))\n q = np.full(16, 5.0/16.0, 'f')\n q[0] = 0\n assert_allclose(p, q)\n assert_(p.dtype == q.dtype)\n\n\nclass TestWelch:\n def test_real_onesided_even(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=8)\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,\n 0.11111111])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_real_onesided_odd(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=9)\n assert_allclose(f, np.arange(5.0)/9.0)\n q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,\n 0.17072113])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_real_twosided(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,\n 0.11111111, 0.11111111, 0.11111111, 0.07638889])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_real_spectrum(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=8, scaling='spectrum')\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,\n 0.02083333])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_integer_onesided_even(self):\n x = np.zeros(16, dtype=int)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=8)\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,\n 0.11111111])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_integer_onesided_odd(self):\n x = 
np.zeros(16, dtype=int)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=9)\n assert_allclose(f, np.arange(5.0)/9.0)\n q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,\n 0.17072113])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_integer_twosided(self):\n x = np.zeros(16, dtype=int)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,\n 0.11111111, 0.11111111, 0.11111111, 0.07638889])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_complex(self):\n x = np.zeros(16, np.complex128)\n x[0] = 1.0 + 2.0j\n x[8] = 1.0 + 2.0j\n f, p = welch(x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,\n 0.55555556, 0.55555556, 0.55555556, 0.38194444])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_unk_scaling(self):\n assert_raises(ValueError, welch, np.zeros(4, np.complex128),\n scaling='foo', nperseg=4)\n\n def test_detrend_linear(self):\n x = np.arange(10, dtype=np.float64) + 0.04\n f, p = welch(x, nperseg=10, detrend='linear')\n assert_allclose(p, np.zeros_like(p), atol=1e-15)\n\n def test_no_detrending(self):\n x = np.arange(10, dtype=np.float64) + 0.04\n f1, p1 = welch(x, nperseg=10, detrend=False)\n f2, p2 = welch(x, nperseg=10, detrend=lambda x: x)\n assert_allclose(f1, f2, atol=1e-15)\n assert_allclose(p1, p2, atol=1e-15)\n\n def test_detrend_external(self):\n x = np.arange(10, dtype=np.float64) + 0.04\n f, p = welch(x, nperseg=10,\n detrend=lambda seg: signal.detrend(seg, type='l'))\n assert_allclose(p, np.zeros_like(p), atol=1e-15)\n\n def test_detrend_external_nd_m1(self):\n x = np.arange(40, dtype=np.float64) + 0.04\n x = x.reshape((2,2,10))\n f, p = welch(x, nperseg=10,\n detrend=lambda seg: signal.detrend(seg, type='l'))\n assert_allclose(p, np.zeros_like(p), atol=1e-15)\n\n def test_detrend_external_nd_0(self):\n x = np.arange(20, dtype=np.float64) + 0.04\n x = x.reshape((2,1,10))\n x = np.rollaxis(x, 2, 0)\n f, p = welch(x, nperseg=10, axis=0,\n detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))\n assert_allclose(p, np.zeros_like(p), atol=1e-15)\n\n def test_nd_axis_m1(self):\n x = np.arange(20, dtype=np.float64) + 0.04\n x = x.reshape((2,1,10))\n f, p = welch(x, nperseg=10)\n assert_array_equal(p.shape, (2, 1, 6))\n assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)\n f0, p0 = welch(x[0,0,:], nperseg=10)\n assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)\n\n def test_nd_axis_0(self):\n x = np.arange(20, dtype=np.float64) + 0.04\n x = x.reshape((10,2,1))\n f, p = welch(x, nperseg=10, axis=0)\n assert_array_equal(p.shape, (6,2,1))\n assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)\n f0, p0 = welch(x[:,0,0], nperseg=10)\n assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)\n\n def test_window_external(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, 10, 'hann', nperseg=8)\n win = signal.get_window('hann', 8)\n fe, pe = welch(x, 10, win, nperseg=None)\n assert_array_almost_equal_nulp(p, pe)\n assert_array_almost_equal_nulp(f, fe)\n assert_array_equal(fe.shape, (5,)) # because win length used as nperseg\n assert_array_equal(pe.shape, (5,))\n assert_raises(ValueError, welch, x,\n 10, win, nperseg=4) # because nperseg != win.shape[-1]\n win_err = signal.get_window('hann', 32)\n assert_raises(ValueError, welch, x,\n 10, win_err, nperseg=None) # win longer than signal\n\n def 
test_empty_input(self):\n f, p = welch([])\n assert_array_equal(f.shape, (0,))\n assert_array_equal(p.shape, (0,))\n for shape in [(0,), (3,0), (0,5,2)]:\n f, p = welch(np.empty(shape))\n assert_array_equal(f.shape, shape)\n assert_array_equal(p.shape, shape)\n\n def test_empty_input_other_axis(self):\n for shape in [(3,0), (0,5,2)]:\n f, p = welch(np.empty(shape), axis=1)\n assert_array_equal(f.shape, shape)\n assert_array_equal(p.shape, shape)\n\n def test_short_data(self):\n x = np.zeros(8)\n x[0] = 1\n #for string-like window, input signal length < nperseg value gives\n #UserWarning, sets nperseg to x.shape[-1]\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"nperseg = 256 is greater than input length = 8, using nperseg = 8\")\n f, p = welch(x,window='hann') # default nperseg\n f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg\n f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning\n assert_allclose(f, f2)\n assert_allclose(p, p2)\n assert_allclose(f1, f2)\n assert_allclose(p1, p2)\n\n def test_window_long_or_nd(self):\n assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1]))\n assert_raises(ValueError, welch, np.zeros(4), 1,\n np.arange(6).reshape((2,3)))\n\n def test_nondefault_noverlap(self):\n x = np.zeros(64)\n x[::8] = 1\n f, p = welch(x, nperseg=16, noverlap=4)\n q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,\n 1./6.])\n assert_allclose(p, q, atol=1e-12)\n\n def test_bad_noverlap(self):\n assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7)\n\n def test_nfft_too_short(self):\n assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4)\n\n def test_real_onesided_even_32(self):\n x = np.zeros(16, 'f')\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=8)\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,\n 0.11111111], 'f')\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n assert_(p.dtype == q.dtype)\n\n def test_real_onesided_odd_32(self):\n x = np.zeros(16, 'f')\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=9)\n assert_allclose(f, np.arange(5.0)/9.0)\n q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,\n 0.17072113], 'f')\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n assert_(p.dtype == q.dtype)\n\n def test_real_twosided_32(self):\n x = np.zeros(16, 'f')\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.08333333, 0.07638889, 0.11111111,\n 0.11111111, 0.11111111, 0.11111111, 0.11111111,\n 0.07638889], 'f')\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n assert_(p.dtype == q.dtype)\n\n def test_complex_32(self):\n x = np.zeros(16, 'F')\n x[0] = 1.0 + 2.0j\n x[8] = 1.0 + 2.0j\n f, p = welch(x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,\n 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n assert_(p.dtype == q.dtype,\n 'dtype mismatch, %s, %s' % (p.dtype, q.dtype))\n\n def test_padded_freqs(self):\n x = np.zeros(12)\n\n nfft = 24\n f = fftfreq(nfft, 1.0)[:nfft//2+1]\n f[-1] *= -1\n fodd, _ = welch(x, nperseg=5, nfft=nfft)\n feven, _ = welch(x, nperseg=6, nfft=nfft)\n assert_allclose(f, fodd)\n assert_allclose(f, feven)\n\n nfft = 25\n f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]\n fodd, _ = welch(x, nperseg=5, nfft=nfft)\n feven, _ = welch(x, nperseg=6, nfft=nfft)\n assert_allclose(f, fodd)\n 
assert_allclose(f, feven)\n\n def test_window_correction(self):\n A = 20\n fs = 1e4\n nperseg = int(fs//10)\n fsig = 300\n ii = int(fsig*nperseg//fs) # Freq index of fsig\n\n tt = np.arange(fs)/fs\n x = A*np.sin(2*np.pi*fsig*tt)\n\n for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']:\n _, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window,\n scaling='spectrum')\n freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window,\n scaling='density')\n\n # Check peak height at signal frequency for 'spectrum'\n assert_allclose(p_spec[ii], A**2/2.0)\n # Check integrated spectrum RMS for 'density'\n assert_allclose(np.sqrt(np.trapz(p_dens, freq)), A*np.sqrt(2)/2,\n rtol=1e-3)\n\n def test_axis_rolling(self):\n np.random.seed(1234)\n\n x_flat = np.random.randn(1024)\n _, p_flat = welch(x_flat)\n\n for a in range(3):\n newshape = [1,]*3\n newshape[a] = -1\n x = x_flat.reshape(newshape)\n\n _, p_plus = welch(x, axis=a) # Positive axis index\n _, p_minus = welch(x, axis=a-x.ndim) # Negative axis index\n\n assert_equal(p_flat, p_plus.squeeze(), err_msg=a)\n assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim)\n\n def test_average(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = welch(x, nperseg=8, average='median')\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([.1, .05, 0., 1.54074396e-33, 0.])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n assert_raises(ValueError, welch, x, nperseg=8,\n average='unrecognised-average')\n\n\nclass TestCSD:\n def test_pad_shorter_x(self):\n x = np.zeros(8)\n y = np.zeros(12)\n\n f = np.linspace(0, 0.5, 7)\n c = np.zeros(7,dtype=np.complex128)\n f1, c1 = csd(x, y, nperseg=12)\n\n assert_allclose(f, f1)\n assert_allclose(c, c1)\n\n def test_pad_shorter_y(self):\n x = np.zeros(12)\n y = np.zeros(8)\n\n f = np.linspace(0, 0.5, 7)\n c = np.zeros(7,dtype=np.complex128)\n f1, c1 = csd(x, y, nperseg=12)\n\n assert_allclose(f, f1)\n assert_allclose(c, c1)\n\n def test_real_onesided_even(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=8)\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,\n 0.11111111])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_real_onesided_odd(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=9)\n assert_allclose(f, np.arange(5.0)/9.0)\n q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,\n 0.17072113])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_real_twosided(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,\n 0.11111111, 0.11111111, 0.11111111, 0.07638889])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_real_spectrum(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=8, scaling='spectrum')\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,\n 0.02083333])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_integer_onesided_even(self):\n x = np.zeros(16, dtype=int)\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=8)\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,\n 0.11111111])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_integer_onesided_odd(self):\n x = np.zeros(16, dtype=int)\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=9)\n 
assert_allclose(f, np.arange(5.0)/9.0)\n q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,\n 0.17072113])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_integer_twosided(self):\n x = np.zeros(16, dtype=int)\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,\n 0.11111111, 0.11111111, 0.11111111, 0.07638889])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_complex(self):\n x = np.zeros(16, np.complex128)\n x[0] = 1.0 + 2.0j\n x[8] = 1.0 + 2.0j\n f, p = csd(x, x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,\n 0.55555556, 0.55555556, 0.55555556, 0.38194444])\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n\n def test_unk_scaling(self):\n assert_raises(ValueError, csd, np.zeros(4, np.complex128),\n np.ones(4, np.complex128), scaling='foo', nperseg=4)\n\n def test_detrend_linear(self):\n x = np.arange(10, dtype=np.float64) + 0.04\n f, p = csd(x, x, nperseg=10, detrend='linear')\n assert_allclose(p, np.zeros_like(p), atol=1e-15)\n\n def test_no_detrending(self):\n x = np.arange(10, dtype=np.float64) + 0.04\n f1, p1 = csd(x, x, nperseg=10, detrend=False)\n f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x)\n assert_allclose(f1, f2, atol=1e-15)\n assert_allclose(p1, p2, atol=1e-15)\n\n def test_detrend_external(self):\n x = np.arange(10, dtype=np.float64) + 0.04\n f, p = csd(x, x, nperseg=10,\n detrend=lambda seg: signal.detrend(seg, type='l'))\n assert_allclose(p, np.zeros_like(p), atol=1e-15)\n\n def test_detrend_external_nd_m1(self):\n x = np.arange(40, dtype=np.float64) + 0.04\n x = x.reshape((2,2,10))\n f, p = csd(x, x, nperseg=10,\n detrend=lambda seg: signal.detrend(seg, type='l'))\n assert_allclose(p, np.zeros_like(p), atol=1e-15)\n\n def test_detrend_external_nd_0(self):\n x = np.arange(20, dtype=np.float64) + 0.04\n x = x.reshape((2,1,10))\n x = np.rollaxis(x, 2, 0)\n f, p = csd(x, x, nperseg=10, axis=0,\n detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))\n assert_allclose(p, np.zeros_like(p), atol=1e-15)\n\n def test_nd_axis_m1(self):\n x = np.arange(20, dtype=np.float64) + 0.04\n x = x.reshape((2,1,10))\n f, p = csd(x, x, nperseg=10)\n assert_array_equal(p.shape, (2, 1, 6))\n assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)\n f0, p0 = csd(x[0,0,:], x[0,0,:], nperseg=10)\n assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)\n\n def test_nd_axis_0(self):\n x = np.arange(20, dtype=np.float64) + 0.04\n x = x.reshape((10,2,1))\n f, p = csd(x, x, nperseg=10, axis=0)\n assert_array_equal(p.shape, (6,2,1))\n assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)\n f0, p0 = csd(x[:,0,0], x[:,0,0], nperseg=10)\n assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)\n\n def test_window_external(self):\n x = np.zeros(16)\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, 10, 'hann', 8)\n win = signal.get_window('hann', 8)\n fe, pe = csd(x, x, 10, win, nperseg=None)\n assert_array_almost_equal_nulp(p, pe)\n assert_array_almost_equal_nulp(f, fe)\n assert_array_equal(fe.shape, (5,)) # because win length used as nperseg\n assert_array_equal(pe.shape, (5,))\n assert_raises(ValueError, csd, x, x,\n 10, win, nperseg=256) # because nperseg != win.shape[-1]\n win_err = signal.get_window('hann', 32)\n assert_raises(ValueError, csd, x, x,\n 10, win_err, nperseg=None) # because win longer than signal\n\n def test_empty_input(self):\n 
f, p = csd([],np.zeros(10))\n assert_array_equal(f.shape, (0,))\n assert_array_equal(p.shape, (0,))\n\n f, p = csd(np.zeros(10),[])\n assert_array_equal(f.shape, (0,))\n assert_array_equal(p.shape, (0,))\n\n for shape in [(0,), (3,0), (0,5,2)]:\n f, p = csd(np.empty(shape), np.empty(shape))\n assert_array_equal(f.shape, shape)\n assert_array_equal(p.shape, shape)\n\n f, p = csd(np.ones(10), np.empty((5,0)))\n assert_array_equal(f.shape, (5,0))\n assert_array_equal(p.shape, (5,0))\n\n f, p = csd(np.empty((5,0)), np.ones(10))\n assert_array_equal(f.shape, (5,0))\n assert_array_equal(p.shape, (5,0))\n\n def test_empty_input_other_axis(self):\n for shape in [(3,0), (0,5,2)]:\n f, p = csd(np.empty(shape), np.empty(shape), axis=1)\n assert_array_equal(f.shape, shape)\n assert_array_equal(p.shape, shape)\n\n f, p = csd(np.empty((10,10,3)), np.zeros((10,0,1)), axis=1)\n assert_array_equal(f.shape, (10,0,3))\n assert_array_equal(p.shape, (10,0,3))\n\n f, p = csd(np.empty((10,0,1)), np.zeros((10,10,3)), axis=1)\n assert_array_equal(f.shape, (10,0,3))\n assert_array_equal(p.shape, (10,0,3))\n\n def test_short_data(self):\n x = np.zeros(8)\n x[0] = 1\n\n #for string-like window, input signal length < nperseg value gives\n #UserWarning, sets nperseg to x.shape[-1]\n with suppress_warnings() as sup:\n sup.filter(UserWarning, \"nperseg = 256 is greater than input length = 8, using nperseg = 8\")\n f, p = csd(x, x, window='hann') # default nperseg\n f1, p1 = csd(x, x, window='hann', nperseg=256) # user-specified nperseg\n f2, p2 = csd(x, x, nperseg=8) # valid nperseg, doesn't give warning\n assert_allclose(f, f2)\n assert_allclose(p, p2)\n assert_allclose(f1, f2)\n assert_allclose(p1, p2)\n\n def test_window_long_or_nd(self):\n assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,\n np.array([1,1,1,1,1]))\n assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1,\n np.arange(6).reshape((2,3)))\n\n def test_nondefault_noverlap(self):\n x = np.zeros(64)\n x[::8] = 1\n f, p = csd(x, x, nperseg=16, noverlap=4)\n q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,\n 1./6.])\n assert_allclose(p, q, atol=1e-12)\n\n def test_bad_noverlap(self):\n assert_raises(ValueError, csd, np.zeros(4), np.ones(4), 1, 'hann',\n 2, 7)\n\n def test_nfft_too_short(self):\n assert_raises(ValueError, csd, np.ones(12), np.zeros(12), nfft=3,\n nperseg=4)\n\n def test_real_onesided_even_32(self):\n x = np.zeros(16, 'f')\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=8)\n assert_allclose(f, np.linspace(0, 0.5, 5))\n q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,\n 0.11111111], 'f')\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n assert_(p.dtype == q.dtype)\n\n def test_real_onesided_odd_32(self):\n x = np.zeros(16, 'f')\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=9)\n assert_allclose(f, np.arange(5.0)/9.0)\n q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,\n 0.17072113], 'f')\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n assert_(p.dtype == q.dtype)\n\n def test_real_twosided_32(self):\n x = np.zeros(16, 'f')\n x[0] = 1\n x[8] = 1\n f, p = csd(x, x, nperseg=8, return_onesided=False)\n assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.08333333, 0.07638889, 0.11111111,\n 0.11111111, 0.11111111, 0.11111111, 0.11111111,\n 0.07638889], 'f')\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n assert_(p.dtype == q.dtype)\n\n def test_complex_32(self):\n x = np.zeros(16, 'F')\n x[0] = 1.0 + 2.0j\n x[8] = 1.0 + 2.0j\n f, p = csd(x, x, nperseg=8, return_onesided=False)\n 
assert_allclose(f, fftfreq(8, 1.0))\n q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,\n 0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')\n assert_allclose(p, q, atol=1e-7, rtol=1e-7)\n assert_(p.dtype == q.dtype,\n 'dtype mismatch, %s, %s' % (p.dtype, q.dtype))\n\n def test_padded_freqs(self):\n x = np.zeros(12)\n y = np.ones(12)\n\n nfft = 24\n f = fftfreq(nfft, 1.0)[:nfft//2+1]\n f[-1] *= -1\n fodd, _ = csd(x, y, nperseg=5, nfft=nfft)\n feven, _ = csd(x, y, nperseg=6, nfft=nfft)\n assert_allclose(f, fodd)\n assert_allclose(f, feven)\n\n nfft = 25\n f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]\n fodd, _ = csd(x, y, nperseg=5, nfft=nfft)\n feven, _ = csd(x, y, nperseg=6, nfft=nfft)\n assert_allclose(f, fodd)\n assert_allclose(f, feven)\n\nclass TestCoherence:\n def test_identical_input(self):\n x = np.random.randn(20)\n y = np.copy(x) # So `y is x` -> False\n\n f = np.linspace(0, 0.5, 6)\n C = np.ones(6)\n f1, C1 = coherence(x, y, nperseg=10)\n\n assert_allclose(f, f1)\n assert_allclose(C, C1)\n\n def test_phase_shifted_input(self):\n x = np.random.randn(20)\n y = -x\n\n f = np.linspace(0, 0.5, 6)\n C = np.ones(6)\n f1, C1 = coherence(x, y, nperseg=10)\n\n assert_allclose(f, f1)\n assert_allclose(C, C1)\n\n\nclass TestSpectrogram:\n def test_average_all_segments(self):\n x = np.random.randn(1024)\n\n fs = 1.0\n window = ('tukey', 0.25)\n nperseg = 16\n noverlap = 2\n\n f, _, P = spectrogram(x, fs, window, nperseg, noverlap)\n fw, Pw = welch(x, fs, window, nperseg, noverlap)\n assert_allclose(f, fw)\n assert_allclose(np.mean(P, axis=-1), Pw)\n\n def test_window_external(self):\n x = np.random.randn(1024)\n\n fs = 1.0\n window = ('tukey', 0.25)\n nperseg = 16\n noverlap = 2\n f, _, P = spectrogram(x, fs, window, nperseg, noverlap)\n\n win = signal.get_window(('tukey', 0.25), 16)\n fe, _, Pe = spectrogram(x, fs, win, nperseg=None, noverlap=2)\n assert_array_equal(fe.shape, (9,)) # because win length used as nperseg\n assert_array_equal(Pe.shape, (9,73))\n assert_raises(ValueError, spectrogram, x,\n fs, win, nperseg=8) # because nperseg != win.shape[-1]\n win_err = signal.get_window(('tukey', 0.25), 2048)\n assert_raises(ValueError, spectrogram, x,\n fs, win_err, nperseg=None) # win longer than signal\n\n def test_short_data(self):\n x = np.random.randn(1024)\n fs = 1.0\n\n #for string-like window, input signal length < nperseg value gives\n #UserWarning, sets nperseg to x.shape[-1]\n f, _, p = spectrogram(x, fs, window=('tukey',0.25)) # default nperseg\n with suppress_warnings() as sup:\n sup.filter(UserWarning,\n \"nperseg = 1025 is greater than input length = 1024, using nperseg = 1024\")\n f1, _, p1 = spectrogram(x, fs, window=('tukey',0.25),\n nperseg=1025) # user-specified nperseg\n f2, _, p2 = spectrogram(x, fs, nperseg=256) # to compare w/default\n f3, _, p3 = spectrogram(x, fs, nperseg=1024) # compare w/user-spec'd\n assert_allclose(f, f2)\n assert_allclose(p, p2)\n assert_allclose(f1, f3)\n assert_allclose(p1, p3)\n\nclass TestLombscargle:\n def test_frequency(self):\n \"\"\"Test if frequency location of peak corresponds to frequency of\n generated input signal.\n \"\"\"\n\n # Input parameters\n ampl = 2.\n w = 1.\n phi = 0.5 * np.pi\n nin = 100\n nout = 1000\n p = 0.7 # Fraction of points to select\n\n # Randomly select a fraction of an array with timesteps\n np.random.seed(2353425)\n r = np.random.rand(nin)\n t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]\n\n # Plot a sine wave for the selected times\n x = ampl * np.sin(w*t + phi)\n\n # Define the array of 
frequencies for which to compute the periodogram\n f = np.linspace(0.01, 10., nout)\n\n # Calculate Lomb-Scargle periodogram\n P = lombscargle(t, x, f)\n\n # Check if difference between found frequency maximum and input\n # frequency is less than accuracy\n delta = f[1] - f[0]\n assert_(w - f[np.argmax(P)] < (delta/2.))\n\n def test_amplitude(self):\n # Test if height of peak in normalized Lomb-Scargle periodogram\n # corresponds to amplitude of the generated input signal.\n\n # Input parameters\n ampl = 2.\n w = 1.\n phi = 0.5 * np.pi\n nin = 100\n nout = 1000\n p = 0.7 # Fraction of points to select\n\n # Randomly select a fraction of an array with timesteps\n np.random.seed(2353425)\n r = np.random.rand(nin)\n t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]\n\n # Plot a sine wave for the selected times\n x = ampl * np.sin(w*t + phi)\n\n # Define the array of frequencies for which to compute the periodogram\n f = np.linspace(0.01, 10., nout)\n\n # Calculate Lomb-Scargle periodogram\n pgram = lombscargle(t, x, f)\n\n # Normalize\n pgram = np.sqrt(4 * pgram / t.shape[0])\n\n # Check if the height of the peak in the normalized periodogram\n # matches the input amplitude\n assert_approx_equal(np.max(pgram), ampl, significant=2)\n\n def test_precenter(self):\n # Test if precenter gives the same result as manually precentering.\n\n # Input parameters\n ampl = 2.\n w = 1.\n phi = 0.5 * np.pi\n nin = 100\n nout = 1000\n p = 0.7 # Fraction of points to select\n offset = 0.15 # Offset to be subtracted in pre-centering\n\n # Randomly select a fraction of an array with timesteps\n np.random.seed(2353425)\n r = np.random.rand(nin)\n t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]\n\n # Plot a sine wave for the selected times\n x = ampl * np.sin(w*t + phi) + offset\n\n # Define the array of frequencies for which to compute the periodogram\n f = np.linspace(0.01, 10., nout)\n\n # Calculate Lomb-Scargle periodogram\n pgram = lombscargle(t, x, f, precenter=True)\n pgram2 = lombscargle(t, x - x.mean(), f, precenter=False)\n\n # check if centering worked\n assert_allclose(pgram, pgram2)\n\n def test_normalize(self):\n # Test normalize option of Lomb-Scargle.\n\n # Input parameters\n ampl = 2.\n w = 1.\n phi = 0.5 * np.pi\n nin = 100\n nout = 1000\n p = 0.7 # Fraction of points to select\n\n # Randomly select a fraction of an array with timesteps\n np.random.seed(2353425)\n r = np.random.rand(nin)\n t = np.linspace(0.01*np.pi, 10.*np.pi, nin)[r >= p]\n\n # Plot a sine wave for the selected times\n x = ampl * np.sin(w*t + phi)\n\n # Define the array of frequencies for which to compute the periodogram\n f = np.linspace(0.01, 10., nout)\n\n # Calculate Lomb-Scargle periodogram\n pgram = lombscargle(t, x, f)\n pgram2 = lombscargle(t, x, f, normalize=True)\n\n # check if normalization works as expected\n assert_allclose(pgram * 2 / np.dot(x, x), pgram2)\n assert_approx_equal(np.max(pgram2), 1.0, significant=2)\n\n def test_wrong_shape(self):\n t = np.linspace(0, 1, 1)\n x = np.linspace(0, 1, 2)\n f = np.linspace(0, 1, 3)\n assert_raises(ValueError, lombscargle, t, x, f)\n\n def test_zero_division(self):\n t = np.zeros(1)\n x = np.zeros(1)\n f = np.zeros(1)\n assert_raises(ZeroDivisionError, lombscargle, t, x, f)\n\n def test_lombscargle_atan_vs_atan2(self):\n # https://github.com/scipy/scipy/issues/3787\n # This raised a ZeroDivisionError.\n t = np.linspace(0, 10, 1000, endpoint=False)\n x = np.sin(4*t)\n f = np.linspace(0, 50, 500, endpoint=False) + 0.1\n lombscargle(t, x, f*2*np.pi)\n\n\nclass 
TestSTFT:\n def test_input_validation(self):\n assert_raises(ValueError, check_COLA, 'hann', -10, 0)\n assert_raises(ValueError, check_COLA, 'hann', 10, 20)\n assert_raises(ValueError, check_COLA, np.ones((2,2)), 10, 0)\n assert_raises(ValueError, check_COLA, np.ones(20), 10, 0)\n\n assert_raises(ValueError, check_NOLA, 'hann', -10, 0)\n assert_raises(ValueError, check_NOLA, 'hann', 10, 20)\n assert_raises(ValueError, check_NOLA, np.ones((2,2)), 10, 0)\n assert_raises(ValueError, check_NOLA, np.ones(20), 10, 0)\n assert_raises(ValueError, check_NOLA, 'hann', 64, -32)\n\n x = np.zeros(1024)\n z = np.array(stft(x), dtype=object)\n\n assert_raises(ValueError, stft, x, window=np.ones((2,2)))\n assert_raises(ValueError, stft, x, window=np.ones(10), nperseg=256)\n assert_raises(ValueError, stft, x, nperseg=-256)\n assert_raises(ValueError, stft, x, nperseg=256, noverlap=1024)\n assert_raises(ValueError, stft, x, nperseg=256, nfft=8)\n\n assert_raises(ValueError, istft, x) # Not 2d\n assert_raises(ValueError, istft, z, window=np.ones((2,2)))\n assert_raises(ValueError, istft, z, window=np.ones(10), nperseg=256)\n assert_raises(ValueError, istft, z, nperseg=-256)\n assert_raises(ValueError, istft, z, nperseg=256, noverlap=1024)\n assert_raises(ValueError, istft, z, nperseg=256, nfft=8)\n assert_raises(ValueError, istft, z, nperseg=256, noverlap=0,\n window='hann') # Doesn't meet COLA\n assert_raises(ValueError, istft, z, time_axis=0, freq_axis=0)\n\n assert_raises(ValueError, _spectral_helper, x, x, mode='foo')\n assert_raises(ValueError, _spectral_helper, x[:512], x[512:],\n mode='stft')\n assert_raises(ValueError, _spectral_helper, x, x, boundary='foo')\n\n def test_check_COLA(self):\n settings = [\n ('boxcar', 10, 0),\n ('boxcar', 10, 9),\n ('bartlett', 51, 26),\n ('hann', 256, 128),\n ('hann', 256, 192),\n ('blackman', 300, 200),\n (('tukey', 0.5), 256, 64),\n ('hann', 256, 255),\n ]\n\n for setting in settings:\n msg = '{0}, {1}, {2}'.format(*setting)\n assert_equal(True, check_COLA(*setting), err_msg=msg)\n\n def test_check_NOLA(self):\n settings_pass = [\n ('boxcar', 10, 0),\n ('boxcar', 10, 9),\n ('boxcar', 10, 7),\n ('bartlett', 51, 26),\n ('bartlett', 51, 10),\n ('hann', 256, 128),\n ('hann', 256, 192),\n ('hann', 256, 37),\n ('blackman', 300, 200),\n ('blackman', 300, 123),\n (('tukey', 0.5), 256, 64),\n (('tukey', 0.5), 256, 38),\n ('hann', 256, 255),\n ('hann', 256, 39),\n ]\n for setting in settings_pass:\n msg = '{0}, {1}, {2}'.format(*setting)\n assert_equal(True, check_NOLA(*setting), err_msg=msg)\n\n w_fail = np.ones(16)\n w_fail[::2] = 0\n settings_fail = [\n (w_fail, len(w_fail), len(w_fail) // 2),\n ('hann', 64, 0),\n ]\n for setting in settings_fail:\n msg = '{0}, {1}, {2}'.format(*setting)\n assert_equal(False, check_NOLA(*setting), err_msg=msg)\n\n def test_average_all_segments(self):\n np.random.seed(1234)\n x = np.random.randn(1024)\n\n fs = 1.0\n window = 'hann'\n nperseg = 16\n noverlap = 8\n\n # Compare twosided, because onesided welch doubles non-DC terms to\n # account for power at negative frequencies. 
stft doesn't do this,\n # because it breaks invertibility.\n f, _, Z = stft(x, fs, window, nperseg, noverlap, padded=False,\n return_onesided=False, boundary=None)\n fw, Pw = welch(x, fs, window, nperseg, noverlap, return_onesided=False,\n scaling='spectrum', detrend=False)\n\n assert_allclose(f, fw)\n assert_allclose(np.mean(np.abs(Z)**2, axis=-1), Pw)\n\n def test_permute_axes(self):\n np.random.seed(1234)\n x = np.random.randn(1024)\n\n fs = 1.0\n window = 'hann'\n nperseg = 16\n noverlap = 8\n\n f1, t1, Z1 = stft(x, fs, window, nperseg, noverlap)\n f2, t2, Z2 = stft(x.reshape((-1, 1, 1)), fs, window, nperseg, noverlap,\n axis=0)\n\n t3, x1 = istft(Z1, fs, window, nperseg, noverlap)\n t4, x2 = istft(Z2.T, fs, window, nperseg, noverlap, time_axis=0,\n freq_axis=-1)\n\n assert_allclose(f1, f2)\n assert_allclose(t1, t2)\n assert_allclose(t3, t4)\n assert_allclose(Z1, Z2[:, 0, 0, :])\n assert_allclose(x1, x2[:, 0, 0])\n\n def test_roundtrip_real(self):\n np.random.seed(1234)\n\n settings = [\n ('boxcar', 100, 10, 0), # Test no overlap\n ('boxcar', 100, 10, 9), # Test high overlap\n ('bartlett', 101, 51, 26), # Test odd nperseg\n ('hann', 1024, 256, 128), # Test defaults\n (('tukey', 0.5), 1152, 256, 64), # Test Tukey\n ('hann', 1024, 256, 255), # Test overlapped hann\n ]\n\n for window, N, nperseg, noverlap in settings:\n t = np.arange(N)\n x = 10*np.random.randn(t.size)\n\n _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=False)\n\n tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,\n window=window)\n\n msg = '{0}, {1}'.format(window, noverlap)\n assert_allclose(t, tr, err_msg=msg)\n assert_allclose(x, xr, err_msg=msg)\n\n def test_roundtrip_not_nola(self):\n np.random.seed(1234)\n\n w_fail = np.ones(16)\n w_fail[::2] = 0\n settings = [\n (w_fail, 256, len(w_fail), len(w_fail) // 2),\n ('hann', 256, 64, 0),\n ]\n\n for window, N, nperseg, noverlap in settings:\n msg = '{0}, {1}, {2}, {3}'.format(window, N, nperseg, noverlap)\n assert not check_NOLA(window, nperseg, noverlap), msg\n\n t = np.arange(N)\n x = 10 * np.random.randn(t.size)\n\n _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=True,\n boundary='zeros')\n with pytest.warns(UserWarning, match='NOLA'):\n tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,\n window=window, boundary=True)\n\n assert np.allclose(t, tr[:len(t)]), msg\n assert not np.allclose(x, xr[:len(x)]), msg\n\n def test_roundtrip_nola_not_cola(self):\n np.random.seed(1234)\n\n settings = [\n ('boxcar', 100, 10, 3), # NOLA True, COLA False\n ('bartlett', 101, 51, 37), # NOLA True, COLA False\n ('hann', 1024, 256, 127), # NOLA True, COLA False\n (('tukey', 0.5), 1152, 256, 14), # NOLA True, COLA False\n ('hann', 1024, 256, 5), # NOLA True, COLA False\n ]\n\n for window, N, nperseg, noverlap in settings:\n msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)\n assert check_NOLA(window, nperseg, noverlap), msg\n assert not check_COLA(window, nperseg, noverlap), msg\n\n t = np.arange(N)\n x = 10 * np.random.randn(t.size)\n\n _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=True,\n boundary='zeros')\n\n tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,\n window=window, boundary=True)\n\n msg = '{0}, {1}'.format(window, noverlap)\n assert_allclose(t, tr[:len(t)], err_msg=msg)\n assert_allclose(x, xr[:len(x)], err_msg=msg)\n\n def test_roundtrip_float32(self):\n np.random.seed(1234)\n\n settings = [('hann', 1024, 256, 128)]\n\n 
for window, N, nperseg, noverlap in settings:\n t = np.arange(N)\n x = 10*np.random.randn(t.size)\n x = x.astype(np.float32)\n\n _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=False)\n\n tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,\n window=window)\n\n msg = '{0}, {1}'.format(window, noverlap)\n assert_allclose(t, t, err_msg=msg)\n assert_allclose(x, xr, err_msg=msg, rtol=1e-4, atol=1e-5)\n assert_(x.dtype == xr.dtype)\n\n def test_roundtrip_complex(self):\n np.random.seed(1234)\n\n settings = [\n ('boxcar', 100, 10, 0), # Test no overlap\n ('boxcar', 100, 10, 9), # Test high overlap\n ('bartlett', 101, 51, 26), # Test odd nperseg\n ('hann', 1024, 256, 128), # Test defaults\n (('tukey', 0.5), 1152, 256, 64), # Test Tukey\n ('hann', 1024, 256, 255), # Test overlapped hann\n ]\n\n for window, N, nperseg, noverlap in settings:\n t = np.arange(N)\n x = 10*np.random.randn(t.size) + 10j*np.random.randn(t.size)\n\n _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=False,\n return_onesided=False)\n\n tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,\n window=window, input_onesided=False)\n\n msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)\n assert_allclose(t, tr, err_msg=msg)\n assert_allclose(x, xr, err_msg=msg)\n\n # Check that asking for onesided switches to twosided\n with suppress_warnings() as sup:\n sup.filter(UserWarning,\n \"Input data is complex, switching to return_onesided=False\")\n _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=False,\n return_onesided=True)\n\n tr, xr = istft(zz, nperseg=nperseg, noverlap=noverlap,\n window=window, input_onesided=False)\n\n msg = '{0}, {1}, {2}'.format(window, nperseg, noverlap)\n assert_allclose(t, tr, err_msg=msg)\n assert_allclose(x, xr, err_msg=msg)\n\n def test_roundtrip_boundary_extension(self):\n np.random.seed(1234)\n\n # Test against boxcar, since window is all ones, and thus can be fully\n # recovered with no boundary extension\n\n settings = [\n ('boxcar', 100, 10, 0), # Test no overlap\n ('boxcar', 100, 10, 9), # Test high overlap\n ]\n\n for window, N, nperseg, noverlap in settings:\n t = np.arange(N)\n x = 10*np.random.randn(t.size)\n\n _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=True,\n boundary=None)\n\n _, xr = istft(zz, noverlap=noverlap, window=window, boundary=False)\n\n for boundary in ['even', 'odd', 'constant', 'zeros']:\n _, _, zz_ext = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=True,\n boundary=boundary)\n\n _, xr_ext = istft(zz_ext, noverlap=noverlap, window=window,\n boundary=True)\n\n msg = '{0}, {1}, {2}'.format(window, noverlap, boundary)\n assert_allclose(x, xr, err_msg=msg)\n assert_allclose(x, xr_ext, err_msg=msg)\n\n def test_roundtrip_padded_signal(self):\n np.random.seed(1234)\n\n settings = [\n ('boxcar', 101, 10, 0),\n ('hann', 1000, 256, 128),\n ]\n\n for window, N, nperseg, noverlap in settings:\n t = np.arange(N)\n x = 10*np.random.randn(t.size)\n\n _, _, zz = stft(x, nperseg=nperseg, noverlap=noverlap,\n window=window, detrend=None, padded=True)\n\n tr, xr = istft(zz, noverlap=noverlap, window=window)\n\n msg = '{0}, {1}'.format(window, noverlap)\n # Account for possible zero-padding at the end\n assert_allclose(t, tr[:t.size], err_msg=msg)\n assert_allclose(x, xr[:x.size], err_msg=msg)\n\n def test_roundtrip_padded_FFT(self):\n np.random.seed(1234)\n\n settings = 
[\n ('hann', 1024, 256, 128, 512),\n ('hann', 1024, 256, 128, 501),\n ('boxcar', 100, 10, 0, 33),\n (('tukey', 0.5), 1152, 256, 64, 1024),\n ]\n\n for window, N, nperseg, noverlap, nfft in settings:\n t = np.arange(N)\n x = 10*np.random.randn(t.size)\n xc = x*np.exp(1j*np.pi/4)\n\n # real signal\n _, _, z = stft(x, nperseg=nperseg, noverlap=noverlap, nfft=nfft,\n window=window, detrend=None, padded=True)\n\n # complex signal\n _, _, zc = stft(xc, nperseg=nperseg, noverlap=noverlap, nfft=nfft,\n window=window, detrend=None, padded=True,\n return_onesided=False)\n\n tr, xr = istft(z, nperseg=nperseg, noverlap=noverlap, nfft=nfft,\n window=window)\n\n tr, xcr = istft(zc, nperseg=nperseg, noverlap=noverlap, nfft=nfft,\n window=window, input_onesided=False)\n\n msg = '{0}, {1}'.format(window, noverlap)\n assert_allclose(t, tr, err_msg=msg)\n assert_allclose(x, xr, err_msg=msg)\n assert_allclose(xc, xcr, err_msg=msg)\n\n def test_axis_rolling(self):\n np.random.seed(1234)\n\n x_flat = np.random.randn(1024)\n _, _, z_flat = stft(x_flat)\n\n for a in range(3):\n newshape = [1,]*3\n newshape[a] = -1\n x = x_flat.reshape(newshape)\n\n _, _, z_plus = stft(x, axis=a) # Positive axis index\n _, _, z_minus = stft(x, axis=a-x.ndim) # Negative axis index\n\n assert_equal(z_flat, z_plus.squeeze(), err_msg=a)\n assert_equal(z_flat, z_minus.squeeze(), err_msg=a-x.ndim)\n\n # z_flat has shape [n_freq, n_time]\n\n # Test vs. transpose\n _, x_transpose_m = istft(z_flat.T, time_axis=-2, freq_axis=-1)\n _, x_transpose_p = istft(z_flat.T, time_axis=0, freq_axis=1)\n\n assert_allclose(x_flat, x_transpose_m, err_msg='istft transpose minus')\n assert_allclose(x_flat, x_transpose_p, err_msg='istft transpose plus')\n",
"import operator\nfrom numpy.fft.helper import fftshift, ifftshift, fftfreq\nimport scipy.fft._pocketfft.helper as _helper\nimport numpy as np\n__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq', 'next_fast_len']\n\n\ndef rfftfreq(n, d=1.0):\n \"\"\"DFT sample frequencies (for usage with rfft, irfft).\n\n The returned float array contains the frequency bins in\n cycles/unit (with zero at the start) given a window length `n` and a\n sample spacing `d`::\n\n f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2]/(d*n) if n is even\n f = [0,1,1,2,2,...,n/2-1,n/2-1,n/2,n/2]/(d*n) if n is odd\n\n Parameters\n ----------\n n : int\n Window length.\n d : scalar, optional\n Sample spacing. Default is 1.\n\n Returns\n -------\n out : ndarray\n The array of length `n`, containing the sample frequencies.\n\n Examples\n --------\n >>> from scipy import fftpack\n >>> sig = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)\n >>> sig_fft = fftpack.rfft(sig)\n >>> n = sig_fft.size\n >>> timestep = 0.1\n >>> freq = fftpack.rfftfreq(n, d=timestep)\n >>> freq\n array([ 0. , 1.25, 1.25, 2.5 , 2.5 , 3.75, 3.75, 5. ])\n\n \"\"\"\n n = operator.index(n)\n if n < 0:\n raise ValueError(\"n = %s is not valid. \"\n \"n must be a nonnegative integer.\" % n)\n\n return (np.arange(1, n + 1, dtype=int) // 2) / float(n * d)\n\n\ndef next_fast_len(target):\n \"\"\"\n Find the next fast size of input data to `fft`, for zero-padding, etc.\n\n SciPy's FFTPACK has efficient functions for radix {2, 3, 4, 5}, so this\n returns the next composite of the prime factors 2, 3, and 5 which is\n greater than or equal to `target`. (These are also known as 5-smooth\n numbers, regular numbers, or Hamming numbers.)\n\n Parameters\n ----------\n target : int\n Length to start searching from. Must be a positive integer.\n\n Returns\n -------\n out : int\n The first 5-smooth number greater than or equal to `target`.\n\n Notes\n -----\n .. versionadded:: 0.18.0\n\n Examples\n --------\n On a particular machine, an FFT of prime length takes 133 ms:\n\n >>> from scipy import fftpack\n >>> min_len = 10007 # prime length is worst case for speed\n >>> a = np.random.randn(min_len)\n >>> b = fftpack.fft(a)\n\n Zero-padding to the next 5-smooth length reduces computation time to\n 211 us, a speedup of 630 times:\n\n >>> fftpack.helper.next_fast_len(min_len)\n 10125\n >>> b = fftpack.fft(a, 10125)\n\n Rounding up to the next power of 2 is not optimal, taking 367 us to\n compute, 1.7 times as long as the 5-smooth size:\n\n >>> b = fftpack.fft(a, 16384)\n\n \"\"\"\n # Real transforms use regular sizes so this is backwards compatible\n return _helper.good_size(target, True)\n\n\ndef _good_shape(x, shape, axes):\n \"\"\"Ensure that shape argument is valid for scipy.fftpack\n\n scipy.fftpack does not support len(shape) < x.ndim when axes is not given.\n \"\"\"\n if shape is not None and axes is None:\n shape = _helper._iterable_of_int(shape, 'shape')\n if len(shape) != np.ndim(x):\n raise ValueError(\"when given, axes and shape arguments\"\n \" have to be of the same length\")\n return shape\n"
] |
[
[
"numpy.array",
"numpy.empty",
"numpy.asarray",
"numpy.zeros",
"numpy.diff",
"numpy.any",
"numpy.ravel",
"numpy.arange",
"numpy.deprecate",
"numpy.dtype"
],
[
"scipy.io.loadmat",
"numpy.savez"
],
[
"numpy.asarray",
"numpy.asarray_chkfinite",
"numpy.atleast_2d"
],
[
"numpy.copy",
"numpy.dot",
"numpy.asarray"
],
[
"numpy.testing.assert_allclose",
"scipy.signal.coherence",
"numpy.dot",
"numpy.random.rand",
"numpy.rollaxis",
"numpy.copy",
"numpy.exp",
"numpy.mean",
"numpy.testing.suppress_warnings",
"scipy.signal.stft",
"numpy.max",
"numpy.full",
"numpy.sin",
"numpy.zeros_like",
"numpy.empty",
"scipy.signal.istft",
"scipy.signal.csd",
"numpy.arange",
"numpy.trapz",
"numpy.sqrt",
"numpy.argmax",
"scipy.fft.fftfreq",
"numpy.array",
"scipy.signal.check_COLA",
"numpy.testing.assert_array_almost_equal_nulp",
"numpy.zeros",
"numpy.random.randn",
"numpy.testing.assert_",
"scipy.signal.welch",
"scipy.signal.periodogram",
"scipy.signal.check_NOLA",
"scipy.signal.spectrogram",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"numpy.ones",
"scipy.signal.detrend",
"scipy.signal.get_window",
"numpy.abs",
"numpy.linspace",
"scipy.signal.lombscargle"
],
[
"numpy.ndim",
"scipy.fft._pocketfft.helper.good_size",
"numpy.arange",
"scipy.fft._pocketfft.helper._iterable_of_int"
]
] |
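The record above bundles SciPy's spectral tests with an fftpack helper. As a minimal sketch (not part of the dataset row; the seed, sizes, and tolerances are illustrative), the core pattern those tests exercise is a Lomb-Scargle peak search on unevenly sampled data plus a NOLA-checked STFT/ISTFT round trip:

```python
import numpy as np
from scipy.signal import lombscargle, stft, istft, check_NOLA

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 10 * np.pi, 80))    # unevenly sampled times
x = 2.0 * np.sin(1.0 * t + 0.5 * np.pi)        # ampl=2, w=1, phi=pi/2
freqs = np.linspace(0.01, 10.0, 1000)          # angular frequencies
pgram = lombscargle(t, x, freqs, normalize=True)
print("peak at w ~", freqs[np.argmax(pgram)])  # close to the input w=1.0

# An stft/istft round trip is only guaranteed when the window/overlap
# pair satisfies the NOLA constraint, which the tests check explicitly.
sig = rng.standard_normal(1024)
assert check_NOLA("hann", 256, 128)
_, _, Z = stft(sig, window="hann", nperseg=256, noverlap=128)
_, rec = istft(Z, window="hann", nperseg=256, noverlap=128)
assert np.allclose(sig, rec[:sig.size])        # reconstruction up to padding
```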
dimitri-justeau/rasterio
|
[
"dda4b823473ba3cb27038e00ee7aa82f867f2a55"
] |
[
"tests/test_blocks.py"
] |
[
"from functools import partial\nimport logging\nimport os.path\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport unittest\n\nimport numpy as np\nimport pytest\n\nimport rasterio\nfrom rasterio import windows\n\n\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n\nclass WindowTest(unittest.TestCase):\n def test_window_shape_errors(self):\n # Positive height and width are needed when stop is None.\n self.assertRaises(\n ValueError,\n rasterio.window_shape,\n (((10, 20),(10, None)),) )\n self.assertRaises(\n ValueError,\n rasterio.window_shape,\n (((None, 10),(10, 20)),) )\n def test_window_shape_None_start(self):\n self.assertEqual(\n rasterio.window_shape(((None,4),(None,102))),\n (4, 102))\n def test_window_shape_None_stop(self):\n self.assertEqual(\n rasterio.window_shape(((10, None),(10, None)), 100, 90),\n (90, 80))\n def test_window_shape_positive(self):\n self.assertEqual(\n rasterio.window_shape(((0,4),(1,102))),\n (4, 101))\n def test_window_shape_negative(self):\n self.assertEqual(\n rasterio.window_shape(((-10, None),(-10, None)), 100, 90),\n (10, 10))\n self.assertEqual(\n rasterio.window_shape(((~0, None),(~0, None)), 100, 90),\n (1, 1))\n self.assertEqual(\n rasterio.window_shape(((None, ~0),(None, ~0)), 100, 90),\n (99, 89))\n def test_eval(self):\n self.assertEqual(\n rasterio.eval_window(((-10, None), (-10, None)), 100, 90),\n ((90, 100), (80, 90)))\n self.assertEqual(\n rasterio.eval_window(((None, -10), (None, -10)), 100, 90),\n ((0, 90), (0, 80)))\n\ndef test_window_index():\n idx = rasterio.window_index(((0,4),(1,12)))\n assert len(idx) == 2\n r, c = idx\n assert r.start == 0\n assert r.stop == 4\n assert c.start == 1\n assert c.stop == 12\n arr = np.ones((20,20))\n assert arr[idx].shape == (4, 11)\n\nclass RasterBlocksTest(unittest.TestCase):\n def test_blocks(self):\n with rasterio.open('tests/data/RGB.byte.tif') as s:\n self.assertEqual(len(s.block_shapes), 3)\n self.assertEqual(s.block_shapes, ((3, 791), (3, 791), (3, 791)))\n windows = s.block_windows(1)\n (j,i), first = next(windows)\n self.assertEqual((j,i), (0, 0))\n self.assertEqual(first, ((0, 3), (0, 791)))\n windows = s.block_windows()\n (j,i), first = next(windows)\n self.assertEqual((j,i), (0, 0))\n self.assertEqual(first, ((0, 3), (0, 791)))\n (j, i), second = next(windows)\n self.assertEqual((j,i), (1, 0))\n self.assertEqual(second, ((3, 6), (0, 791)))\n (j, i), last = list(windows)[~0]\n self.assertEqual((j,i), (239, 0))\n self.assertEqual(last, ((717, 718), (0, 791)))\n def test_block_coverage(self):\n with rasterio.open('tests/data/RGB.byte.tif') as s:\n self.assertEqual(\n s.width*s.height,\n sum((w[0][1]-w[0][0])*(w[1][1]-w[1][0])\n for ji, w in s.block_windows(1)))\n\nclass WindowReadTest(unittest.TestCase):\n def test_read_window(self):\n with rasterio.open('tests/data/RGB.byte.tif') as s:\n windows = s.block_windows(1)\n ji, first_window = next(windows)\n first_block = s.read(1, window=first_window)\n self.assertEqual(first_block.dtype, rasterio.ubyte)\n self.assertEqual(\n first_block.shape,\n rasterio.window_shape(first_window))\n\nclass WindowWriteTest(unittest.TestCase):\n def setUp(self):\n self.tempdir = tempfile.mkdtemp()\n def tearDown(self):\n shutil.rmtree(self.tempdir)\n def test_write_window(self):\n name = os.path.join(self.tempdir, \"test_write_window.tif\")\n a = np.ones((50, 50), dtype=rasterio.ubyte) * 127\n with rasterio.open(\n name, 'w',\n driver='GTiff', width=100, height=100, count=1,\n dtype=a.dtype) as s:\n s.write(a, indexes=1, window=((30, 
80), (10, 60)))\n # subprocess.call([\"open\", name])\n info = subprocess.check_output([\"gdalinfo\", \"-stats\", name])\n self.assert_(\n \"Minimum=0.000, Maximum=127.000, \"\n \"Mean=31.750, StdDev=54.993\" in info.decode('utf-8'),\n info)\n\n\ndef test_block_windows_unfiltered(path_rgb_byte_tif):\n \"\"\"Get all block windows\"\"\"\n with rasterio.open(path_rgb_byte_tif) as src:\n assert len(list(src.block_windows())) == 240\n\n\ndef test_block_windows_filtered_all(path_rgb_byte_tif):\n \"\"\"Get all block windows using filter\"\"\"\n with rasterio.open(path_rgb_byte_tif) as src:\n w, s, e, n = src.bounds\n focus_window = src.window(w, s, e, n)\n filter_func = partial(windows.intersect, focus_window)\n itr = ((ij, win) for ij, win in src.block_windows() if filter_func(win))\n assert len(list(itr)) == 240\n\n\ndef test_block_windows_filtered_one(path_rgb_byte_tif):\n \"\"\"Get the first block windows using filter\"\"\"\n with rasterio.open(path_rgb_byte_tif) as src:\n w, s, e, n = src.bounds\n focus_window = src.window(w, n - 1.0, w + 1.0, n)\n filter_func = partial(windows.intersect, focus_window)\n itr = ((ij, win) for ij, win in src.block_windows() if filter_func(win))\n assert next(itr) == ((0, 0), ((0, 3), (0, 791)))\n with pytest.raises(StopIteration):\n next(itr)\n\n\ndef test_block_windows_filtered_none(path_rgb_byte_tif):\n \"\"\"Get no block windows using filter\"\"\"\n with rasterio.open(path_rgb_byte_tif) as src:\n w, s, e, n = src.bounds\n focus_window = src.window(w - 100.0, n + 100.0, w - 1.0, n + 1.0)\n filter_func = partial(windows.intersect, focus_window)\n itr = ((ij, win) for ij, win in src.block_windows() if filter_func(win))\n with pytest.raises(StopIteration):\n next(itr)\n"
] |
[
[
"numpy.ones"
]
] |
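A hedged sketch of the windowed write/read pattern the block tests above cover; `example.tif` is a placeholder path, and the tuple-style window mirrors the older rasterio API used in this record rather than the current `Window` class:

```python
import numpy as np
import rasterio

a = np.ones((50, 50), dtype=rasterio.ubyte) * 127
with rasterio.open(
        "example.tif", "w", driver="GTiff",
        width=100, height=100, count=1, dtype=a.dtype) as dst:
    # ((row_start, row_stop), (col_start, col_stop)), as in the test above
    dst.write(a, indexes=1, window=((30, 80), (10, 60)))

with rasterio.open("example.tif") as src:
    for (j, i), window in src.block_windows(1):
        block = src.read(1, window=window)  # process one tile at a time
```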
Redhorde/biolabYoloTEST
|
[
"e74cdcffed9c2105f882bbd541b3746ce1b638be"
] |
[
"YoloFunctionality.py"
] |
[
"import cv2\r\nimport numpy as np\r\nimport os\r\n\r\n\r\ndef yolo_cut(image):\r\n net = cv2.dnn.readNet(\"hand-obj_final.weights\", \"hand-obj.cfg\")\r\n classes = []\r\n with open(\"obj.names\", \"r\") as f:\r\n classes = [line.strip() for line in f.readlines()]\r\n\r\n layer_names = net.getLayerNames()\r\n outputlayers = [layer_names[i[0]-1]for i in net.getUnconnectedOutLayers()]\r\n\r\n # img = cv2.imread(\"hand.JPG\")\r\n img = image\r\n height, width, channels = img.shape\r\n # cv2.imshow(\"Hand\", img)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n blob = cv2.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\r\n net.setInput(blob)\r\n outputs = net.forward(outputlayers)\r\n # print(outputs)\r\n\r\n class_ids = []\r\n confidences = []\r\n boxes = []\r\n for out in outputs:\r\n for detection in out:\r\n scores = detection[5:]\r\n class_id = np.argmax(scores)\r\n confidence = scores[class_id]\r\n if confidence > 0:\r\n center_x = int(detection[0]*width)\r\n center_y = int(detection[1]*height)\r\n w = int(detection[2]*width)\r\n h = int(detection[3]*height)\r\n\r\n x = int(center_x - w/2)\r\n y = int(center_y - h/2)\r\n # cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 1)\r\n boxes.append([x, y, w, h])\r\n confidences.append(float(confidence))\r\n class_ids.append(class_id)\r\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.4, 0.6)\r\n font = cv2.FONT_HERSHEY_PLAIN\r\n # print(\"Hand bounding box:\")\r\n # print(boxes)\r\n try:\r\n x = boxes[0][0]\r\n y = boxes[0][1]\r\n w = boxes[0][2]\r\n h = boxes[0][3]\r\n # p rint(str(x)+\" \"+str(y)+\" \"+str(w)+\" \"+str(h))\r\n expand = 20 # expand mask by number of pixels\r\n img_crop = img[y-expand:y+h+expand, x-expand:x+w+expand]\r\n # cv2.imshow(\"Hand_cropped\", img_crop)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n # cv2.imwrite(\"hand_crop.JPG\", img_crop)\r\n except:\r\n print(\"No hand found\")\r\n img_crop = img\r\n # cv2.imwrite(\"hand_crop.JPG\", img_crop)\r\n return img_crop\r\n\r\n\r\ndef vgg_detect():\r\n pass\r\n\r\n\r\ndef main():\r\n for folder in os.scandir(\"Dataset\"):\r\n for file in os.listdir(folder.path):\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n main()"
] |
[
[
"numpy.argmax"
]
] |
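The only NumPy call indexed for this record is `numpy.argmax`. A minimal sketch of the class-selection step inside `yolo_cut`: each detection row follows the layout `[cx, cy, w, h, objectness, class scores...]` used by the code above, and the numbers here are made up:

```python
import numpy as np

detection = np.array([0.5, 0.5, 0.2, 0.3, 0.9, 0.1, 0.7, 0.2])
scores = detection[5:]                 # per-class scores
class_id = int(np.argmax(scores))      # index of the best class -> 1
confidence = float(scores[class_id])   # its score -> 0.7
```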
putcn/moviepy
|
[
"48ae70c5f46dab61bafe8f462faa19d844ad60d3"
] |
[
"moviepy/video/compositing/concatenate.py"
] |
[
"import numpy as np\n\nfrom moviepy.audio.AudioClip import CompositeAudioClip\nfrom moviepy.tools import deprecated_version_of\nfrom moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip\nfrom moviepy.video.compositing.on_color import on_color\nfrom moviepy.video.VideoClip import ColorClip, VideoClip\n\ntry: # Python 2\n reduce\nexcept NameError: # Python 3\n from functools import reduce\n\n\n\ndef concatenate_videoclips(clips, method=\"chain\", transition=None,\n bg_color=None, ismask=False, padding = 0):\n \"\"\" Concatenates several video clips\n\n Returns a video clip made by clip by concatenating several video clips.\n (Concatenated means that they will be played one after another).\n\n There are two methods:\n\n - method=\"chain\": will produce a clip that simply outputs\n the frames of the succesive clips, without any correction if they are\n not of the same size of anything. If none of the clips have masks the\n resulting clip has no mask, else the mask is a concatenation of masks\n (using completely opaque for clips that don't have masks, obviously).\n If you have clips of different size and you want to write directly the\n result of the concatenation to a file, use the method \"compose\" instead.\n\n - method=\"compose\", if the clips do not have the same\n resolution, the final resolution will be such that no clip has\n to be resized.\n As a consequence the final clip has the height of the highest\n clip and the width of the widest clip of the list. All the\n clips with smaller dimensions will appear centered. The border\n will be transparent if mask=True, else it will be of the\n color specified by ``bg_color``.\n\n The clip with the highest FPS will be the FPS of the result clip.\n\n Parameters\n -----------\n clips\n A list of video clips which must all have their ``duration``\n attributes set.\n method\n \"chain\" or \"compose\": see above.\n transition\n A clip that will be played between each two clips of the list.\n\n bg_color\n Only for method='compose'. Color of the background.\n Set to None for a transparent clip\n\n padding\n Only for method='compose'. Duration during two consecutive clips.\n Note that for negative padding, a clip will partly play at the same\n time as the clip it follows (negative padding is cool for clips who fade\n in on one another). 
A non-null padding automatically sets the method to\n `compose`.\n\n \"\"\"\n\n if transition is not None:\n l = [[v, transition] for v in clips[:-1]]\n clips = reduce(lambda x, y: x + y, l) + [clips[-1]]\n transition = None\n\n tt = np.cumsum([0] + [c.duration for c in clips])\n\n sizes = [v.size for v in clips]\n\n w = max([r[0] for r in sizes])\n h = max([r[1] for r in sizes])\n\n tt = np.maximum(0, tt + padding*np.arange(len(tt)))\n\n if method == \"chain\":\n def make_frame(t):\n i = max([i for i, e in enumerate(tt) if e <= t])\n return clips[i].get_frame(t - tt[i])\n\n def get_mask(c):\n mask = c.mask or ColorClip([1, 1], color=1, ismask=True)\n if mask.duration is None:\n mask.duration = c.duration\n return mask\n\n result = VideoClip(ismask = ismask, make_frame = make_frame)\n if any([c.mask is not None for c in clips]):\n masks = [get_mask(c) for c in clips]\n result.mask = concatenate_videoclips(masks, method=\"chain\",\n ismask=True)\n result.clips = clips\n elif method == \"compose\":\n result = CompositeVideoClip( [c.set_start(t).set_position('center')\n for (c, t) in zip(clips, tt)],\n size = (w, h), bg_color=bg_color, ismask=ismask)\n else:\n raise Exception(\"Moviepy Error: The 'method' argument of \"\n \"concatenate_videoclips must be 'chain' or 'compose'\")\n\n result.tt = tt\n\n result.start_times = tt[:-1]\n result.start, result.duration, result.end = 0, tt[-1] , tt[-1]\n\n audio_t = [(c.audio,t) for c,t in zip(clips,tt) if c.audio is not None]\n if len(audio_t)>0:\n result.audio = CompositeAudioClip([a.set_start(t)\n for a,t in audio_t])\n\n fpss = [c.fps for c in clips if hasattr(c,'fps') and c.fps is not None]\n if len(fpss) == 0:\n result.fps = None\n else:\n result.fps = max(fpss)\n\n return result\n\n\nconcatenate = deprecated_version_of(concatenate_videoclips,\n oldname=\"concatenate\")\n"
] |
[
[
"numpy.cumsum"
]
] |
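`numpy.cumsum` is indexed here because `concatenate_videoclips` derives each clip's start time from the cumulative durations, shifted by the padding. A small worked sketch with made-up durations:

```python
import numpy as np

durations = [3.0, 2.5, 4.0]              # clip lengths in seconds
padding = 0.5
tt = np.cumsum([0] + durations)          # [0.  3.  5.5 9.5]
tt = np.maximum(0, tt + padding * np.arange(len(tt)))
print(tt)  # start time of each clip; tt[-1] is the result's total duration
```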
raphaelavalos/multiagent-particle-envs
|
[
"d589429084031a58352d214b6147339a21f41cd5"
] |
[
"multiagent/rendering.py"
] |
[
"\"\"\"\n2D rendering framework\n\"\"\"\nfrom __future__ import division\nimport os\nimport six\nimport sys\n\nif \"Apple\" in sys.version:\n if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:\n os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'\n # (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite\n\n\nfrom gym import error\n\ntry:\n import pyglet\nexcept ImportError as e:\n raise ImportError(\"HINT: you can install pyglet directly via 'pip install pyglet'. But if you really just want to install all Gym dependencies and not have to think about it, 'pip install -e .[all]' or 'pip install gym[all]' will do it.\")\n\ntry:\n from pyglet.gl import *\nexcept ImportError as e:\n raise ImportError(\"Error occured while running `from pyglet.gl import *`\",suffix=\"HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'. If you're running on a server, you may need a virtual frame buffer; something like this should work: 'xvfb-run -s \\\"-screen 0 1400x900x24\\\" python <your_script.py>'\")\n\nimport math\nimport numpy as np\n\nRAD2DEG = 57.29577951308232\n\ndef get_display(spec):\n \"\"\"Convert a display specification (such as :0) into an actual Display\n object.\n Pyglet only supports multiple Displays on Linux.\n \"\"\"\n if spec is None:\n return pyglet.canvas.get_display()\n # returns already available pyglet_display,\n # if there is no pyglet display available then it creates one\n elif isinstance(spec, str):\n return pyglet.canvas.Display(spec)\n else:\n raise error.Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))\n\nclass Viewer(object):\n def __init__(self, width, height, display=None):\n display = get_display(display)\n\n self.width = width\n self.height = height\n\n self.window = pyglet.window.Window(width=width, height=height, display=display)\n self.window.on_close = self.window_closed_by_user\n self.geoms = []\n self.onetime_geoms = []\n self.transform = Transform()\n\n glEnable(GL_BLEND)\n # glEnable(GL_MULTISAMPLE)\n glEnable(GL_LINE_SMOOTH)\n # glHint(GL_LINE_SMOOTH_HINT, GL_DONT_CARE)\n glHint(GL_LINE_SMOOTH_HINT, GL_NICEST)\n glLineWidth(2.0)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n def close(self):\n self.window.close()\n\n def window_closed_by_user(self):\n self.close()\n\n def set_bounds(self, left, right, bottom, top):\n assert right > left and top > bottom\n scalex = self.width/(right-left)\n scaley = self.height/(top-bottom)\n self.transform = Transform(\n translation=(-left*scalex, -bottom*scaley),\n scale=(scalex, scaley))\n\n def add_geom(self, geom):\n self.geoms.append(geom)\n\n def add_onetime(self, geom):\n self.onetime_geoms.append(geom)\n\n def render(self, return_rgb_array=False):\n glClearColor(1,1,1,1)\n self.window.clear()\n self.window.switch_to()\n self.window.dispatch_events()\n self.transform.enable()\n for geom in self.geoms:\n geom.render()\n for geom in self.onetime_geoms:\n geom.render()\n self.transform.disable()\n arr = None\n if return_rgb_array:\n buffer = pyglet.image.get_buffer_manager().get_color_buffer()\n image_data = buffer.get_image_data()\n arr = np.fromstring(image_data.get_data(), dtype=np.uint8, sep='')\n # In https://github.com/openai/gym-http-api/issues/2, we\n # discovered that someone using Xmonad on Arch was having\n # a window of size 598 x 398, though a 600 x 400 window\n # was requested. (Guess Xmonad was preserving a pixel for\n # the boundary.) 
So we use the buffer height/width rather\n # than the requested one.\n arr = arr.reshape(buffer.height, buffer.width, 4)\n arr = arr[::-1,:,0:3]\n self.window.flip()\n self.onetime_geoms = []\n return arr\n\n # Convenience\n def draw_circle(self, radius=10, res=30, filled=True, **attrs):\n geom = make_circle(radius=radius, res=res, filled=filled)\n _add_attrs(geom, attrs)\n self.add_onetime(geom)\n return geom\n\n def draw_polygon(self, v, filled=True, **attrs):\n geom = make_polygon(v=v, filled=filled)\n _add_attrs(geom, attrs)\n self.add_onetime(geom)\n return geom\n\n def draw_polyline(self, v, **attrs):\n geom = make_polyline(v=v)\n _add_attrs(geom, attrs)\n self.add_onetime(geom)\n return geom\n\n def draw_line(self, start, end, **attrs):\n geom = Line(start, end)\n _add_attrs(geom, attrs)\n self.add_onetime(geom)\n return geom\n\n def get_array(self):\n self.window.flip()\n image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()\n self.window.flip()\n arr = np.fromstring(image_data.data, dtype=np.uint8, sep='')\n arr = arr.reshape(self.height, self.width, 4)\n return arr[::-1,:,0:3]\n\ndef _add_attrs(geom, attrs):\n if \"color\" in attrs:\n geom.set_color(*attrs[\"color\"])\n if \"linewidth\" in attrs:\n geom.set_linewidth(attrs[\"linewidth\"])\n\nclass Geom(object):\n def __init__(self):\n self._color=Color((0, 0, 0, 1.0))\n self.attrs = [self._color]\n def render(self):\n for attr in reversed(self.attrs):\n attr.enable()\n self.render1()\n for attr in self.attrs:\n attr.disable()\n def render1(self):\n raise NotImplementedError\n def add_attr(self, attr):\n self.attrs.append(attr)\n def set_color(self, r, g, b, alpha=1):\n self._color.vec4 = (r, g, b, alpha)\n\nclass Attr(object):\n def enable(self):\n raise NotImplementedError\n def disable(self):\n pass\n\nclass Transform(Attr):\n def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1,1)):\n self.set_translation(*translation)\n self.set_rotation(rotation)\n self.set_scale(*scale)\n def enable(self):\n glPushMatrix()\n glTranslatef(self.translation[0], self.translation[1], 0) # translate to GL loc ppint\n glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)\n glScalef(self.scale[0], self.scale[1], 1)\n def disable(self):\n glPopMatrix()\n def set_translation(self, newx, newy):\n self.translation = (float(newx), float(newy))\n def set_rotation(self, new):\n self.rotation = float(new)\n def set_scale(self, newx, newy):\n self.scale = (float(newx), float(newy))\n\nclass Color(Attr):\n def __init__(self, vec4):\n self.vec4 = vec4\n def enable(self):\n glColor4f(*self.vec4)\n\nclass LineStyle(Attr):\n def __init__(self, style):\n self.style = style\n def enable(self):\n glEnable(GL_LINE_STIPPLE)\n glLineStipple(1, self.style)\n def disable(self):\n glDisable(GL_LINE_STIPPLE)\n\nclass LineWidth(Attr):\n def __init__(self, stroke):\n self.stroke = stroke\n def enable(self):\n glLineWidth(self.stroke)\n\nclass Point(Geom):\n def __init__(self):\n Geom.__init__(self)\n def render1(self):\n glBegin(GL_POINTS) # draw point\n glVertex3f(0.0, 0.0, 0.0)\n glEnd()\n\nclass FilledPolygon(Geom):\n def __init__(self, v):\n Geom.__init__(self)\n self.v = v\n def render1(self):\n if len(self.v) == 4 : glBegin(GL_QUADS)\n elif len(self.v) > 4 : glBegin(GL_POLYGON)\n else: glBegin(GL_TRIANGLES)\n for p in self.v:\n glVertex3f(p[0], p[1],0) # draw each vertex\n glEnd()\n\n color = (self._color.vec4[0] * 0.5, self._color.vec4[1] * 0.5, self._color.vec4[2] * 0.5, self._color.vec4[3] * 0.5)\n glColor4f(*color)\n 
glBegin(GL_LINE_LOOP)\n for p in self.v:\n glVertex3f(p[0], p[1],0) # draw each vertex\n glEnd()\n\ndef make_circle(radius=10, res=30, filled=True):\n points = []\n for i in range(res):\n ang = 2*math.pi*i / res\n points.append((math.cos(ang)*radius, math.sin(ang)*radius))\n if filled:\n return FilledPolygon(points)\n else:\n return PolyLine(points, True)\n\ndef make_polygon(v, filled=True):\n if filled: return FilledPolygon(v)\n else: return PolyLine(v, True)\n\ndef make_polyline(v):\n return PolyLine(v, False)\n\ndef make_capsule(length, width):\n l, r, t, b = 0, length, width/2, -width/2\n box = make_polygon([(l,b), (l,t), (r,t), (r,b)])\n circ0 = make_circle(width/2)\n circ1 = make_circle(width/2)\n circ1.add_attr(Transform(translation=(length, 0)))\n geom = Compound([box, circ0, circ1])\n return geom\n\nclass Compound(Geom):\n def __init__(self, gs):\n Geom.__init__(self)\n self.gs = gs\n for g in self.gs:\n g.attrs = [a for a in g.attrs if not isinstance(a, Color)]\n def render1(self):\n for g in self.gs:\n g.render()\n\nclass PolyLine(Geom):\n def __init__(self, v, close):\n Geom.__init__(self)\n self.v = v\n self.close = close\n self.linewidth = LineWidth(1)\n self.add_attr(self.linewidth)\n def render1(self):\n glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP)\n for p in self.v:\n glVertex3f(p[0], p[1],0) # draw each vertex\n glEnd()\n def set_linewidth(self, x):\n self.linewidth.stroke = x\n\nclass Line(Geom):\n def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):\n Geom.__init__(self)\n self.start = start\n self.end = end\n self.linewidth = LineWidth(1)\n self.add_attr(self.linewidth)\n\n def render1(self):\n glBegin(GL_LINES)\n glVertex2f(*self.start)\n glVertex2f(*self.end)\n glEnd()\n\nclass Image(Geom):\n def __init__(self, fname, width, height):\n Geom.__init__(self)\n self.width = width\n self.height = height\n img = pyglet.image.load(fname)\n self.img = img\n self.flip = False\n def render1(self):\n self.img.blit(-self.width/2, -self.height/2, width=self.width, height=self.height)\n\n# ================================================================\n\nclass SimpleImageViewer(object):\n def __init__(self, display=None):\n self.window = None\n self.isopen = False\n self.display = display\n def imshow(self, arr):\n if self.window is None:\n height, width, channels = arr.shape\n self.window = pyglet.window.Window(width=width, height=height, display=self.display)\n self.width = width\n self.height = height\n self.isopen = True\n assert arr.shape == (self.height, self.width, 3), \"You passed in an image with the wrong number shape\"\n image = pyglet.image.ImageData(self.width, self.height, 'RGB', arr.tobytes(), pitch=self.width * -3)\n self.window.clear()\n self.window.switch_to()\n self.window.dispatch_events()\n image.blit(0,0)\n self.window.flip()\n def close(self):\n if self.isopen:\n self.window.close()\n self.isopen = False\n def __del__(self):\n self.close()"
] |
[
[
"numpy.fromstring"
]
] |
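The record's `Viewer.render` converts a pyglet color buffer into an RGB array with `numpy.fromstring`. A sketch of that step using `numpy.frombuffer`, the non-deprecated equivalent for binary input; the 4x3 byte buffer stands in for real image data:

```python
import numpy as np

width, height = 4, 3
raw = bytes(range(width * height * 4))   # fake RGBA buffer, row-major
arr = np.frombuffer(raw, dtype=np.uint8)
arr = arr.reshape(height, width, 4)      # rows of RGBA pixels
arr = arr[::-1, :, 0:3]                  # flip vertically, drop alpha
print(arr.shape)                         # (3, 4, 3)
```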
bsm8734/formula-image-latex-recognition
|
[
"86d5070e8f907571a47967d64facaee246d92a35"
] |
[
"checkpoint.py"
] |
[
"import os\nimport torch\nfrom tensorboardX import SummaryWriter\n\nuse_cuda = torch.cuda.is_available()\n\ndefault_checkpoint = {\n \"epoch\": 0,\n \"train_losses\": [],\n \"train_symbol_accuracy\": [],\n \"train_sentence_accuracy\": [],\n \"train_wer\": [],\n \"train_score\": [],\n \"validation_losses\": [],\n \"validation_symbol_accuracy\": [],\n \"validation_sentence_accuracy\": [],\n \"validation_wer\": [],\n \"validation_score\": [],\n \"lr\": [],\n \"grad_norm\": [],\n \"model\": {},\n \"configs\":{},\n \"token_to_id\":{},\n \"id_to_token\":{},\n}\n\n\ndef save_checkpoint(checkpoint, dir=\"./checkpoints\", prefix=\"\"):\n \"\"\" Saving check point\n\n Args:\n checkpoint(dict) : Checkpoint to save\n dir(str) : Path to save the checkpoint\n prefix(str) : Path of location of dir \n \"\"\"\n # Padded to 4 digits because of lexical sorting of numbers.\n # e.g. 0009.pth\n filename = \"{num:0>4}.pth\".format(num=checkpoint[\"epoch\"])\n if not os.path.exists(os.path.join(prefix, dir)):\n os.makedirs(os.path.join(prefix, dir))\n torch.save(checkpoint, os.path.join(prefix, dir, filename))\n\n\ndef load_checkpoint(path, cuda=use_cuda):\n \"\"\" Load check point\n\n Args:\n path(str) : Path checkpoint located\n cuda : Whether use cuda or not [Default: use_cuda]\n Returns\n Loaded checkpoints\n \"\"\"\n if cuda:\n return torch.load(path)\n else:\n # Load GPU model on CPU\n return torch.load(path, map_location=lambda storage, loc: storage)\n\n\ndef init_tensorboard(name=\"\", base_dir=\"./tensorboard\"):\n \"\"\"Init tensorboard\n Args:\n name(str) : name of tensorboard\n base_dir(str): path of tesnorboard\n \"\"\"\n return SummaryWriter(os.path.join(name, base_dir))\n\n\ndef write_tensorboard(\n writer,\n epoch,\n grad_norm,\n train_loss,\n train_symbol_accuracy,\n train_sentence_accuracy,\n train_wer,\n train_score,\n validation_loss,\n validation_symbol_accuracy,\n validation_sentence_accuracy,\n validation_wer,\n validation_score,\n model,\n):\n writer.add_scalar(\"train_loss\", train_loss, epoch)\n writer.add_scalar(\"train_symbol_accuracy\", train_symbol_accuracy, epoch)\n writer.add_scalar(\"train_sentence_accuracy\",train_sentence_accuracy,epoch)\n writer.add_scalar(\"train_wer\", train_wer, epoch)\n writer.add_scalar(\"train_score\", train_score, epoch)\n writer.add_scalar(\"validation_loss\", validation_loss, epoch)\n writer.add_scalar(\"validation_symbol_accuracy\", validation_symbol_accuracy, epoch)\n writer.add_scalar(\"validation_sentence_accuracy\",validation_sentence_accuracy,epoch)\n writer.add_scalar(\"validation_wer\",validation_wer,epoch)\n writer.add_scalar(\"validation_score\", validation_score, epoch)\n writer.add_scalar(\"grad_norm\", grad_norm, epoch)\n\n for name, param in model.encoder.named_parameters():\n writer.add_histogram(\n \"encoder/{}\".format(name), param.detach().cpu().numpy(), epoch\n )\n if param.grad is not None:\n writer.add_histogram(\n \"encoder/{}/grad\".format(name), param.grad.detach().cpu().numpy(), epoch\n )\n\n for name, param in model.decoder.named_parameters():\n writer.add_histogram(\n \"decoder/{}\".format(name), param.detach().cpu().numpy(), epoch\n )\n if param.grad is not None:\n writer.add_histogram(\n \"decoder/{}/grad\".format(name), param.grad.detach().cpu().numpy(), epoch\n )\n"
] |
[
[
"torch.cuda.is_available",
"torch.load"
]
] |
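The two torch calls indexed for this record implement a CPU fallback when loading checkpoints. The pattern from `load_checkpoint` above, restated as a self-contained sketch; the checkpoint path in the usage comment is illustrative, following the `0009.pth` naming that `save_checkpoint` produces:

```python
import torch

use_cuda = torch.cuda.is_available()

def load_checkpoint(path, cuda=use_cuda):
    if cuda:
        return torch.load(path)
    # Remap GPU-saved tensors onto the CPU so loading works without CUDA.
    return torch.load(path, map_location=lambda storage, loc: storage)

# checkpoint = load_checkpoint("./checkpoints/0009.pth")
```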
webclinic017/fastbt
|
[
"715982cc454ee6fabcaa605188fd1aad7a32a376"
] |
[
"tests/test_datasource.py"
] |
[
"import unittest\nimport pandas as pd\nimport numpy as np\nimport context\n\nfrom fastbt.datasource import DataSource\nimport talib\n\nclass TestDataSource(unittest.TestCase):\n\n def setUp(self): \n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n self.ds = DataSource(data=df)\n\n def test_data(self):\n self.assertEqual(self.ds.data.iloc[20,1], 'five')\n self.assertEqual(self.ds.data.iloc[14,3], 112)\n self.assertEqual(self.ds.data.iloc[24,7], 10.54)\n\n def test_data_without_sort(self):\n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n self.ds = DataSource(data=df, sort=False)\n self.assertEqual(self.ds.data.iloc[9,4], 999)\n self.assertEqual(self.ds.data.iloc[24,6], 41688)\n self.assertEqual(self.ds.data.at[4, 'close'], 10.6)\n\n def test_initialize_case(self):\n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n df.columns = [x.upper() for x in df.columns]\n self.assertEqual(df.columns[0], 'TIMESTAMP')\n self.ds = DataSource(data=df)\n self.assertEqual(self.ds.data.columns[0], 'timestamp')\n\n def test_initialize_column_rename(self):\n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n df.columns = ['TS', 'TRADINGSYMBOL', 'OPEN', 'HIGH', 'LOW',\n 'CLOSE', 'VOLUME', 'PREVCLOSE']\n self.ds = DataSource(data=df, timestamp='TS', symbol='TRADINGSYMBOL')\n self.assertEqual(self.ds.data.columns[0], 'timestamp')\n self.assertEqual(self.ds.data.columns[1], 'symbol')\n\n def test_add_lag(self):\n length = len(self.ds.data)\n idx = pd.IndexSlice\n self.ds.add_lag(on='close')\n self.ds.add_lag(on='volume', period=2)\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n self.assertEqual(d.at[idx['2018-01-04', 'one'], 'lag_close_1'], 11)\n self.assertEqual(d.at[idx['2018-01-06', 'six'], 'lag_volume_2'], 86014)\n self.assertEqual(len(self.ds.data.columns), 10)\n self.assertEqual(len(self.ds.data), length)\n\n def test_add_lag_column_rename(self):\n idx = pd.IndexSlice\n self.ds.add_lag(on='close')\n self.ds.add_lag(on='close', col_name='some_col')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n self.assertEqual(d.at[idx['2018-01-04', 'one'], 'lag_close_1'], 11)\n self.assertEqual(d.at[idx['2018-01-04', 'one'], 'some_col'], 11)\n self.assertEqual(d.at[idx['2018-01-05', 'three'], 'some_col'], 109)\n\n def test_add_pct_change(self):\n idx = pd.IndexSlice\n self.ds.add_pct_change(on='close')\n self.ds.add_pct_change(on='close', period=2)\n self.ds.add_pct_change(on='close', period=2, col_name='new_col')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-05', 'three'], 'chg_close_1']), -0.07)\n self.assertEqual(R(d.at[idx['2018-01-06', 'five'], 'chg_close_1']), 0.17)\n self.assertEqual(R(d.at[idx['2018-01-05', 'four'], 'chg_close_2']), 0.05)\n self.assertEqual(R(d.at[idx['2018-01-05', 'four'], 'new_col']), 0.05)\n self.assertEqual(R(d.at[idx['2018-01-03', 'six'], 'new_col']), -0.1)\n self.assertEqual(pd.isna(d.at[idx['2018-01-02', 'one'], 'new_col']), True)\n self.assertEqual(len(self.ds.data.columns), 11)\n\n def test_add_pct_change_lag(self):\n idx = pd.IndexSlice\n self.ds.add_pct_change(on='close', period=2, lag=1)\n self.ds.add_pct_change(on='close', period=1, lag=2)\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'chg_close_2']), 0.09)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'chg_close_1']), 0.01)\n self.assertEqual(R(d.at[idx['2018-01-06', 
'three'], 'chg_close_1']), -0.01)\n\n\n def test_add_pct_change_lag_col_name(self):\n idx = pd.IndexSlice\n self.ds.add_pct_change(on='high', period=2, lag=1)\n self.ds.add_pct_change(on='close', period=1, lag=2, col_name='lagged_2')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-05', 'six'], 'chg_high_2']), -0.04)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'lagged_2']), 0.01)\n\n def test_formula_add_col_name(self):\n idx = pd.IndexSlice\n self.ds.add_formula('open+close', 'new_col')\n self.ds.add_formula('volume/close', 'new_col_2')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'new_col']), 336)\n self.assertEqual(R(d.at[idx['2018-01-06', 'one'], 'new_col_2']), 77755.77)\n\n def test_formula_case_insensitive(self):\n idx = pd.IndexSlice\n self.ds.add_formula('OPEN+CLOSE', 'new_col')\n self.ds.add_formula('volume/close', 'NEW_COL_2')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-04', 'four'], 'new_col']), 336)\n self.assertEqual(R(d.at[idx['2018-01-06', 'one'], 'new_col_2']), 77755.77)\n\n\n def test_formula_calculated_column(self):\n idx = pd.IndexSlice\n self.ds.add_formula('(open+close)*100', 'new_col_1')\n self.ds.add_formula('volume/100', 'new_col_2')\n self.ds.add_formula('new_col_1+new_col_2', 'new_col_3')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(R(d.at[idx['2018-01-06', 'one'], 'new_col_3']), 10190.6)\n self.assertEqual(R(d.at[idx['2018-01-05', 'two'], 'new_col_3']), 200389.97)\n\n def test_rolling_simple(self):\n from pandas import isna\n q = 'symbol == \"one\"'\n df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp']).query(q)\n df['r2'] = df['close'].rolling(2).mean()\n self.ds.add_rolling(2, col_name='r2')\n df2 = self.ds.data.query(q)\n print('RESULT' , df['r2'], df2['r2'])\n for a,b in zip(df['r2'], df2['r2']):\n if not(isna(a)):\n assert a==b \n\n def test_rolling_values(self):\n idx = pd.IndexSlice\n self.ds.add_rolling(4, on='volume', function='max')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n R = lambda x: round(x,2)\n self.assertEqual(d.at[idx['2018-01-05', 'five'], 'rol_max_volume_4'], 971704)\n self.assertEqual(d.at[idx['2018-01-05', 'six'], 'rol_max_volume_4'], 195539)\n self.assertEqual(d.at[idx['2018-01-04', 'three'], 'rol_max_volume_4'], 433733)\n # Adding lag and testing\n self.ds.add_rolling(4, on='volume', function='max', lag=1)\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n self.assertEqual(d.at[idx['2018-01-06', 'five'], 'rol_max_volume_4'], 971704)\n self.assertEqual(d.at[idx['2018-01-06', 'six'], 'rol_max_volume_4'], 195539)\n self.assertEqual(d.at[idx['2018-01-05', 'three'], 'rol_max_volume_4'], 433733)\n # Testing for 2 lags and column name\n self.ds.add_rolling(4, on='volume', function='max', lag=2, col_name='check')\n d = self.ds.data.set_index(['timestamp', 'symbol'])\n self.assertEqual(d.at[idx['2018-01-06', 'three'], 'check'], 433733) \n\n def test_batch(self):\n length = len(self.ds.data)\n batch = [\n {'P': {'on': 'close', 'period': 1, 'lag': 1}},\n {'L': {'on': 'volume', 'period': 1}},\n {'F': {'formula': '(open+close)/2', 'col_name': 'AvgPrice'}},\n {'I': {'indicator': 'SMA', 'period': 3, 'lag': 1, 'col_name': 'SMA3'}},\n {'F': {'formula': 'avgprice + sma3', 'col_name': 'final'}},\n {'R': {'window': 3, 'function': 
'mean'}}\n ]\n d = self.ds.batch_process(batch).set_index(['timestamp', 'symbol'])\n self.assertEqual(len(d.columns), 12)\n self.assertEqual(len(self.ds.data.columns), 14)\n self.assertEqual(len(self.ds.data), length)\n\n def test_raise_error_if_not_dataframe(self):\n pass\n\n\ndef test_rolling_zscore():\n np.random.seed(100)\n df = pd.DataFrame(np.random.randn(100,4), \n columns=['open', 'high', 'low', 'close'])\n df['symbol'] = list('ABCD') * 25\n dates = list(pd.date_range(end='2018-04-25', periods=25)) * 4\n df['timestamp'] = dates\n from fastbt.datasource import DataSource\n ds = DataSource(df)\n ds.add_rolling(on='close', window=5, function='zscore')\n assert ds.data.query('symbol==\"A\"').iloc[8]['rol_zscore_close_5'].round(2) == 0.12\n assert ds.data.query('symbol==\"B\"').iloc[-7]['rol_zscore_close_5'].round(2) == 0.17\n assert ds.data.query('symbol==\"C\"').iloc[-6]['rol_zscore_close_5'].round(2) == -0.48\n\nclass TestDataSourceReindex(unittest.TestCase):\n\n def setUp(self): \n df = pd.DataFrame(np.arange(24).reshape(6, 4),\n columns=['open', 'high', 'low', 'close'])\n df['symbol'] = list('ABCABA')\n df['timestamp'] = [1, 1, 1, 2, 3, 3]\n self.df = df\n\n def test_reindex(self):\n ds = DataSource(self.df)\n ds.reindex([1,2,3])\n assert len(ds.data) == 9\n # Check values\n assert ds.data.set_index(['symbol', 'timestamp']).at[('A', 1), 'open'] == 0\n assert ds.data.set_index(['symbol', 'timestamp']).at[('B', 2), 'close'] == 7\n assert ds.data.set_index(['symbol', 'timestamp']).at[('C', 3), 'high'] == 9\n ds.reindex([1,2,3,4])\n assert len(ds.data) == 12 \n\n def test_reindex_different_fills(self):\n ds = DataSource(self.df)\n ds.reindex([1,2,3], method=None)\n print(ds.data)\n assert pd.isnull(ds.data.set_index(['symbol', 'timestamp']).at[('C', 3), 'high'])\n ds = DataSource(self.df)\n ds.reindex([1,2,3,4], method='bfill') \n assert ds.data.set_index(['symbol', 'timestamp']).at[('B', 2), 'close'] == 19\n\nclass TestDataSourceTALIB(unittest.TestCase):\n\n \"\"\"\n Test TALIB indicators\n \"\"\"\n\n def setUp(self): \n self.df = pd.read_csv('tests/data/sample.csv', parse_dates=['timestamp'])\n\n def test_single_symbol(self):\n df = self.df.query('symbol==\"one\"')\n ds = DataSource(df)\n ds.add_indicator('SMA', period=3, col_name='sma')\n assert len(ds.data) == 6\n\n sma = talib.SMA(df.close.values, timeperiod=3)\n # If both are equal, there should be no differences\n assert (ds.data.sma - sma).sum() == 0\n\n\n\n"
] |
[
[
"pandas.isna",
"numpy.random.seed",
"pandas.date_range",
"numpy.random.randn",
"numpy.arange",
"pandas.read_csv"
]
] |
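These `DataSource` tests revolve around per-symbol lagged and rolling columns. A hedged sketch of the grouped rolling mean they validate, assuming `add_rolling` wraps something like pandas' groupby/rolling machinery; the data is synthetic and the column names mirror the sample CSV:

```python
import numpy as np
import pandas as pd

np.random.seed(100)
df = pd.DataFrame(np.random.randn(12, 1), columns=["close"])
df["symbol"] = list("ABCD") * 3
df["timestamp"] = list(pd.date_range(end="2018-04-25", periods=3)) * 4
df = df.sort_values(["symbol", "timestamp"])

# Rolling mean computed independently within each symbol's time series.
df["r2"] = df.groupby("symbol")["close"].transform(lambda s: s.rolling(2).mean())
print(df.head())
```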
OspreyData/lime
|
[
"ceec55cf074b6242ffdde3487afb08ab3250cd63"
] |
[
"lime/lime_tabular.py"
] |
[
"\"\"\"\nFunctions for explaining classifiers that use tabular data (matrices).\n\"\"\"\nimport collections\nimport copy\nfrom functools import partial\nimport json\nimport warnings\n\nimport numpy as np\nimport scipy as sp\nimport sklearn\nimport sklearn.preprocessing\nfrom sklearn.utils import check_random_state\n\nfrom lime.discretize import QuartileDiscretizer\nfrom lime.discretize import DecileDiscretizer\nfrom lime.discretize import EntropyDiscretizer\nfrom lime.discretize import BaseDiscretizer\nfrom lime.discretize import StatsDiscretizer\nfrom . import explanation\nfrom . import lime_base\n\n\nclass TableDomainMapper(explanation.DomainMapper):\n \"\"\"Maps feature ids to names, generates table views, etc\"\"\"\n\n def __init__(self, feature_names, feature_values, scaled_row,\n categorical_features, discretized_feature_names=None,\n feature_indexes=None):\n \"\"\"Init.\n\n Args:\n feature_names: list of feature names, in order\n feature_values: list of strings with the values of the original row\n scaled_row: scaled row\n categorical_features: list of categorical features ids (ints)\n feature_indexes: optional feature indexes used in the sparse case\n \"\"\"\n self.exp_feature_names = feature_names\n self.discretized_feature_names = discretized_feature_names\n self.feature_names = feature_names\n self.feature_values = feature_values\n self.feature_indexes = feature_indexes\n self.scaled_row = scaled_row\n if sp.sparse.issparse(scaled_row):\n self.all_categorical = False\n else:\n self.all_categorical = len(categorical_features) == len(scaled_row)\n self.categorical_features = categorical_features\n\n def map_exp_ids(self, exp):\n \"\"\"Maps ids to feature names.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n\n Returns:\n list of tuples (feature_name, weight)\n \"\"\"\n names = self.exp_feature_names\n if self.discretized_feature_names is not None:\n names = self.discretized_feature_names\n return [(names[x[0]], x[1]) for x in exp]\n\n def visualize_instance_html(self,\n exp,\n label,\n div_name,\n exp_object_name,\n show_table=True,\n show_all=False):\n \"\"\"Shows the current example in a table format.\n\n Args:\n exp: list of tuples [(id, weight), (id,weight)]\n label: label id (integer)\n div_name: name of div object to be used for rendering(in js)\n exp_object_name: name of js explanation object\n show_table: if False, don't show table visualization.\n show_all: if True, show zero-weighted features in the table.\n \"\"\"\n if not show_table:\n return ''\n weights = [0] * len(self.feature_names)\n for x in exp:\n weights[x[0]] = x[1]\n if self.feature_indexes is not None:\n # Sparse case: only display the non-zero values and importances\n fnames = [self.exp_feature_names[i] for i in self.feature_indexes]\n fweights = [weights[i] for i in self.feature_indexes]\n if show_all:\n out_list = list(zip(fnames,\n self.feature_values,\n fweights))\n else:\n out_dict = dict(map(lambda x: (x[0], (x[1], x[2], x[3])),\n zip(self.feature_indexes,\n fnames,\n self.feature_values,\n fweights)))\n out_list = [out_dict.get(x[0], (str(x[0]), 0.0, 0.0)) for x in exp]\n else:\n out_list = list(zip(self.exp_feature_names,\n self.feature_values,\n weights))\n if not show_all:\n out_list = [out_list[x[0]] for x in exp]\n ret = u'''\n %s.show_raw_tabular(%s, %d, %s);\n ''' % (exp_object_name, json.dumps(out_list, ensure_ascii=False), label, div_name)\n return ret\n\n\nclass LimeTabularExplainer(object):\n \"\"\"Explains predictions on tabular (i.e. 
matrix) data.\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to the\n means and stds in the training data. For categorical features, perturb by\n sampling according to the training distribution, and making a binary\n feature that is 1 when the value is the same as the instance being\n explained.\"\"\"\n\n def __init__(self,\n training_data,\n mode=\"classification\",\n training_labels=None,\n feature_names=None,\n categorical_features=None,\n categorical_names=None,\n kernel_width=None,\n kernel=None,\n verbose=False,\n class_names=None,\n feature_selection='auto',\n discretize_continuous=True,\n discretizer='quartile',\n sample_around_instance=False,\n random_state=None,\n training_data_stats=None):\n \"\"\"Init function.\n\n Args:\n training_data: numpy 2d array\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt (number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True\n and data is not sparse. Options are 'quartile', 'decile',\n 'entropy' or a BaseDiscretizer instance.\n sample_around_instance: if True, will sample continuous features\n in perturbed samples from a normal centered at the instance\n being explained. Otherwise, the normal is centered on the mean\n of the feature data.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. If None, the random state will be\n initialized using the internal numpy seed.\n training_data_stats: a dict object having the details of training data\n statistics. If None, training data information will be used, only matters\n if discretize_continuous is True. 
Must have the following keys:\n means\", \"mins\", \"maxs\", \"stds\", \"feature_values\",\n \"feature_frequencies\"\n \"\"\"\n self.random_state = check_random_state(random_state)\n self.mode = mode\n self.categorical_names = categorical_names or {}\n self.sample_around_instance = sample_around_instance\n self.training_data_stats = training_data_stats\n\n # Check and raise proper error in stats are supplied in non-descritized path\n if self.training_data_stats:\n self.validate_training_data_stats(self.training_data_stats)\n\n if categorical_features is None:\n categorical_features = []\n if feature_names is None:\n feature_names = [str(i) for i in range(training_data.shape[1])]\n\n self.categorical_features = list(categorical_features)\n self.feature_names = list(feature_names)\n\n self.discretizer = None\n if discretize_continuous and not sp.sparse.issparse(training_data):\n # Set the discretizer if training data stats are provided\n if self.training_data_stats:\n discretizer = StatsDiscretizer(training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n data_stats=self.training_data_stats,\n random_state=self.random_state)\n\n if discretizer == 'quartile':\n self.discretizer = QuartileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif discretizer == 'decile':\n self.discretizer = DecileDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif discretizer == 'entropy':\n self.discretizer = EntropyDiscretizer(\n training_data, self.categorical_features,\n self.feature_names, labels=training_labels,\n random_state=self.random_state)\n elif isinstance(discretizer, BaseDiscretizer):\n self.discretizer = discretizer\n else:\n raise ValueError('''Discretizer must be 'quartile',''' +\n ''' 'decile', 'entropy' or a''' +\n ''' BaseDiscretizer instance''')\n self.categorical_features = list(range(training_data.shape[1]))\n\n # Get the discretized_training_data when the stats are not provided\n if(self.training_data_stats is None):\n discretized_training_data = self.discretizer.discretize(\n training_data)\n\n if kernel_width is None:\n kernel_width = np.sqrt(training_data.shape[1]) * .75\n kernel_width = float(kernel_width)\n\n if kernel is None:\n def kernel(d, kernel_width):\n return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))\n\n kernel_fn = partial(kernel, kernel_width=kernel_width)\n\n self.feature_selection = feature_selection\n self.base = lime_base.LimeBase(kernel_fn, verbose, random_state=self.random_state)\n self.class_names = class_names\n\n # Though set has no role to play if training data stats are provided\n self.scaler = sklearn.preprocessing.StandardScaler(with_mean=False)\n self.scaler.fit(training_data)\n self.feature_values = {}\n self.feature_frequencies = {}\n\n for feature in self.categorical_features:\n if training_data_stats is None:\n if self.discretizer is not None:\n column = discretized_training_data[:, feature]\n else:\n column = training_data[:, feature]\n\n feature_count = collections.Counter(column)\n values, frequencies = map(list, zip(*(sorted(feature_count.items()))))\n else:\n values = training_data_stats[\"feature_values\"][feature]\n frequencies = training_data_stats[\"feature_frequencies\"][feature]\n\n self.feature_values[feature] = values\n self.feature_frequencies[feature] = (np.array(frequencies) /\n float(sum(frequencies)))\n 
self.scaler.mean_[feature] = 0\n self.scaler.scale_[feature] = 1\n\n @staticmethod\n def convert_and_round(values):\n return ['%.2f' % v for v in values]\n\n @staticmethod\n def validate_training_data_stats(training_data_stats):\n \"\"\"\n Method to validate the structure of training data stats\n \"\"\"\n stat_keys = list(training_data_stats.keys())\n valid_stat_keys = [\"means\", \"mins\", \"maxs\", \"stds\", \"feature_values\", \"feature_frequencies\"]\n missing_keys = list(set(valid_stat_keys) - set(stat_keys))\n if len(missing_keys) > 0:\n raise Exception(\"Missing keys in training_data_stats. Details: %s\" % (missing_keys))\n\n def explain_instance(self,\n data_row,\n predict_fn,\n labels=(1,),\n top_labels=None,\n num_features=10,\n num_samples=5000,\n distance_metric='euclidean',\n model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 1d numpy array or scipy.sparse matrix, corresponding to a row\n predict_fn: prediction function. For classifiers, this should be a\n function that takes a numpy array and outputs prediction\n probabilities. For regressors, this takes a numpy array and\n returns the predictions. For ScikitClassifiers, this is\n `classifier.predict_proba()`. For ScikitRegressors, this\n is `regressor.predict()`. The prediction function needs to work\n on multiple feature vectors (the vectors randomly perturbed\n from the data_row).\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have model_regressor.coef_\n and 'sample_weight' as a parameter to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n if sp.sparse.issparse(data_row) and not sp.sparse.isspmatrix_csr(data_row):\n # Preventative code: if sparse, convert to csr format if not in csr format already\n data_row = data_row.tocsr()\n data, inverse = self.__data_inverse(data_row, num_samples)\n if sp.sparse.issparse(data):\n # Note in sparse case we don't subtract mean since data would become dense\n scaled_data = data.multiply(self.scaler.scale_)\n # Multiplying with csr matrix can return a coo sparse matrix\n if not sp.sparse.isspmatrix_csr(scaled_data):\n scaled_data = scaled_data.tocsr()\n else:\n scaled_data = (data - self.scaler.mean_) / self.scaler.scale_\n distances = sklearn.metrics.pairwise_distances(\n scaled_data,\n scaled_data[0].reshape(1, -1),\n metric=distance_metric\n ).ravel()\n\n yss = predict_fn(inverse)\n\n # for classification, the model needs to provide a list of tuples - classes\n # along with prediction probabilities\n if self.mode == \"classification\":\n if len(yss.shape) == 1:\n raise NotImplementedError(\"LIME does not currently support \"\n \"classifier models without probability \"\n \"scores. 
If this conflicts with your \"\n \"use case, please let us know: \"\n \"https://github.com/datascienceinc/lime/issues/16\")\n elif len(yss.shape) == 2:\n if self.class_names is None:\n self.class_names = [str(x) for x in range(yss[0].shape[0])]\n else:\n self.class_names = list(self.class_names)\n if not np.allclose(yss.sum(axis=1), 1.0):\n warnings.warn(\"\"\"\n Prediction probabilties do not sum to 1, and\n thus does not constitute a probability space.\n Check that you classifier outputs probabilities\n (Not log probabilities, or actual class predictions).\n \"\"\")\n else:\n raise ValueError(\"Your model outputs \"\n \"arrays with {} dimensions\".format(len(yss.shape)))\n\n # for regression, the output should be a one-dimensional array of predictions\n else:\n try:\n if len(yss.shape) != 1 and len(yss[0].shape) == 1:\n yss = np.array([v[0] for v in yss])\n assert isinstance(yss, np.ndarray) and len(yss.shape) == 1\n except AssertionError:\n raise ValueError(\"Your model needs to output single-dimensional \\\n numpyarrays, not arrays of {} dimensions\".format(yss.shape))\n\n predicted_value = yss[0]\n min_y = min(yss)\n max_y = max(yss)\n\n # add a dimension to be compatible with downstream machinery\n yss = yss[:, np.newaxis]\n\n feature_names = copy.deepcopy(self.feature_names)\n if feature_names is None:\n feature_names = [str(x) for x in range(data_row.shape[0])]\n\n if sp.sparse.issparse(data_row):\n values = self.convert_and_round(data_row.data)\n feature_indexes = data_row.indices\n else:\n values = self.convert_and_round(data_row)\n feature_indexes = None\n\n for i in self.categorical_features:\n if self.discretizer is not None and i in self.discretizer.lambdas:\n continue\n name = int(data_row[i])\n if i in self.categorical_names:\n name = self.categorical_names[i][name]\n feature_names[i] = '%s=%s' % (feature_names[i], name)\n values[i] = 'True'\n categorical_features = self.categorical_features\n\n discretized_feature_names = None\n if self.discretizer is not None:\n categorical_features = range(data.shape[1])\n discretized_instance = self.discretizer.discretize(data_row)\n discretized_feature_names = copy.deepcopy(feature_names)\n for f in self.discretizer.names:\n discretized_feature_names[f] = self.discretizer.names[f][int(\n discretized_instance[f])]\n\n domain_mapper = TableDomainMapper(feature_names,\n values,\n scaled_data[0],\n categorical_features=categorical_features,\n discretized_feature_names=discretized_feature_names,\n feature_indexes=feature_indexes)\n ret_exp = explanation.Explanation(domain_mapper,\n mode=self.mode,\n class_names=self.class_names)\n if self.mode == \"classification\":\n ret_exp.predict_proba = yss[0]\n if top_labels:\n labels = np.argsort(yss[0])[-top_labels:]\n ret_exp.top_labels = list(labels)\n ret_exp.top_labels.reverse()\n else:\n ret_exp.predicted_value = predicted_value\n ret_exp.min_value = min_y\n ret_exp.max_value = max_y\n labels = [0]\n for label in labels:\n (ret_exp.intercept[label],\n ret_exp.local_exp[label],\n ret_exp.score, ret_exp.local_pred) = self.base.explain_instance_with_data(\n scaled_data,\n yss,\n distances,\n label,\n num_features,\n model_regressor=model_regressor,\n feature_selection=self.feature_selection)\n\n if self.mode == \"regression\":\n ret_exp.intercept[1] = ret_exp.intercept[0]\n ret_exp.local_exp[1] = [x for x in ret_exp.local_exp[0]]\n ret_exp.local_exp[0] = [(i, -1 * j) for i, j in ret_exp.local_exp[1]]\n\n return ret_exp\n\n def __data_inverse(self,\n data_row,\n num_samples):\n \"\"\"Generates a 
neighborhood around a prediction.\n\n For numerical features, perturb them by sampling from a Normal(0,1) and\n doing the inverse operation of mean-centering and scaling, according to\n the means and stds in the training data. For categorical features,\n perturb by sampling according to the training distribution, and making\n a binary feature that is 1 when the value is the same as the instance\n being explained.\n\n Args:\n data_row: 1d numpy array, corresponding to a row\n num_samples: size of the neighborhood to learn the linear model\n\n Returns:\n A tuple (data, inverse), where:\n data: dense num_samples * K matrix, where categorical features\n are encoded with either 0 (not equal to the corresponding value\n in data_row) or 1. The first row is the original instance.\n inverse: same as data, except the categorical features are not\n binary, but categorical (as the original data)\n \"\"\"\n is_sparse = sp.sparse.issparse(data_row)\n if is_sparse:\n num_cols = data_row.shape[1]\n data = sp.sparse.csr_matrix((num_samples, num_cols), dtype=data_row.dtype)\n else:\n num_cols = data_row.shape[0]\n data = np.zeros((num_samples, num_cols))\n categorical_features = range(num_cols)\n if self.discretizer is None:\n instance_sample = data_row\n scale = self.scaler.scale_\n mean = self.scaler.mean_\n if is_sparse:\n # Perturb only the non-zero values\n non_zero_indexes = data_row.nonzero()[1]\n num_cols = len(non_zero_indexes)\n instance_sample = data_row[:, non_zero_indexes]\n scale = scale[non_zero_indexes]\n mean = mean[non_zero_indexes]\n data = self.random_state.normal(\n 0, 1, num_samples * num_cols).reshape(\n num_samples, num_cols)\n if self.sample_around_instance:\n data = data * scale + instance_sample\n else:\n data = data * scale + mean\n if is_sparse:\n if num_cols == 0:\n data = sp.sparse.csr_matrix((num_samples,\n data_row.shape[1]),\n dtype=data_row.dtype)\n else:\n indexes = np.tile(non_zero_indexes, num_samples)\n indptr = np.array(\n range(0, len(non_zero_indexes) * (num_samples + 1),\n len(non_zero_indexes)))\n data_1d_shape = data.shape[0] * data.shape[1]\n data_1d = data.reshape(data_1d_shape)\n data = sp.sparse.csr_matrix(\n (data_1d, indexes, indptr),\n shape=(num_samples, data_row.shape[1]))\n categorical_features = self.categorical_features\n first_row = data_row\n else:\n first_row = self.discretizer.discretize(data_row)\n data[0] = data_row.copy()\n inverse = data.copy()\n for column in categorical_features:\n values = self.feature_values[column]\n freqs = self.feature_frequencies[column]\n inverse_column = self.random_state.choice(values, size=num_samples,\n replace=True, p=freqs)\n binary_column = (inverse_column == first_row[column]).astype(int)\n binary_column[0] = 1\n inverse_column[0] = data[0, column]\n data[:, column] = binary_column\n inverse[:, column] = inverse_column\n if self.discretizer is not None:\n inverse[1:] = self.discretizer.undiscretize(inverse[1:])\n inverse[0] = data_row\n return data, inverse\n\n\nclass RecurrentTabularExplainer(LimeTabularExplainer):\n \"\"\"\n An explainer for keras-style recurrent neural networks, where the\n input shape is (n_samples, n_timesteps, n_features). 
This class\n just extends the LimeTabularExplainer class and reshapes the training\n data and feature names such that they become something like\n\n (val1_t1, val1_t2, val1_t3, ..., val2_t1, ..., valn_tn)\n\n Each of the methods that take data reshape it appropriately,\n so you can pass in the training/testing data exactly as you\n would to the recurrent neural network.\n\n \"\"\"\n\n def __init__(self, training_data, mode=\"classification\",\n training_labels=None, feature_names=None,\n categorical_features=None, categorical_names=None,\n kernel_width=None, kernel=None, verbose=False, class_names=None,\n feature_selection='auto', discretize_continuous=True,\n discretizer='quartile', random_state=None):\n \"\"\"\n Args:\n training_data: numpy 3d array with shape\n (n_samples, n_timesteps, n_features)\n mode: \"classification\" or \"regression\"\n training_labels: labels for training data. Not required, but may be\n used by discretizer.\n feature_names: list of names (strings) corresponding to the columns\n in the training data.\n categorical_features: list of indices (ints) corresponding to the\n categorical columns. Everything else will be considered\n continuous. Values in these columns MUST be integers.\n categorical_names: map from int to list of names, where\n categorical_names[x][y] represents the name of the yth value of\n column x.\n kernel_width: kernel width for the exponential kernel.\n If None, defaults to sqrt(number of columns) * 0.75\n kernel: similarity kernel that takes euclidean distances and kernel\n width as input and outputs weights in (0,1). If None, defaults to\n an exponential kernel.\n verbose: if true, print local prediction values from linear model\n class_names: list of class names, ordered according to whatever the\n classifier is using. If not present, class names will be '0',\n '1', ...\n feature_selection: feature selection method. can be\n 'forward_selection', 'lasso_path', 'none' or 'auto'.\n See function 'explain_instance_with_data' in lime_base.py for\n details on what each of the options does.\n discretize_continuous: if True, all non-categorical features will\n be discretized into quartiles.\n discretizer: only matters if discretize_continuous is True. Options\n are 'quartile', 'decile', 'entropy' or a BaseDiscretizer\n instance.\n random_state: an integer or numpy.RandomState that will be used to\n generate random numbers. 
If None, the random state will be\n initialized using the internal numpy seed.\n \"\"\"\n\n # Reshape X\n n_samples, n_timesteps, n_features = training_data.shape\n training_data = np.transpose(training_data, axes=(0, 2, 1)).reshape(\n n_samples, n_timesteps * n_features)\n self.n_timesteps = n_timesteps\n self.n_features = n_features\n\n # Update the feature names\n feature_names = ['{}_t-{}'.format(n, n_timesteps - (i + 1))\n for n in feature_names for i in range(n_timesteps)]\n\n # Send off the the super class to do its magic.\n super(RecurrentTabularExplainer, self).__init__(\n training_data,\n mode=mode,\n training_labels=training_labels,\n feature_names=feature_names,\n categorical_features=categorical_features,\n categorical_names=categorical_names,\n kernel_width=kernel_width,\n kernel=kernel,\n verbose=verbose,\n class_names=class_names,\n feature_selection=feature_selection,\n discretize_continuous=discretize_continuous,\n discretizer=discretizer,\n random_state=random_state)\n\n def _make_predict_proba(self, func):\n \"\"\"\n The predict_proba method will expect 3d arrays, but we are reshaping\n them to 2D so that LIME works correctly. This wraps the function\n you give in explain_instance to first reshape the data to have\n the shape the the keras-style network expects.\n \"\"\"\n\n def predict_proba(X):\n n_samples = X.shape[0]\n new_shape = (n_samples, self.n_features, self.n_timesteps)\n X = np.transpose(X.reshape(new_shape), axes=(0, 2, 1))\n return func(X)\n\n return predict_proba\n\n def explain_instance(self, data_row, classifier_fn, labels=(1,),\n top_labels=None, num_features=10, num_samples=5000,\n distance_metric='euclidean', model_regressor=None):\n \"\"\"Generates explanations for a prediction.\n\n First, we generate neighborhood data by randomly perturbing features\n from the instance (see __data_inverse). We then learn locally weighted\n linear models on this neighborhood data to explain each of the classes\n in an interpretable way (see lime_base.py).\n\n Args:\n data_row: 2d numpy array, corresponding to a row\n classifier_fn: classifier prediction probability function, which\n takes a numpy array and outputs prediction probabilities. For\n ScikitClassifiers , this is classifier.predict_proba.\n labels: iterable with labels to be explained.\n top_labels: if not None, ignore labels and produce explanations for\n the K labels with highest prediction probabilities, where K is\n this parameter.\n num_features: maximum number of features present in explanation\n num_samples: size of the neighborhood to learn the linear model\n distance_metric: the distance metric to use for weights.\n model_regressor: sklearn regressor to use in explanation. Defaults\n to Ridge regression in LimeBase. Must have\n model_regressor.coef_ and 'sample_weight' as a parameter\n to model_regressor.fit()\n\n Returns:\n An Explanation object (see explanation.py) with the corresponding\n explanations.\n \"\"\"\n\n # Flatten input so that the normal explainer can handle it\n data_row = data_row.T.reshape(self.n_timesteps * self.n_features)\n\n # Wrap the classifier to reshape input\n classifier_fn = self._make_predict_proba(classifier_fn)\n return super(RecurrentTabularExplainer, self).explain_instance(\n data_row, classifier_fn,\n labels=labels,\n top_labels=top_labels,\n num_features=num_features,\n num_samples=num_samples,\n distance_metric=distance_metric,\n model_regressor=model_regressor)\n"
] |
[
[
"scipy.sparse.issparse",
"numpy.array",
"numpy.zeros",
"sklearn.preprocessing.StandardScaler",
"scipy.sparse.isspmatrix_csr",
"numpy.tile",
"numpy.exp",
"sklearn.utils.check_random_state",
"numpy.transpose",
"numpy.sqrt",
"numpy.argsort",
"scipy.sparse.csr_matrix"
]
] |
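The lime_tabular.py source in the row above defines the two entry points exercised below. A minimal usage sketch, not part of the original dataset row: it assumes the `lime` and `scikit-learn` packages are installed, and the iris dataset plus RandomForestClassifier are illustrative stand-ins rather than anything the file itself requires.

```python
# Usage sketch for the LimeTabularExplainer defined above.
# Assumed dependencies: lime, scikit-learn (not part of the original row).
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
from lime.lime_tabular import LimeTabularExplainer

iris = load_iris()
clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(iris.data, iris.target)

# Training data drives the scaler, discretizer and categorical frequencies
# used by __data_inverse when perturbing the explained instance.
explainer = LimeTabularExplainer(
    iris.data,
    feature_names=iris.feature_names,
    class_names=iris.target_names,
    discretize_continuous=True)

# Fit a locally weighted linear model around one instance.
exp = explainer.explain_instance(iris.data[0], clf.predict_proba,
                                 num_features=4)
print(exp.as_list())  # [(feature description, weight), ...]
```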
benwmcdowell/charge_density_methods_VASP
|
[
"c1d965b62e638e4509c8b2b94fc797568aa46919"
] |
[
"charge_density_methods_VASP/2d_slice.py"
] |
[
"from numpy import zeros, shape, dot\nfrom numpy.linalg import norm\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Patch\n\nfrom lib import parse_CHGCAR, parse_LOCPOT\n\ndef plot_2d_slice(ifile,pos,**args):\n if 'dim' in args:\n dim=args['dim']\n else:\n dim=2\n \n if 'filetype' in args:\n filetype=args['filetype']\n else:\n filetype='LOCPOT'\n \n if filetype=='LOCPOT':\n e,lv,coord,atomtypes,atomnums=parse_LOCPOT(ifile)\n else:\n e,lv,coord,atomtypes,atomnums=parse_CHGCAR(ifile)\n \n if 'ref' in args:\n for i in args['ref']:\n if filetype=='LOCPOT':\n tempvar=parse_LOCPOT(i)[0]\n else:\n tempvar=parse_CHGCAR(i)[0]\n e-=tempvar\n \n if 'direct' in args:\n pos=norm(dot(pos,lv[dim]))\n \n if 'tol' in args:\n tol=round(args['tol']/norm(lv[dim])*shape(e)[dim])\n else:\n tol=0\n \n plot_atoms=[]\n if 'overlay_atoms' in args:\n ranges=args['overlay_atoms']\n for i in range(sum(atomnums)):\n for j in range(3):\n if coord[i][j] > max(ranges[j]) or coord[i][j] < min(ranges[j]):\n break\n else:\n plot_atoms.append(i)\n if 'atom_sizes' in args:\n sizes=args['atom_sizes']\n else:\n sizes=[800 for i in range(len(atomnums))]\n \n if 'atom_colors' in args:\n colors=args['atom_colors']\n else:\n colors=['black' for i in range(len(atomnums))]\n \n pos_dim=[]\n for i in range(3):\n if i!=dim:\n pos_dim.append(i)\n \n xy=zeros((shape(e)[pos_dim[0]],shape(e)[pos_dim[1]],2))\n for i in range(len(xy)):\n for j in range(len(xy[i])):\n xy[i][j]+=lv[pos_dim[0]][:2]*i/(len(xy)+1)+lv[pos_dim[1]][:2]*j/(len(xy[i])+1)\n \n pos=round(pos*shape(e)[dim]/norm(lv[dim]))\n z=zeros((shape(e)[pos_dim[0]],shape(e)[pos_dim[1]]))\n for i in range(-tol,tol+1):\n if dim==0:\n z+=e[pos,:,:]/(2*tol+1)\n if dim==1:\n z+=e[:,pos,:]/(2*tol+1)\n if dim==2:\n z+=e[:,:,pos]/(2*tol+1)\n \n plt.figure()\n plt.pcolormesh(xy[:,:,0],xy[:,:,1],z,shading='nearest',cmap='jet')\n plt.colorbar()\n for i in plot_atoms:\n for j in range(len(atomtypes)):\n if i < sum(atomnums[:j+1]):\n break\n plt.scatter(coord[i][pos_dim[0]],coord[i][pos_dim[1]],color=colors[j],s=sizes[j])\n patches=[]\n if len(plot_atoms)>0:\n for i in range(len(atomtypes)):\n patches.append(Patch(color=colors[i],label=atomtypes[i]))\n \n plt.xlabel('position / $\\AA$')\n plt.ylabel('position / $\\AA$')\n plt.legend(handles=patches)\n plt.show()\n"
] |
[
[
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.pcolormesh",
"numpy.dot",
"numpy.linalg.norm",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.legend",
"numpy.shape",
"matplotlib.pyplot.figure",
"matplotlib.patches.Patch",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.show"
]
] |
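plot_2d_slice in the row above needs the repo's local `lib` parsers and real VASP CHGCAR/LOCPOT files, so it is not runnable in isolation. Below is a self-contained sketch of just the slicing step it performs, with a synthetic grid and orthogonal lattice vectors as invented stand-ins; the full function additionally builds an xy mesh from the in-plane lattice vectors and can average over a window of +/- tol planes.

```python
# Core idea of plot_2d_slice: convert a Cartesian position along the
# slice normal into a grid index, extract that plane, and plot it.
import numpy as np
import matplotlib.pyplot as plt

e = np.random.rand(40, 40, 40)      # stand-in for a parsed density grid
lv = np.diag([10.0, 10.0, 10.0])    # stand-in lattice vectors, in Angstrom
dim, pos = 2, 5.0                   # slice normal axis and position

idx = round(pos * e.shape[dim] / np.linalg.norm(lv[dim]))
z = e.take(idx, axis=dim)           # the 2D plane at that grid index

plt.pcolormesh(z, shading='nearest', cmap='jet')
plt.colorbar()
plt.show()
```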
rickyHong/TensorRT-inference-server-repl
|
[
"024e6760d4efd2f1bbeb242d7a306851ccb5ea62"
] |
[
"qa/common/gen_qa_sequence_models.py"
] |
[
"# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport argparse\nfrom builtins import range\nimport os\nimport sys\nimport numpy as np\nimport gen_ensemble_model_utils as emu\n\nFLAGS = None\nnp_dtype_string = np.dtype(object)\n\ndef np_to_model_dtype(np_dtype):\n if np_dtype == np.bool:\n return \"TYPE_BOOL\"\n elif np_dtype == np.int8:\n return \"TYPE_INT8\"\n elif np_dtype == np.int16:\n return \"TYPE_INT16\"\n elif np_dtype == np.int32:\n return \"TYPE_INT32\"\n elif np_dtype == np.int64:\n return \"TYPE_INT64\"\n elif np_dtype == np.uint8:\n return \"TYPE_UINT8\"\n elif np_dtype == np.uint16:\n return \"TYPE_UINT16\"\n elif np_dtype == np.float16:\n return \"TYPE_FP16\"\n elif np_dtype == np.float32:\n return \"TYPE_FP32\"\n elif np_dtype == np.float64:\n return \"TYPE_FP64\"\n elif np_dtype == np_dtype_string:\n return \"TYPE_STRING\"\n return None\n\ndef np_to_tf_dtype(np_dtype):\n if np_dtype == np.bool:\n return tf.bool\n elif np_dtype == np.int8:\n return tf.int8\n elif np_dtype == np.int16:\n return tf.int16\n elif np_dtype == np.int32:\n return tf.int32\n elif np_dtype == np.int64:\n return tf.int64\n elif np_dtype == np.uint8:\n return tf.uint8\n elif np_dtype == np.uint16:\n return tf.uint16\n elif np_dtype == np.float16:\n return tf.float16\n elif np_dtype == np.float32:\n return tf.float32\n elif np_dtype == np.float64:\n return tf.float64\n elif np_dtype == np_dtype_string:\n return tf.string\n return None\n\ndef np_to_c2_dtype(np_dtype):\n if np_dtype == np.bool:\n return c2core.DataType.BOOL\n elif np_dtype == np.int8:\n return c2core.DataType.INT8\n elif np_dtype == np.int16:\n return c2core.DataType.INT16\n elif np_dtype == np.int32:\n return c2core.DataType.INT32\n elif np_dtype == np.int64:\n return c2core.DataType.INT64\n elif np_dtype == np.uint8:\n return c2core.DataType.UINT8\n elif np_dtype == np.uint16:\n return c2core.DataType.UINT16\n elif np_dtype == np.float16:\n return c2core.DataType.FLOAT16\n elif np_dtype == np.float32:\n return c2core.DataType.FLOAT\n elif np_dtype == np.float64:\n 
return c2core.DataType.DOUBLE\n elif np_dtype == np_dtype_string:\n return c2core.DataType.STRING\n return None\n\ndef np_to_trt_dtype(np_dtype):\n if np_dtype == np.int8:\n return trt.infer.DataType.INT8\n elif np_dtype == np.int32:\n return trt.infer.DataType.INT32\n elif np_dtype == np.float16:\n return trt.infer.DataType.HALF\n elif np_dtype == np.float32:\n return trt.infer.DataType.FLOAT\n return None\n\ndef np_to_onnx_dtype(np_dtype):\n if np_dtype == np.bool:\n return onnx.TensorProto.BOOL\n elif np_dtype == np.int8:\n return onnx.TensorProto.INT8\n elif np_dtype == np.int16:\n return onnx.TensorProto.INT16\n elif np_dtype == np.int32:\n return onnx.TensorProto.INT32\n elif np_dtype == np.int64:\n return onnx.TensorProto.INT64\n elif np_dtype == np.uint8:\n return onnx.TensorProto.UINT8\n elif np_dtype == np.uint16:\n return onnx.TensorProto.UINT16\n elif np_dtype == np.float16:\n return onnx.TensorProto.FLOAT16\n elif np_dtype == np.float32:\n return onnx.TensorProto.FLOAT\n elif np_dtype == np.float64:\n return onnx.TensorProto.DOUBLE\n elif np_dtype == np_dtype_string:\n return onnx.TensorProto.STRING\n return None\n\ndef create_tf_modelfile(\n create_savedmodel, models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_tf_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n tf_input_dtype = np_to_tf_dtype(dtype)\n tf_dtype = tf_input_dtype\n\n # If the input is a string then use int32 for operation and just\n # cast to/from string for input and output.\n if tf_input_dtype == tf.string:\n tf_dtype = tf.int32\n\n # Create the model. If non-batching then don't include the batch\n # dimension.\n tf.reset_default_graph()\n if create_savedmodel and (max_batch == 0):\n input0 = tf.placeholder(tf_input_dtype, [1,], \"INPUT\")\n if tf_input_dtype == tf.string:\n input0 = tf.strings.to_number(tf.strings.join([\"0\", input0]), tf_dtype)\n start0 = tf.placeholder(tf_dtype, [1,], \"START\")\n ready0 = tf.placeholder(tf_dtype, [1,], \"READY\")\n acc = tf.get_variable(\"ACC\", [1,], dtype=tf_dtype)\n tmp = tf.where(tf.equal(start0, 1), input0, tf.add(acc, input0))\n newacc = tf.where(tf.equal(ready0, 1), tmp, acc)\n assign = tf.assign(acc, newacc)\n if tf_input_dtype == tf.string:\n output0 = tf.dtypes.as_string(assign, name=\"OUTPUT\")\n else:\n output0 = tf.identity(assign, name=\"OUTPUT\")\n else:\n # For batching we can't use a tf.variable to hold the\n # accumulated values since that forces the size of the output\n # to the size of the variable (which must be a max-batch-size\n # vector since require one accumulator each), instead of the\n # output shape being [None, 1]. So instead we just return 0 if\n # not-ready and 'INPUT'+'START' otherwise... 
the tests know to\n # expect this.\n input0 = tf.placeholder(tf_input_dtype, [None,] + tu.shape_to_tf_shape(shape), \"INPUT\")\n if tf_input_dtype == tf.string:\n input0 = tf.strings.to_number(tf.strings.join([\"0\", input0]), tf_dtype)\n start0 = tf.placeholder(tf_dtype, [None,1], \"START\")\n ready0 = tf.placeholder(tf_dtype, [None,1], \"READY\")\n tmp = tf.where(tf.equal(ready0, 1), tf.add(start0, input0),\n tf.zeros(tf.shape(input0), dtype=tf_dtype))\n if tf_input_dtype == tf.string:\n output0 = tf.dtypes.as_string(tmp, name=\"OUTPUT\")\n else:\n output0 = tf.identity(tmp, name=\"OUTPUT\")\n\n # Use a different model name for the non-batching variant\n if create_savedmodel:\n model_name = tu.get_sequence_model_name(\n \"savedmodel_nobatch\" if max_batch == 0 else \"savedmodel\", dtype)\n else:\n model_name = tu.get_sequence_model_name(\n \"graphdef_nobatch\" if max_batch == 0 else \"graphdef\", dtype)\n\n model_version_dir = models_dir + \"/\" + model_name + \"/\" + str(model_version)\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n if create_savedmodel:\n with tf.Session() as sess:\n sess.run(tf.initializers.global_variables())\n input0_tensor = tf.get_default_graph().get_tensor_by_name(\"INPUT:0\")\n start0_tensor = tf.get_default_graph().get_tensor_by_name(\"START:0\")\n ready0_tensor = tf.get_default_graph().get_tensor_by_name(\"READY:0\")\n output0_tensor = tf.get_default_graph().get_tensor_by_name(\"OUTPUT:0\")\n tf.saved_model.simple_save(sess, model_version_dir + \"/model.savedmodel\",\n inputs={\"INPUT\": input0_tensor, \"START\": start0_tensor,\n \"READY\" : ready0_tensor},\n outputs={\"OUTPUT\": output0_tensor})\n else:\n with tf.Session() as sess:\n sess.run(tf.initializers.global_variables())\n graph_io.write_graph(sess.graph.as_graph_def(), model_version_dir,\n \"model.graphdef\", as_text=False)\n\ndef create_tf_modelconfig(\n create_savedmodel, models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_tf_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n # Use a different model name for the non-batching variant\n if create_savedmodel:\n model_name = tu.get_sequence_model_name(\n \"savedmodel_nobatch\" if max_batch == 0 else \"savedmodel\", dtype)\n else:\n model_name = tu.get_sequence_model_name(\n \"graphdef_nobatch\" if max_batch == 0 else \"graphdef\", dtype)\n\n config_dir = models_dir + \"/\" + model_name\n config = '''\nname: \"{}\"\nplatform: \"{}\"\nmax_batch_size: {}\nsequence_batching {{\n max_sequence_idle_microseconds: 5000000\n control_input [\n {{\n name: \"START\"\n control [\n {{\n kind: CONTROL_SEQUENCE_START\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }},\n {{\n name: \"READY\"\n control [\n {{\n kind: CONTROL_SEQUENCE_READY\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }}\n ]\n}}\ninput [\n {{\n name: \"INPUT\"\n data_type: {}\n dims: [ {} ]\n }}\n]\noutput [\n {{\n name: \"OUTPUT\"\n data_type: {}\n dims: [ 1 ]\n }}\n]\ninstance_group [\n {{\n kind: KIND_GPU\n }}\n]\n'''.format(model_name,\n \"tensorflow_savedmodel\" if create_savedmodel else \"tensorflow_graphdef\",\n max_batch,\n \"fp32\" if dtype == np.float32 else \"int32\",\n \"fp32\" if dtype == np.float32 else \"int32\",\n np_to_model_dtype(dtype), tu.shape_to_dims_str(shape),\n np_to_model_dtype(dtype))\n\n try:\n os.makedirs(config_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(config_dir + \"/config.pbtxt\", \"w\") as cfile:\n cfile.write(config)\n\n\ndef create_netdef_modelfile(\n models_dir, 
model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_c2_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n c2_dtype = np_to_c2_dtype(dtype)\n model_name = tu.get_sequence_model_name(\n \"netdef_nobatch\" if max_batch == 0 else \"netdef\", dtype)\n\n # Create the model. For now don't implement a proper accumulator\n # just return 0 if not-ready and 'INPUT'+'START' otherwise... the\n # tests know to expect this.\n model = c2model_helper.ModelHelper(name=model_name)\n model.net.Add([\"INPUT\", \"START\"], \"add\")\n model.net.Sub([\"READY\", \"READY\"], \"zeros\")\n model.net.NE([\"READY\", \"zeros\"], \"compare\")\n model.net.Where([\"compare\", \"add\", \"zeros\"], \"OUTPUT\")\n\n model_version_dir = models_dir + \"/\" + model_name + \"/\" + str(model_version)\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(model_version_dir + \"/model.netdef\", \"wb\") as f:\n f.write(model.Proto().SerializeToString())\n with open(model_version_dir + \"/init_model.netdef\", \"wb\") as f:\n f.write(model.InitProto().SerializeToString())\n\n\ndef create_netdef_modelconfig(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_c2_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n model_name = tu.get_sequence_model_name(\n \"netdef_nobatch\" if max_batch == 0 else \"netdef\", dtype)\n config_dir = models_dir + \"/\" + model_name\n config = '''\nname: \"{}\"\nplatform: \"caffe2_netdef\"\nmax_batch_size: {}\nsequence_batching {{\n max_sequence_idle_microseconds: 5000000\n control_input [\n {{\n name: \"START\"\n control [\n {{\n kind: CONTROL_SEQUENCE_START\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }},\n {{\n name: \"READY\"\n control [\n {{\n kind: CONTROL_SEQUENCE_READY\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }}\n ]\n}}\ninput [\n {{\n name: \"INPUT\"\n data_type: {}\n dims: [ {} ]\n }}\n]\noutput [\n {{\n name: \"OUTPUT\"\n data_type: {}\n dims: [ 1 ]\n }}\n]\ninstance_group [\n {{\n kind: KIND_CPU\n }}\n]\n'''.format(model_name, max_batch,\n \"int32\" if dtype == np.int32 else \"fp32\",\n \"int32\" if dtype == np.int32 else \"fp32\",\n np_to_model_dtype(dtype), tu.shape_to_dims_str(shape),\n np_to_model_dtype(dtype))\n\n try:\n os.makedirs(config_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(config_dir + \"/config.pbtxt\", \"w\") as cfile:\n cfile.write(config)\n\n\ndef create_plan_modelfile(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_trt_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n trt_dtype = np_to_trt_dtype(dtype)\n\n # Create the model. For now don't implement a proper accumulator\n # just return 0 if not-ready and 'INPUT'+'START' otherwise... 
the\n # tests know to expect this.\n G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)\n builder = trt.infer.create_infer_builder(G_LOGGER)\n network = builder.create_network()\n in0 = network.add_input(\"INPUT\", trt_dtype, shape)\n start0 = network.add_input(\"START\", trt_dtype, [1, 1, 1])\n ready0 = network.add_input(\"READY\", trt_dtype, [1, 1, 1])\n add = network.add_elementwise(in0, start0, trt.infer.ElementWiseOperation.SUM)\n out0 = network.add_elementwise(add.get_output(0), ready0, trt.infer.ElementWiseOperation.PROD)\n\n out0.get_output(0).set_name(\"OUTPUT\")\n network.mark_output(out0.get_output(0))\n\n builder.set_max_batch_size(max(1, max_batch))\n builder.set_max_workspace_size(1 << 20)\n engine = builder.build_cuda_engine(network)\n network.destroy()\n\n model_name = tu.get_sequence_model_name(\n \"plan_nobatch\" if max_batch == 0 else \"plan\", dtype)\n model_version_dir = models_dir + \"/\" + model_name + \"/\" + str(model_version)\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n lengine = trt.lite.Engine(engine_stream=engine.serialize(),\n max_batch_size=max(1, max_batch))\n lengine.save(model_version_dir + \"/model.plan\")\n engine.destroy()\n builder.destroy()\n\n\ndef create_plan_modelconfig(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_trt_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n model_name = tu.get_sequence_model_name(\n \"plan_nobatch\" if max_batch == 0 else \"plan\", dtype)\n config_dir = models_dir + \"/\" + model_name\n config = '''\nname: \"{}\"\nplatform: \"tensorrt_plan\"\nmax_batch_size: {}\nsequence_batching {{\n max_sequence_idle_microseconds: 5000000\n control_input [\n {{\n name: \"START\"\n control [\n {{\n kind: CONTROL_SEQUENCE_START\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }},\n {{\n name: \"READY\"\n control [\n {{\n kind: CONTROL_SEQUENCE_READY\n {}_false_true: [ 0, 1 ]\n }}\n ]\n }}\n ]\n}}\ninput [\n {{\n name: \"INPUT\"\n data_type: {}\n dims: [ {} ]\n }}\n]\noutput [\n {{\n name: \"OUTPUT\"\n data_type: {}\n dims: [ 1, 1, 1 ]\n }}\n]\ninstance_group [\n {{\n kind: KIND_GPU\n }}\n]\n'''.format(model_name, max_batch,\n \"int32\" if dtype == np.int32 else \"fp32\",\n \"int32\" if dtype == np.int32 else \"fp32\",\n np_to_model_dtype(dtype), tu.shape_to_dims_str(shape),\n np_to_model_dtype(dtype))\n\n try:\n os.makedirs(config_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(config_dir + \"/config.pbtxt\", \"w\") as cfile:\n cfile.write(config)\n\ndef create_onnx_modelfile(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_onnx_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n model_name = tu.get_sequence_model_name(\n \"onnx_nobatch\" if max_batch == 0 else \"onnx\", dtype)\n model_version_dir = models_dir + \"/\" + model_name + \"/\" + str(model_version)\n\n # Create the model. For now don't implement a proper accumulator\n # just return 0 if not-ready and 'INPUT'+'START' otherwise... 
the\n # tests know to expect this.\n onnx_dtype = np_to_onnx_dtype(dtype)\n onnx_input_shape, idx = tu.shape_to_onnx_shape(shape, 0)\n onnx_start_shape, idx = tu.shape_to_onnx_shape(shape, idx)\n onnx_ready_shape, idx = tu.shape_to_onnx_shape(shape, idx)\n onnx_output_shape, idx = tu.shape_to_onnx_shape(shape, idx)\n\n # If the input is a string then use int32 for operation and just\n # cast to/from string for input and output.\n onnx_control_dtype = onnx_dtype\n if onnx_dtype == onnx.TensorProto.STRING:\n onnx_control_dtype = onnx.TensorProto.INT32\n\n batch_dim = [] if max_batch == 0 else [max_batch]\n\n onnx_input = onnx.helper.make_tensor_value_info(\"INPUT\", onnx_dtype, batch_dim + onnx_input_shape)\n onnx_start = onnx.helper.make_tensor_value_info(\"START\", onnx_control_dtype, batch_dim + onnx_start_shape)\n onnx_ready = onnx.helper.make_tensor_value_info(\"READY\", onnx_control_dtype, batch_dim + onnx_ready_shape)\n onnx_output = onnx.helper.make_tensor_value_info(\"OUTPUT\", onnx_dtype, batch_dim + onnx_output_shape)\n\n internal_input = onnx.helper.make_node(\"Identity\", [\"INPUT\"], [\"_INPUT\"])\n\n # cast int8, int16 input to higer precision int as Onnx Add/Sub operator doesn't support those type\n # Also casting String data type to int32\n if ((onnx_dtype == onnx.TensorProto.INT8) or (onnx_dtype == onnx.TensorProto.INT16) or\n (onnx_dtype == onnx.TensorProto.STRING)):\n internal_input = onnx.helper.make_node(\"Cast\", [\"INPUT\"], [\"_INPUT\"], to=onnx.TensorProto.INT32)\n\n add = onnx.helper.make_node(\"Add\", [\"_INPUT\", \"START\"], [\"add\"])\n # Take advantage of knowledge that the READY false value is 0 and true is 1\n mul = onnx.helper.make_node(\"Mul\", [\"READY\", \"add\"], [\"CAST\"])\n cast = onnx.helper.make_node(\"Cast\", [\"CAST\"], [\"OUTPUT\"], to=onnx_dtype)\n\n # Avoid cast from float16 to float16\n # (bug in Onnx Runtime, cast from float16 to float16 will become cast from float16 to float32)\n if onnx_dtype == onnx.TensorProto.FLOAT16:\n cast = onnx.helper.make_node(\"Identity\", [\"CAST\"], [\"OUTPUT\"])\n\n onnx_nodes = [internal_input, add, mul, cast]\n onnx_inputs = [onnx_input, onnx_start, onnx_ready]\n onnx_outputs = [onnx_output]\n\n graph_proto = onnx.helper.make_graph(onnx_nodes, model_name, onnx_inputs, onnx_outputs)\n model_def = onnx.helper.make_model(graph_proto, producer_name=\"TRTIS\")\n\n try:\n os.makedirs(model_version_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n onnx.save(model_def, model_version_dir + \"/model.onnx\")\n\n\ndef create_onnx_modelconfig(\n models_dir, model_version, max_batch, dtype, shape):\n\n if not tu.validate_for_onnx_model(dtype, dtype, dtype, shape, shape, shape):\n return\n\n model_name = tu.get_sequence_model_name(\n \"onnx_nobatch\" if max_batch == 0 else \"onnx\", dtype)\n config_dir = models_dir + \"/\" + model_name\n\n # Must make sure all Onnx models will be loaded to the same GPU if they are\n # run on GPU. 
This is due to the current limitation of Onnx Runtime\n # https://github.com/microsoft/onnxruntime/issues/1034\n instance_group_string = '''\ninstance_group [\n {\n kind: KIND_GPU\n gpus: [ 0 ]\n }\n]\n'''\n # [TODO] move create_general_modelconfig() out of emu as it is general\n # enough for all backends to use\n config = emu.create_general_modelconfig(model_name, \"onnxruntime_onnx\", max_batch,\n [dtype], [shape], [None], [dtype], [shape], [None], [None],\n force_tensor_number_suffix=False, instance_group_str=instance_group_string)\n\n config += '''\nsequence_batching {{\n max_sequence_idle_microseconds: 5000000\n control_input [\n {{\n name: \"START\"\n control [\n {{\n kind: CONTROL_SEQUENCE_START\n {type}_false_true: [ 0, 1 ]\n }}\n ]\n }},\n {{\n name: \"READY\"\n control [\n {{\n kind: CONTROL_SEQUENCE_READY\n {type}_false_true: [ 0, 1 ]\n }}\n ]\n }}\n ]\n}}\n'''.format(type=\"fp32\" if dtype == np.float32 else \"int32\")\n\n try:\n os.makedirs(config_dir)\n except OSError as ex:\n pass # ignore existing dir\n\n with open(config_dir + \"/config.pbtxt\", \"w\") as cfile:\n cfile.write(config)\n\n\ndef create_models(models_dir, dtype, shape, no_batch=True):\n model_version = 1\n\n if FLAGS.graphdef:\n create_tf_modelconfig(False, models_dir, model_version, 8, dtype, shape)\n create_tf_modelfile(False, models_dir, model_version, 8, dtype, shape)\n if no_batch:\n create_tf_modelconfig(False, models_dir, model_version, 0, dtype, shape)\n create_tf_modelfile(False, models_dir, model_version, 0, dtype, shape)\n\n if FLAGS.savedmodel:\n create_tf_modelconfig(True, models_dir, model_version, 8, dtype, shape)\n create_tf_modelfile(True, models_dir, model_version, 8, dtype, shape)\n if no_batch:\n create_tf_modelconfig(True, models_dir, model_version, 0, dtype, shape)\n create_tf_modelfile(True, models_dir, model_version, 0, dtype, shape)\n\n if FLAGS.netdef:\n create_netdef_modelconfig(models_dir, model_version, 8, dtype, shape)\n create_netdef_modelfile(models_dir, model_version, 8, dtype, shape)\n if no_batch:\n create_netdef_modelconfig(models_dir, model_version, 0, dtype, shape)\n create_netdef_modelfile(models_dir, model_version, 0, dtype, shape)\n\n if FLAGS.tensorrt:\n create_plan_modelconfig(models_dir, model_version, 8, dtype, shape + [1, 1])\n create_plan_modelfile(models_dir, model_version, 8, dtype, shape + [1, 1])\n if no_batch:\n create_plan_modelconfig(models_dir, model_version, 0, dtype, shape + [1, 1])\n create_plan_modelfile(models_dir, model_version, 0, dtype, shape + [1, 1])\n\n if FLAGS.onnx:\n create_onnx_modelconfig(models_dir, model_version, 8, dtype, shape)\n create_onnx_modelfile(models_dir, model_version, 8, dtype, shape)\n if no_batch:\n create_onnx_modelconfig(models_dir, model_version, 0, dtype, shape)\n create_onnx_modelfile(models_dir, model_version, 0, dtype, shape)\n\n if FLAGS.ensemble:\n for pair in emu.platform_types_and_validation():\n if pair[0] == \"plan\":\n shape = shape + [1, 1]\n if not pair[1](dtype, dtype, dtype,\n shape, shape, shape):\n continue\n\n emu.create_sequence_ensemble_modelconfig(\n pair[0], models_dir, 8, model_version, shape, dtype)\n emu.create_sequence_ensemble_modelfile(\n pair[0], models_dir, 8, model_version, shape, dtype)\n if no_batch:\n emu.create_sequence_ensemble_modelconfig(\n pair[0], models_dir, 0, model_version, shape, dtype)\n emu.create_sequence_ensemble_modelfile(\n pair[0], models_dir, 0, model_version, shape, dtype)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n 
parser.add_argument('--models_dir', type=str, required=True,\n help='Top-level model directory')\n parser.add_argument('--graphdef', required=False, action='store_true',\n help='Generate GraphDef models')\n parser.add_argument('--savedmodel', required=False, action='store_true',\n help='Generate SavedModel models')\n parser.add_argument('--netdef', required=False, action='store_true',\n help='Generate NetDef models')\n parser.add_argument('--tensorrt', required=False, action='store_true',\n help='Generate TensorRT PLAN models')\n parser.add_argument('--onnx', required=False, action='store_true',\n help='Generate Onnx models')\n parser.add_argument('--variable', required=False, action='store_true',\n help='Used variable-shape tensors for input/output')\n parser.add_argument('--ensemble', required=False, action='store_true',\n help='Generate ensemble models against the models'\n + ' in all platforms. Note that the models generated'\n + ' are not completed.')\n FLAGS, unparsed = parser.parse_known_args()\n\n if FLAGS.netdef:\n from caffe2.python import core as c2core\n from caffe2.python import model_helper as c2model_helper\n if FLAGS.graphdef or FLAGS.savedmodel:\n import tensorflow as tf\n from tensorflow.python.framework import graph_io, graph_util\n if FLAGS.tensorrt:\n import tensorrt.legacy as trt\n if FLAGS.onnx:\n import onnx\n\n import test_util as tu\n\n # Tests with models that accept fixed-shape input/output tensors\n if not FLAGS.variable:\n create_models(FLAGS.models_dir, np.float32, [1,])\n create_models(FLAGS.models_dir, np.int32, [1,])\n create_models(FLAGS.models_dir, np_dtype_string, [1,])\n\n # Tests with models that accept variable-shape input/output tensors\n if FLAGS.variable:\n create_models(FLAGS.models_dir, np.int32, [-1,], False)\n create_models(FLAGS.models_dir, np.float32, [-1,], False)\n create_models(FLAGS.models_dir, np_dtype_string, [-1,], False)\n\n if FLAGS.ensemble:\n # Create nop models used in ensemble\n for model_dtype in [\"TYPE_INT32\", \"TYPE_FP32\"]:\n # 3D shape for TensorRT Plan\n for model_shape in [(-1,), (-1, -1, -1)]:\n emu.create_nop_modelconfig(FLAGS.models_dir, model_shape, model_dtype)\n"
] |
[
[
"tensorflow.strings.join",
"tensorflow.saved_model.simple_save",
"tensorflow.shape",
"tensorflow.assign",
"tensorflow.get_default_graph",
"tensorflow.reset_default_graph",
"tensorflow.equal",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.get_variable",
"tensorflow.identity",
"tensorflow.add",
"tensorflow.dtypes.as_string",
"numpy.dtype",
"tensorflow.initializers.global_variables"
]
] |
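As the comments inside create_tf_modelfile, create_netdef_modelfile and the PLAN/ONNX builders above state, the batching variants of these generated sequence models keep no real accumulator: each step returns 0 when READY is 0 and INPUT + START otherwise, and the QA tests are written to expect exactly that. A small numpy sketch of that contract (the `step` function name is illustrative only):

```python
# The per-step contract implemented by the generated sequence models.
import numpy as np

def step(inp, start, ready):
    # READY gates the output; START is 1 only on a sequence's first step.
    return np.where(ready == 1, inp + start, np.zeros_like(inp))

inp   = np.array([[3], [5], [7]], dtype=np.int32)
start = np.array([[1], [0], [0]], dtype=np.int32)
ready = np.array([[1], [1], [0]], dtype=np.int32)  # last slot is padding
print(step(inp, start, ready))  # [[4] [5] [0]]
```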
Rhcsky/cifar100-classification
|
[
"00a099b608d798f59f1781375687e10e7fd3a250"
] |
[
"model/resnet.py"
] |
[
"# Original code: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n\nimport torch.nn as nn\nimport math\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * Bottleneck.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * Bottleneck.expansion)\n self.relu = nn.ReLU(inplace=True)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(self, depth, num_classes, bottleneck=False):\n super(ResNet, self).__init__()\n self.inplanes = 16\n\n if bottleneck:\n n = int((depth - 2) / 9)\n block = Bottleneck\n else:\n n = int((depth - 2) / 6)\n block = BasicBlock\n\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 16, n)\n self.layer2 = self._make_layer(block, 32, n, stride=2)\n self.layer3 = self._make_layer(block, 64, n, stride=2)\n self.avgpool = nn.AvgPool2d(8)\n self.fc = nn.Linear(64 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\nif __name__ == '__main__':\n import torch\n\n img = torch.rand((1, 3, 32, 32))\n model = ResNet(20, 100, False)\n\n print(model)\n"
] |
[
[
"torch.nn.Linear",
"torch.rand",
"torch.nn.Sequential",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d"
]
] |
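A quick shape check for the ResNet defined above. The import path mirrors this row's file_path (model/resnet.py) and is an assumption about repo layout; per the integer divisions in __init__, depth should be 6n + 2 with BasicBlock (20, 32, 56, ...) or 9n + 2 with Bottleneck.

```python
import torch
from model.resnet import ResNet  # assumed import path, per file_path above

model = ResNet(depth=20, num_classes=100, bottleneck=False)  # 20 = 6*3 + 2
x = torch.rand(4, 3, 32, 32)  # CIFAR-sized batch
print(model(x).shape)         # torch.Size([4, 100]): 3 stages end at 8x8,
                              # then AvgPool2d(8) and fc(64 -> num_classes)
```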
KushDen/deepimportance_code_release
|
[
"5d16f1f95568dc402be6dfed4ad993ec0dbaa356"
] |
[
"lrp_toolbox/modules/softmax.py"
] |
[
"'''\n@author: Sebastian Lapuschkin\n@author: Gregoire Montavon\n@maintainer: Sebastian Lapuschkin\n@contact: [email protected], [email protected]\n@date: 14.08.2015\n@version: 1.2+\n@copyright: Copyright (c) 2015-2017, Sebastian Lapuschkin, Alexander Binder, Gregoire Montavon, Klaus-Robert Mueller, Wojciech Samek\n@license : BSD-2-Clause\n'''\n\nimport numpy as np\nfrom .module import Module\n\n# -------------------------------\n# Softmax layer\n# -------------------------------\nclass SoftMax(Module):\n '''\n Softmax Layer\n '''\n\n def __init__(self):\n Module.__init__(self)\n\n def forward(self,X,*args,**kwargs):\n self.X = X\n self.Y = np.exp(X) / np.exp(X).sum(axis=1,keepdims=True)\n return self.Y\n\n\n def lrp(self,R,*args,**kwargs):\n # just propagate R further down.\n # makes sure subroutines never get called.\n #return R*self.X\n return R\n\n def clean(self):\n self.X = None\n self.Y = None"
] |
[
[
"numpy.exp"
]
] |
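A behaviour check for the SoftMax module above: forward normalises each row into a probability vector, and lrp returns the incoming relevance unchanged. The max-subtraction variant at the end is the standard numerical-stability trick, noted here as an aside; it is not part of the original module.

```python
import numpy as np

X = np.array([[1.0, 2.0, 3.0]])
Y = np.exp(X) / np.exp(X).sum(axis=1, keepdims=True)
print(Y.sum(axis=1))  # [1.] -- each row is a probability distribution

# Equivalent but overflow-safe for large logits (an aside, not in the module):
Y_stable = np.exp(X - X.max(axis=1, keepdims=True))
Y_stable /= Y_stable.sum(axis=1, keepdims=True)
print(np.allclose(Y, Y_stable))  # True
```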
jhaux/triplet-reid
|
[
"ac475c38c1de083482634db75dde53f12ef69cb1"
] |
[
"triplet_reid/edflow_implementations/deepfashion/eval_tsne.py"
] |
[
"import sys\nsys.path.append(\".\")\nimport yaml, os, json\nfrom triplet_reid.edflow_implementations.deepfashion.data import (\n FromCSVWithEmbedding, FromCSVWithMultiEmbedding)\nfrom tqdm import trange, tqdm\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nfrom triplet_reid.excluders.diagonal import Excluder as DiagonalExcluder\nfrom scipy.spatial.distance import cdist\nfrom sklearn.metrics import average_precision_score\n\n\ndef make_tsne_plot(outpath, dataset):\n indices = np.random.permutation(len(dataset))\n N = 1000\n indices = indices[:N]\n data = list()\n for i in tqdm(indices):\n data.append(dataset[i][\"embedding\"])\n data = np.stack(data)\n\n from sklearn.manifold import TSNE\n tsne = TSNE(n_components=2, random_state=0, verbose = 1, perplexity = 40, n_iter=300)\n data_2d = tsne.fit_transform(data)\n\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.scatter(data_2d[:,0], data_2d[:,1])\n\n fig.savefig(outpath, dpi = 300)\n print(\"Wrote \", outpath)\n\ndef make_combined_tsne_plot(outpath, dataset1, dataset2, label1, label2):\n indices1 = np.random.permutation(len(dataset1))\n indices2 = np.random.permutation(len(dataset2))\n N = 1000\n indices1 = indices1[:N]\n indices2 = indices2[:N]\n data = list()\n for i in tqdm(indices1):\n data.append(dataset1[i][\"embedding\"])\n for i in tqdm(indices2):\n data.append(dataset2[i][\"embedding\"])\n data = np.stack(data)\n print(data.shape)\n\n from sklearn.manifold import TSNE\n tsne = TSNE(n_components=2, random_state=0, verbose = 1)\n data_2d = tsne.fit_transform(data)\n print(data_2d.shape)\n\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n colors = [\"r\", \"g\"]\n markers = [\"+\", \"x\"]\n alphas = [1.0, 1.0]\n ax.scatter(\n data_2d[:N,0], data_2d[:N,1],\n c = colors[0], label = label1, marker = markers[0], alpha = alphas[0])\n ax.scatter(\n data_2d[N:,0], data_2d[N:,1],\n c = colors[1], label = label2, marker = markers[1], alpha = alphas[1])\n ax.legend()\n\n fig.savefig(outpath, dpi = 300)\n print(\"Wrote \", outpath)\n\n\ndef run(embedding_root, postfixes):\n joint_config = {\n \"spatial_size\": 256,\n \"data_root\": \"data/deepfashion/images\",\n \"embedding_root\": embedding_root,\n \"embedding_postfixes\": postfixes,\n \"data_csv\": \"data/deepfashion/test_reconstruction.csv\",\n \"z_size\": None}\n joint_dataset = FromCSVWithMultiEmbedding(joint_config)\n marginal_config = {\n \"spatial_size\": 256,\n \"data_root\": \"data/deepfashion/images\",\n \"embedding_root\": embedding_root,\n \"embedding_postfixes\": postfixes,\n \"data_csv\": \"data/deepfashion/test_transfer.csv\",\n \"z_size\": None}\n marginal_dataset = FromCSVWithMultiEmbedding(marginal_config)\n print(len(joint_dataset))\n print(len(marginal_dataset))\n for name, dataset in zip([\"joint\", \"marginal\"], [joint_dataset, marginal_dataset]):\n out_path = \"tsne_\" + name + \".png\"\n out_path = os.path.join(embedding_root, out_path)\n make_tsne_plot(out_path, dataset)\n\n out_path = \"tsne_\" + \"combined\" + \".png\"\n out_path = os.path.join(embedding_root, out_path)\n make_combined_tsne_plot(out_path, joint_dataset, marginal_dataset, \"joint\", \"marginal\")\n\n\nif __name__ == \"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"embedding_root\")\n parser.add_argument(\"--postfixes\", nargs = \"+\", required = True)\n opt = parser.parse_args()\n run(opt.embedding_root, opt.postfixes)\n"
] |
[
[
"numpy.stack",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.figure"
]
] |
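make_tsne_plot above is tied to the repo's FromCSVWithMultiEmbedding datasets; the sketch below reproduces just its projection-and-scatter step on random stand-in embeddings, reusing the same TSNE hyperparameters (perplexity=40, n_iter=300). The 128-dimensional embedding size is an invented placeholder.

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

data = np.random.rand(1000, 128)  # stand-in for N=1000 learned embeddings
data_2d = TSNE(n_components=2, random_state=0, verbose=1,
               perplexity=40, n_iter=300).fit_transform(data)

fig, ax = plt.subplots()
ax.scatter(data_2d[:, 0], data_2d[:, 1])
fig.savefig("tsne.png", dpi=300)
```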
iam-abbas/numpy
|
[
"2fb5e969fded3cd468f2ca01d5b954c953545dd9",
"2fb5e969fded3cd468f2ca01d5b954c953545dd9",
"2fb5e969fded3cd468f2ca01d5b954c953545dd9"
] |
[
"benchmarks/benchmarks/bench_lib.py",
"doc/source/reference/random/performance.py",
"numpy/array_api/tests/test_array_object.py"
] |
[
"\"\"\"Benchmarks for `numpy.lib`.\"\"\"\n\n\nfrom .common import Benchmark\n\nimport numpy as np\n\n\nclass Pad(Benchmark):\n \"\"\"Benchmarks for `numpy.pad`.\n\n When benchmarking the pad function it is useful to cover scenarios where\n the ratio between the size of the input array and the output array differs\n significantly (original area vs. padded area). This allows to evaluate for\n which scenario a padding algorithm is optimized. Furthermore involving\n large range of array sizes ensures that the effects of CPU-bound caching is\n visible.\n\n The table below shows the sizes of the arrays involved in this benchmark:\n\n +-----------------+----------+-----------+-----------+-----------------+\n | shape | original | padded: 1 | padded: 8 | padded: (0, 32) |\n +=================+==========+===========+===========+=================+\n | (2 ** 22,) | 32 MiB | 32.0 MiB | 32.0 MiB | 32.0 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n | (1024, 1024) | 8 MiB | 8.03 MiB | 8.25 MiB | 8.51 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n | (256, 256, 1) | 256 KiB | 786 KiB | 5.08 MiB | 11.6 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n | (4, 4, 4, 4) | 2 KiB | 10.1 KiB | 1.22 MiB | 12.8 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n | (1, 1, 1, 1, 1) | 8 B | 1.90 MiB | 10.8 MiB | 299 MiB |\n +-----------------+----------+-----------+-----------+-----------------+\n \"\"\"\n\n param_names = [\"shape\", \"pad_width\", \"mode\"]\n params = [\n # Shape of the input arrays\n [(2 ** 22,), (1024, 1024), (256, 128, 1),\n (4, 4, 4, 4), (1, 1, 1, 1, 1)],\n # Tested pad widths\n [1, 8, (0, 32)],\n # Tested modes: mean, median, minimum & maximum use the same code path\n # reflect & symmetric share a lot of their code path\n [\"constant\", \"edge\", \"linear_ramp\", \"mean\", \"reflect\", \"wrap\"],\n ]\n\n def setup(self, shape, pad_width, mode):\n # Make sure to fill the array to make the OS page fault\n # in the setup phase and not the timed phase\n self.array = np.full(shape, fill_value=1, dtype=np.float64)\n\n def time_pad(self, shape, pad_width, mode):\n np.pad(self.array, pad_width, mode)\n\n\nclass Nan(Benchmark):\n \"\"\"Benchmarks for nan functions\"\"\"\n\n param_names = [\"array_size\", \"percent_nans\"]\n params = [\n # sizes of the 1D arrays\n [200, int(2e5)],\n # percent of np.nan in arrays\n [0, 0.1, 2., 50., 90.],\n ]\n\n def setup(self, array_size, percent_nans):\n np.random.seed(123)\n # produce a randomly shuffled array with the\n # approximate desired percentage np.nan content\n base_array = np.random.uniform(size=array_size)\n base_array[base_array < percent_nans / 100.] 
= np.nan\n self.arr = base_array\n\n def time_nanmin(self, array_size, percent_nans):\n np.nanmin(self.arr)\n\n def time_nanmax(self, array_size, percent_nans):\n np.nanmax(self.arr)\n\n def time_nanargmin(self, array_size, percent_nans):\n np.nanargmin(self.arr)\n\n def time_nanargmax(self, array_size, percent_nans):\n np.nanargmax(self.arr)\n\n def time_nansum(self, array_size, percent_nans):\n np.nansum(self.arr)\n\n def time_nanprod(self, array_size, percent_nans):\n np.nanprod(self.arr)\n\n def time_nancumsum(self, array_size, percent_nans):\n np.nancumsum(self.arr)\n\n def time_nancumprod(self, array_size, percent_nans):\n np.nancumprod(self.arr)\n\n def time_nanmean(self, array_size, percent_nans):\n np.nanmean(self.arr)\n\n def time_nanvar(self, array_size, percent_nans):\n np.nanvar(self.arr)\n\n def time_nanstd(self, array_size, percent_nans):\n np.nanstd(self.arr)\n\n def time_nanmedian(self, array_size, percent_nans):\n np.nanmedian(self.arr)\n\n def time_nanquantile(self, array_size, percent_nans):\n np.nanquantile(self.arr, q=0.2)\n\n def time_nanpercentile(self, array_size, percent_nans):\n np.nanpercentile(self.arr, q=50)\n\n\nclass Unique(Benchmark):\n \"\"\"Benchmark for np.unique with np.nan values.\"\"\"\n\n param_names = [\"array_size\", \"percent_nans\"]\n params = [\n # sizes of the 1D arrays\n [200, int(2e5)],\n # percent of np.nan in arrays\n [0, 0.1, 2., 50., 90.],\n ]\n\n def setup(self, array_size, percent_nans):\n np.random.seed(123)\n # produce a randomly shuffled array with the\n # approximate desired percentage np.nan content\n base_array = np.random.uniform(size=array_size)\n base_array[base_array < percent_nans / 100.] = np.nan\n self.arr = base_array\n\n def time_unique(self, array_size, percent_nans):\n np.unique(self.arr)\n",
"from timeit import repeat\n\nimport pandas as pd\n\nimport numpy as np\nfrom numpy.random import MT19937, PCG64, PCG64DXSM, Philox, SFC64\n\nPRNGS = [MT19937, PCG64, PCG64DXSM, Philox, SFC64]\n\nfuncs = {}\nintegers = 'integers(0, 2**{bits},size=1000000, dtype=\"uint{bits}\")'\nfuncs['32-bit Unsigned Ints'] = integers.format(bits=32)\nfuncs['64-bit Unsigned Ints'] = integers.format(bits=64)\nfuncs['Uniforms'] = 'random(size=1000000)'\nfuncs['Normals'] = 'standard_normal(size=1000000)'\nfuncs['Exponentials'] = 'standard_exponential(size=1000000)'\nfuncs['Gammas'] = 'standard_gamma(3.0,size=1000000)'\nfuncs['Binomials'] = 'binomial(9, .1, size=1000000)'\nfuncs['Laplaces'] = 'laplace(size=1000000)'\nfuncs['Poissons'] = 'poisson(3.0, size=1000000)'\n\nsetup = \"\"\"\nfrom numpy.random import {prng}, Generator\nrg = Generator({prng}())\n\"\"\"\n\ntest = \"rg.{func}\"\ntable = {}\nfor prng in PRNGS:\n print(prng)\n col = {}\n for key in funcs:\n t = repeat(test.format(func=funcs[key]),\n setup.format(prng=prng().__class__.__name__),\n number=1, repeat=3)\n col[key] = 1000 * min(t)\n col = pd.Series(col)\n table[prng().__class__.__name__] = col\n\nnpfuncs = {}\nnpfuncs.update(funcs)\nnpfuncs['32-bit Unsigned Ints'] = 'randint(2**32,dtype=\"uint32\",size=1000000)'\nnpfuncs['64-bit Unsigned Ints'] = 'randint(2**64,dtype=\"uint64\",size=1000000)'\nsetup = \"\"\"\nfrom numpy.random import RandomState\nrg = RandomState()\n\"\"\"\ncol = {}\nfor key in npfuncs:\n t = repeat(test.format(func=npfuncs[key]),\n setup.format(prng=prng().__class__.__name__),\n number=1, repeat=3)\n col[key] = 1000 * min(t)\ntable['RandomState'] = pd.Series(col)\n\ncolumns = ['MT19937', 'PCG64', 'PCG64DXSM', 'Philox', 'SFC64', 'RandomState']\ntable = pd.DataFrame(table)\norder = np.log(table).mean().sort_values().index\ntable = table.T\ntable = table.reindex(columns)\ntable = table.T\ntable = table.reindex([k for k in funcs], axis=0)\nprint(table.to_csv(float_format='%0.1f'))\n\n\nrel = table.loc[:, ['RandomState']].values @ np.ones(\n (1, table.shape[1])) / table\nrel.pop('RandomState')\nrel = rel.T\nrel['Overall'] = np.exp(np.log(rel).mean(1))\nrel *= 100\nrel = np.round(rel)\nrel = rel.T\nprint(rel.to_csv(float_format='%0d'))\n\n# Cross-platform table\nrows = ['32-bit Unsigned Ints','64-bit Unsigned Ints','Uniforms','Normals','Exponentials']\nxplat = rel.reindex(rows, axis=0)\nxplat = 100 * (xplat / xplat.MT19937.values[:,None])\noverall = np.exp(np.log(xplat).mean(0))\nxplat = xplat.T.copy()\nxplat['Overall']=overall\nprint(xplat.T.round(1))\n\n\n\n",
"import operator\n\nfrom numpy.testing import assert_raises\nimport numpy as np\n\nfrom .. import ones, asarray, result_type\nfrom .._dtypes import (\n _all_dtypes,\n _boolean_dtypes,\n _floating_dtypes,\n _integer_dtypes,\n _integer_or_boolean_dtypes,\n _numeric_dtypes,\n int8,\n int16,\n int32,\n int64,\n uint64,\n)\n\n\ndef test_validate_index():\n # The indexing tests in the official array API test suite test that the\n # array object correctly handles the subset of indices that are required\n # by the spec. But the NumPy array API implementation specifically\n # disallows any index not required by the spec, via Array._validate_index.\n # This test focuses on testing that non-valid indices are correctly\n # rejected. See\n # https://data-apis.org/array-api/latest/API_specification/indexing.html\n # and the docstring of Array._validate_index for the exact indexing\n # behavior that should be allowed. This does not test indices that are\n # already invalid in NumPy itself because Array will generally just pass\n # such indices directly to the underlying np.ndarray.\n\n a = ones((3, 4))\n\n # Out of bounds slices are not allowed\n assert_raises(IndexError, lambda: a[:4])\n assert_raises(IndexError, lambda: a[:-4])\n assert_raises(IndexError, lambda: a[:3:-1])\n assert_raises(IndexError, lambda: a[:-5:-1])\n assert_raises(IndexError, lambda: a[3:])\n assert_raises(IndexError, lambda: a[-4:])\n assert_raises(IndexError, lambda: a[3::-1])\n assert_raises(IndexError, lambda: a[-4::-1])\n\n assert_raises(IndexError, lambda: a[...,:5])\n assert_raises(IndexError, lambda: a[...,:-5])\n assert_raises(IndexError, lambda: a[...,:4:-1])\n assert_raises(IndexError, lambda: a[...,:-6:-1])\n assert_raises(IndexError, lambda: a[...,4:])\n assert_raises(IndexError, lambda: a[...,-5:])\n assert_raises(IndexError, lambda: a[...,4::-1])\n assert_raises(IndexError, lambda: a[...,-5::-1])\n\n # Boolean indices cannot be part of a larger tuple index\n assert_raises(IndexError, lambda: a[a[:,0]==1,0])\n assert_raises(IndexError, lambda: a[a[:,0]==1,...])\n assert_raises(IndexError, lambda: a[..., a[0]==1])\n assert_raises(IndexError, lambda: a[[True, True, True]])\n assert_raises(IndexError, lambda: a[(True, True, True),])\n\n # Integer array indices are not allowed (except for 0-D)\n idx = asarray([[0, 1]])\n assert_raises(IndexError, lambda: a[idx])\n assert_raises(IndexError, lambda: a[idx,])\n assert_raises(IndexError, lambda: a[[0, 1]])\n assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])\n assert_raises(IndexError, lambda: a[[0, 1]])\n assert_raises(IndexError, lambda: a[np.array([[0, 1]])])\n\n # np.newaxis is not allowed\n assert_raises(IndexError, lambda: a[None])\n assert_raises(IndexError, lambda: a[None, ...])\n assert_raises(IndexError, lambda: a[..., None])\n\n\ndef test_operators():\n # For every operator, we test that it works for the required type\n # combinations and raises TypeError otherwise\n binary_op_dtypes = {\n \"__add__\": \"numeric\",\n \"__and__\": \"integer_or_boolean\",\n \"__eq__\": \"all\",\n \"__floordiv__\": \"numeric\",\n \"__ge__\": \"numeric\",\n \"__gt__\": \"numeric\",\n \"__le__\": \"numeric\",\n \"__lshift__\": \"integer\",\n \"__lt__\": \"numeric\",\n \"__mod__\": \"numeric\",\n \"__mul__\": \"numeric\",\n \"__ne__\": \"all\",\n \"__or__\": \"integer_or_boolean\",\n \"__pow__\": \"floating\",\n \"__rshift__\": \"integer\",\n \"__sub__\": \"numeric\",\n \"__truediv__\": \"floating\",\n \"__xor__\": \"integer_or_boolean\",\n }\n\n # Recompute each time because of 
in-place ops\n def _array_vals():\n for d in _integer_dtypes:\n yield asarray(1, dtype=d)\n for d in _boolean_dtypes:\n yield asarray(False, dtype=d)\n for d in _floating_dtypes:\n yield asarray(1.0, dtype=d)\n\n for op, dtypes in binary_op_dtypes.items():\n ops = [op]\n if op not in [\"__eq__\", \"__ne__\", \"__le__\", \"__ge__\", \"__lt__\", \"__gt__\"]:\n rop = \"__r\" + op[2:]\n iop = \"__i\" + op[2:]\n ops += [rop, iop]\n for s in [1, 1.0, False]:\n for _op in ops:\n for a in _array_vals():\n # Test array op scalar. From the spec, the following combinations\n # are supported:\n\n # - Python bool for a bool array dtype,\n # - a Python int within the bounds of the given dtype for integer array dtypes,\n # - a Python int or float for floating-point array dtypes\n\n # We do not do bounds checking for int scalars, but rather use the default\n # NumPy behavior for casting in that case.\n\n if ((dtypes == \"all\"\n or dtypes == \"numeric\" and a.dtype in _numeric_dtypes\n or dtypes == \"integer\" and a.dtype in _integer_dtypes\n or dtypes == \"integer_or_boolean\" and a.dtype in _integer_or_boolean_dtypes\n or dtypes == \"boolean\" and a.dtype in _boolean_dtypes\n or dtypes == \"floating\" and a.dtype in _floating_dtypes\n )\n # bool is a subtype of int, which is why we avoid\n # isinstance here.\n and (a.dtype in _boolean_dtypes and type(s) == bool\n or a.dtype in _integer_dtypes and type(s) == int\n or a.dtype in _floating_dtypes and type(s) in [float, int]\n )):\n # Only test for no error\n getattr(a, _op)(s)\n else:\n assert_raises(TypeError, lambda: getattr(a, _op)(s))\n\n # Test array op array.\n for _op in ops:\n for x in _array_vals():\n for y in _array_vals():\n # See the promotion table in NEP 47 or the array\n # API spec page on type promotion. 
Mixed kind\n # promotion is not defined.\n if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]\n or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]\n or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes\n or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes\n or x.dtype in _boolean_dtypes and y.dtype not in _boolean_dtypes\n or y.dtype in _boolean_dtypes and x.dtype not in _boolean_dtypes\n or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes\n or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes\n ):\n assert_raises(TypeError, lambda: getattr(x, _op)(y))\n # Ensure in-place operators only promote to the same dtype as the left operand.\n elif (\n _op.startswith(\"__i\")\n and result_type(x.dtype, y.dtype) != x.dtype\n ):\n assert_raises(TypeError, lambda: getattr(x, _op)(y))\n # Ensure only those dtypes that are required for every operator are allowed.\n elif (dtypes == \"all\" and (x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes\n or x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)\n or (dtypes == \"numeric\" and x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)\n or dtypes == \"integer\" and x.dtype in _integer_dtypes and y.dtype in _numeric_dtypes\n or dtypes == \"integer_or_boolean\" and (x.dtype in _integer_dtypes and y.dtype in _integer_dtypes\n or x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes)\n or dtypes == \"boolean\" and x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes\n or dtypes == \"floating\" and x.dtype in _floating_dtypes and y.dtype in _floating_dtypes\n ):\n getattr(x, _op)(y)\n else:\n assert_raises(TypeError, lambda: getattr(x, _op)(y))\n\n unary_op_dtypes = {\n \"__abs__\": \"numeric\",\n \"__invert__\": \"integer_or_boolean\",\n \"__neg__\": \"numeric\",\n \"__pos__\": \"numeric\",\n }\n for op, dtypes in unary_op_dtypes.items():\n for a in _array_vals():\n if (\n dtypes == \"numeric\"\n and a.dtype in _numeric_dtypes\n or dtypes == \"integer_or_boolean\"\n and a.dtype in _integer_or_boolean_dtypes\n ):\n # Only test for no error\n getattr(a, op)()\n else:\n assert_raises(TypeError, lambda: getattr(a, op)())\n\n # Finally, matmul() must be tested separately, because it works a bit\n # different from the other operations.\n def _matmul_array_vals():\n for a in _array_vals():\n yield a\n for d in _all_dtypes:\n yield ones((3, 4), dtype=d)\n yield ones((4, 2), dtype=d)\n yield ones((4, 4), dtype=d)\n\n # Scalars always error\n for _op in [\"__matmul__\", \"__rmatmul__\", \"__imatmul__\"]:\n for s in [1, 1.0, False]:\n for a in _matmul_array_vals():\n if (type(s) in [float, int] and a.dtype in _floating_dtypes\n or type(s) == int and a.dtype in _integer_dtypes):\n # Type promotion is valid, but @ is not allowed on 0-D\n # inputs, so the error is a ValueError\n assert_raises(ValueError, lambda: getattr(a, _op)(s))\n else:\n assert_raises(TypeError, lambda: getattr(a, _op)(s))\n\n for x in _matmul_array_vals():\n for y in _matmul_array_vals():\n if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]\n or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]\n or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes\n or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes\n or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes\n or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes\n or x.dtype in _boolean_dtypes\n or y.dtype in _boolean_dtypes\n ):\n assert_raises(TypeError, lambda: 
x.__matmul__(y))\n assert_raises(TypeError, lambda: y.__rmatmul__(x))\n assert_raises(TypeError, lambda: x.__imatmul__(y))\n elif x.shape == () or y.shape == () or x.shape[1] != y.shape[0]:\n assert_raises(ValueError, lambda: x.__matmul__(y))\n assert_raises(ValueError, lambda: y.__rmatmul__(x))\n if result_type(x.dtype, y.dtype) != x.dtype:\n assert_raises(TypeError, lambda: x.__imatmul__(y))\n else:\n assert_raises(ValueError, lambda: x.__imatmul__(y))\n else:\n x.__matmul__(y)\n y.__rmatmul__(x)\n if result_type(x.dtype, y.dtype) != x.dtype:\n assert_raises(TypeError, lambda: x.__imatmul__(y))\n elif y.shape[0] != y.shape[1]:\n # This one fails because x @ y has a different shape from x\n assert_raises(ValueError, lambda: x.__imatmul__(y))\n else:\n x.__imatmul__(y)\n\n\ndef test_python_scalar_construtors():\n b = asarray(False)\n i = asarray(0)\n f = asarray(0.0)\n\n assert bool(b) == False\n assert int(i) == 0\n assert float(f) == 0.0\n assert operator.index(i) == 0\n\n # bool/int/float should only be allowed on 0-D arrays.\n assert_raises(TypeError, lambda: bool(asarray([False])))\n assert_raises(TypeError, lambda: int(asarray([0])))\n assert_raises(TypeError, lambda: float(asarray([0.0])))\n assert_raises(TypeError, lambda: operator.index(asarray([0])))\n\n # bool/int/float should only be allowed on arrays of the corresponding\n # dtype\n assert_raises(ValueError, lambda: bool(i))\n assert_raises(ValueError, lambda: bool(f))\n\n assert_raises(ValueError, lambda: int(b))\n assert_raises(ValueError, lambda: int(f))\n\n assert_raises(ValueError, lambda: float(b))\n assert_raises(ValueError, lambda: float(i))\n\n assert_raises(TypeError, lambda: operator.index(b))\n assert_raises(TypeError, lambda: operator.index(f))\n"
] |
[
[
"numpy.nanmean",
"numpy.full",
"numpy.nanpercentile",
"numpy.nanprod",
"numpy.nanmin",
"numpy.nanvar",
"numpy.nanmax",
"numpy.nanmedian",
"numpy.nanstd",
"numpy.pad",
"numpy.nansum",
"numpy.nanargmin",
"numpy.random.seed",
"numpy.nanquantile",
"numpy.nancumprod",
"numpy.nanargmax",
"numpy.random.uniform",
"numpy.nancumsum",
"numpy.unique"
],
[
"numpy.log",
"numpy.round",
"pandas.DataFrame",
"numpy.ones",
"pandas.Series"
],
[
"numpy.array",
"numpy.testing.assert_raises"
]
] |
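A minimal stand-alone sketch (not part of the dataset row above) of the measurement idea behind the Pad benchmark: time np.pad for shapes where the ratio of original to padded area differs sharply. It assumes only numpy and the standard-library timeit; the shapes, pad widths, and repeat counts are illustrative, not the asv configuration.

    import timeit

    import numpy as np

    # Hypothetical cases: a small pad on a large array vs. a large pad on a tiny one.
    for shape, pad_width in [((1024, 1024), 1), ((4, 4, 4, 4), (0, 32))]:
        arr = np.full(shape, fill_value=1, dtype=np.float64)  # pre-fault pages, as in setup()
        best = min(timeit.repeat(lambda: np.pad(arr, pad_width, "constant"),
                                 number=10, repeat=3))
        print(shape, pad_width, "%.2f ms / 10 calls" % (best * 1000))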
hayatonakamura/feverDetector
|
[
"4d39a2551b45aa45954f633b4dd35e4f7444e7f8"
] |
[
"board/new_hit_with_camera.py"
] |
[
"# Hayato Nakamura\n# hn2357\n# Copyright 2020 Hayato Nakamura\nfrom __future__ import print_function #compatible with python 2.7\nimport sys, time\nimport numpy as np\nfrom picamera import PiCamera \nimport aws\nfrom collections import OrderedDict\nfrom decimal import *\nfrom datetime import datetime\n#import threading\ndynamodb = aws.getResource('dynamodb', 'us-east-1')\ns3 = aws.getClient('s3', 'us-east-1')\n\ndef search(mat):\n\t# Scan the matrix\n\tfor y in range(24-3):\n\t\tfor x in range(32-3):\n\t\t\twindow = mat[y:y+3, x:x+3]\n\t\t\tprint(window)\n\t\t\tprint(np.mean(window))\n\t\t\tif (np.mean(window) > 36 and np.mean(window) < 45):\n\t\t\t\tprint(\"\\n\\nHIT\\n\\n\")\n\t\t\t\treturn True\n\treturn False\n\n\ndef process_pic(name):\n\tcamera = PiCamera()\n\tcamera.capture('/home/pi/Desktop/mlx90640-library/' + name + '.jpg')\n\tcamera.close()\n\ttry:\n\t\tfile = name + '.jpg'\n\t\ts3.upload_file(file, 'hayatopia', file)\n\t\tprint(\"File uploaded on S3\")\n\texcept:\n\t\tprint(\"S3 failed...\")\n\n\ndef dynamo_add(name, arr, timestamp):\n\ttry:\n\t\ttable = dynamodb.Table(name)\n\texcept:\n\t\tprint(\"Table with name \", name, \"doesn't exist...\")\n\t\treturn\n\titems = OrderedDict()\n\titems['timestamp'] = timestamp\n\tfor x in range(len(arr)):\n\t\tval = '{:3.2f}'.format(arr[x])\n\t\tval = Decimal(val)\n\t\titems[str(x)] = val\n\ttry:\n\t\ttable.put_item(Item=items)\n\t\tprint(\"Data successfully uploaded...\")\n\texcept:\n\t\tprint(\"Data upload unsuccessful...\")\n\n# def t_add(name, ir):\n# \ttry:\n# \t\tprint('Starting Thread: ', threading.currentThread().getName())\n# \t\ttake_picture(name)\n# \t\tdynamo_add('temperature', ir)\n# \t\tprint ('Exiting Thread: ', threading.currentThread().getName())\n# \texcept:\n# \t\tprint(\"Error with threading...\")\n\n\n\ndef main():\n\tfifo = open('/var/run/mlx9062x.sock', 'r')\n\tfor z in range(20):\n\t\tfile = open('temperature.txt', 'w')\n\t\tmat = np.zeros((24, 32))\n\n\t\t# 20 frames\n\t\tir = np.frombuffer(fifo.read()[0:3072], dtype=np.float32)\n\t\tif (len(ir) == 0):\n\t\t\tbreak\n\n\t\ttemp = \"\"\n\t\tfor y in range(24):\n\t\t\tfor x in range(32):\n\t\t\t\tval = '{:3.2f}'.format(ir[32 * (23-y) + x])\n\t\t\t\ttemp += val + \" \"\n\n\t\t\t\tmat[y, x] = float(val)\n\t\t\tfile.write(temp)\n\t\t\tfile.write('\\n')\n\t\t\ttemp = \"\"\n\t\tfile.write('\\n')\n\t\tfile.write('\\n')\n\n\t\tfile.close()\n\t\tif (search(mat)):\n\t\t\tprint(\"here\")\n\t\t\tnow = str(datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\"))\n\t\t\tname = 'temp'\n\t\t\tprocess_pic(now)\n\t\t\tdynamo_add('temperature', ir, now)\n\t\t\t# t1 = threading.Thread(name='Upload to dynamo', target=t_add, args=(name, ir,))\n\t\t\t# t1.setDaemon(True)\n\t\t\t# t1.start()\n\n\t\ttime.sleep(0.1) #10fps\n\n\t\n\nif __name__ == \"__main__\":\n\t#print(\"Active threads: \", threading.active_count())\n\tmain()\n\n"
] |
[
[
"numpy.mean",
"numpy.zeros"
]
] |
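The 3x3 window scan in search() above can be vectorized; a sketch assuming numpy >= 1.20 for sliding_window_view. As a side effect it also covers the last row/column of window positions, which the range(24-3)/range(32-3) loops above stop one short of.

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view

    def search_vectorized(mat, lo=36.0, hi=45.0):
        # All 3x3 windows at once: shape (22, 30, 3, 3) for a 24x32 input.
        means = sliding_window_view(mat, (3, 3)).mean(axis=(-2, -1))
        return bool(((means > lo) & (means < hi)).any())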
ichigo663/SimulatedAnnealing
|
[
"f319ee6bbdd8e0be16d39de0893a7a26d234a817"
] |
[
"simulatedAnnealing.py"
] |
[
"from pyglet.gl import *\nimport numpy as np\n\nclass SimulatedAnnealing:\n\n def __init__(self, function, tStart=15.0, tEnd=2.0,localMovement=False, dxMovement=1, scale=2.0):\n #xpos, ypos of current position\n self.scale = scale\n self.step = function.step\n self.xmin = function.minx\n self.xmax = function.maxx\n self.pos = [0, 0]\n self.pos[0] = np.random.randint(function.minx, function.maxx)\n self.pos[1] = function.compute(self.pos[0])\n self.evaluate = function.compute\n self.tmax = tStart\n self.t = tStart\n self.t_end = tEnd\n self.timer = 0.0\n self.p = 1.0\n self.local_movement = localMovement\n self.dx_interval = dxMovement\n self.run_time = tStart\n self.vline_len = 5.0\n self.y_max = np.max(function.y)\n self.y_min = np.min(function.y)\n\n def add_callback(self, f):\n self.callback = f\n\n def render(self):\n # render current solution\n glColor3f(0, 0, 0)\n glPointSize(10)\n glBegin(GL_POINTS)\n glVertex2f(self.pos[0], self.pos[1])\n glEnd()\n glBegin(GL_LINES)\n glVertex2f(self.pos[0], self.pos[1] - self.vline_len)\n glVertex2f(self.pos[0], self.pos[1] + self.vline_len)\n glEnd()\n\n def decreaseT(self, dec):\n self.t -= dec\n\n # return next random move\n # pick is made from [xmin, xmax]\n def next_value_all_random(self):\n x = (self.xmax - self.xmin) * np.random.ranf() + self.xmin\n return x, self.evaluate(x)\n\n # return next random move\n # pick is made from [x-dx, x+dx]\n def next_value_int(self, dx):\n x = dx*2 * np.random.ranf() + self.pos[0]-dx\n return x, self.evaluate(x)\n\n # returns a value in [0,1]\n def schedule(self):\n return self.t/self.tmax\n\n def run(self, dt):\n self.timer += dt\n # time in seconds\n if self.timer >= 0.1:\n self.decreaseT(self.timer)\n self.run_time -= self.timer\n self.timer = 0.0\n # update T probability\n self.p = self.schedule()\n # check termination\n if self.run_time < 0:\n self.callback()\n print(self)\n else:\n # pick next move\n if(self.local_movement):\n x, y = self.next_value_int(self.dx_interval)\n else:\n x, y = self.next_value_all_random()\n # delta of energy\n # and normalization in [0,1]\n # then we scale (optional), scaling\n # helps avoiding large P(x) for minor bad moves\n d_energy = np.abs((y - self.pos[1]) /(self.y_max - self.y_min))*self.scale\n # find the minimum\n if y < self.pos[1]:\n self.pos = [x, y]\n # accept with probability e^(-(delta_energy)/temperature))\n elif self.t > self.t_end and np.exp(-(d_energy) / self.p) >= np.random.ranf():\n self.pos = [x, y]\n\n def __repr__(self):\n return \"pos: [{x}, {y}]\\nstep: {step}\".format(x=self.pos[0], y=self.pos[1], step=self.step)\n\n"
] |
[
[
"numpy.max",
"numpy.min",
"numpy.exp",
"numpy.random.randint",
"numpy.abs",
"numpy.random.ranf"
]
] |
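The acceptance test in run() above is the standard Metropolis criterion; a minimal sketch isolating it (a hypothetical helper, not a method of the class): downhill moves are always taken, and uphill moves survive with probability exp(-delta_energy / temperature).

    import numpy as np

    def accept(delta_energy, temperature, rng=None):
        if rng is None:
            rng = np.random.default_rng()
        if delta_energy <= 0:
            return True  # always accept improvements
        return rng.random() < np.exp(-delta_energy / temperature)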
chipmuenk/A2SRC
|
[
"156c063c825669130bdaf1f41a1e972bbc1747e3"
] |
[
"A2SRC/plot_vispy_test8_mesh.py"
] |
[
"import numpy as np\nfrom vispy import app, gloo, visuals\nfrom vispy.geometry import create_sphere\nfrom vispy.visuals.transforms import (STTransform, AffineTransform,\n ChainTransform)\n\n\nclass Canvas(app.Canvas):\n def __init__(self):\n app.Canvas.__init__(self, keys='interactive', size=(800, 550))\n\n self.meshes = []\n self.rotation = AffineTransform()\n\n # Generate some data to work with\n global mdata\n mdata = create_sphere(20, 40, 1.0)\n\n # Mesh with pre-indexed vertices, uniform color\n self.meshes.append(visuals.MeshVisual(meshdata=mdata, color='r'))\n\n ## Mesh with pre-indexed vertices, per-face color\n ## Because vertices are pre-indexed, we get a different color\n ## every time a vertex is visited, resulting in sharp color\n ## differences between edges.\n verts = mdata.get_vertices(indexed='faces')\n nf = verts.size//9\n fcolor = np.ones((nf, 3, 4), dtype=np.float32)\n fcolor[..., 0] = np.linspace(1, 0, nf)[:, np.newaxis]\n fcolor[..., 1] = np.random.normal(size=nf)[:, np.newaxis]\n fcolor[..., 2] = np.linspace(0, 1, nf)[:, np.newaxis]\n mesh = visuals.MeshVisual(vertices=verts, face_colors=fcolor)\n self.meshes.append(mesh)\n\n ## Mesh with unindexed vertices, per-vertex color\n ## Because vertices are unindexed, we get the same color\n ## every time a vertex is visited, resulting in no color differences\n ## between edges.\n verts = mdata.get_vertices()\n faces = mdata.get_faces()\n nv = verts.size//3\n vcolor = np.ones((nv, 4), dtype=np.float32)\n vcolor[:, 0] = np.linspace(1, 0, nv)\n vcolor[:, 1] = np.random.normal(size=nv)\n vcolor[:, 2] = np.linspace(0, 1, nv)\n self.meshes.append(visuals.MeshVisual(verts, faces, vcolor))\n self.meshes.append(visuals.MeshVisual(verts, faces, vcolor,\n shading='flat'))\n self.meshes.append(visuals.MeshVisual(verts, faces, vcolor,\n shading='smooth'))\n\n # Lay out meshes in a grid\n grid = (3, 3)\n s = 300. / max(grid)\n for i, mesh in enumerate(self.meshes):\n x = 800. * (i % grid[0]) / grid[0] + 400. / grid[0] - 2\n y = 800. * (i // grid[1]) / grid[1] + 400. / grid[1] + 2\n transform = ChainTransform([STTransform(translate=(x, y),\n scale=(s, s, 1)),\n self.rotation])\n tr_sys = visuals.transforms.TransformSystem(self)\n tr_sys.visual_to_document = transform\n mesh.tr_sys = tr_sys\n\n self.show()\n\n self.timer = app.Timer(connect=self.rotate)\n self.timer.start(0.016)\n\n def rotate(self, event):\n self.rotation.rotate(1, (0, 1, 0))\n self.update()\n\n def on_draw(self, ev):\n gloo.set_viewport(0, 0, *self.physical_size)\n gloo.clear(color='black', depth=True)\n for mesh in self.meshes:\n mesh.draw(mesh.tr_sys)\n\n\nif __name__ == '__main__':\n win = Canvas()\n import sys\n if sys.flags.interactive != 1:\n app.run()"
] |
[
[
"numpy.random.normal",
"numpy.linspace",
"numpy.ones"
]
] |
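A small sketch of the color-array shapes the MeshVisual calls above rely on (assumes only numpy; the counts are made up): per-face color supplies one RGBA row per corner of every triangle, per-vertex color one RGBA row per vertex.

    import numpy as np

    n_faces, n_verts = 100, 60  # hypothetical counts
    face_colors = np.ones((n_faces, 3, 4), dtype=np.float32)  # (faces, corners, RGBA)
    vert_colors = np.ones((n_verts, 4), dtype=np.float32)     # (vertices, RGBA)
    face_colors[..., 0] = np.linspace(1, 0, n_faces)[:, np.newaxis]  # red ramp per face
    vert_colors[:, 2] = np.linspace(0, 1, n_verts)                   # blue ramp per vertex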
ttthomaschan/DeepcvLib
|
[
"18f7728559136a3c5c8ad54666788ea771e95b16",
"18f7728559136a3c5c8ad54666788ea771e95b16"
] |
[
"Detection/detect.py",
"Detection/dataset/VOC_dataset.py"
] |
[
"import cv2\r\nfrom model.fcos import FCOSDetector\r\nimport torch\r\nfrom torchvision import transforms\r\nimport numpy as np\r\nfrom dataset.VOC_dataset import VOCDataset\r\nimport time\r\nimport matplotlib.patches as patches\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.ticker import NullLocator\r\n\r\n\r\ndef preprocess_img(image, input_ksize):\r\n \"\"\"\r\n resize image and bboxes\r\n Returns\r\n image_paded: input_ksize\r\n bboxes: [None,4]\r\n \"\"\"\r\n min_side, max_side = input_ksize\r\n h, w, _ = image.shape\r\n\r\n smallest_side = min(w, h)\r\n largest_side = max(w, h)\r\n scale = min_side / smallest_side\r\n if largest_side * scale > max_side:\r\n scale = max_side / largest_side\r\n nw, nh = int(scale * w), int(scale * h)\r\n image_resized = cv2.resize(image, (nw, nh))\r\n\r\n pad_w = 32 - nw % 32\r\n pad_h = 32 - nh % 32\r\n\r\n image_paded = np.zeros(shape=[nh + pad_h, nw + pad_w, 3], dtype=np.uint8)\r\n image_paded[:nh, :nw, :] = image_resized\r\n return image_paded\r\n\r\n\r\ndef convertSyncBNtoBN(module):\r\n module_output = module\r\n if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):\r\n module_output = torch.nn.BatchNorm2d(module.num_features,\r\n module.eps, module.momentum,\r\n module.affine,\r\n module.track_running_stats)\r\n if module.affine:\r\n module_output.weight.data = module.weight.data.clone().detach()\r\n module_output.bias.data = module.bias.data.clone().detach()\r\n module_output.running_mean = module.running_mean\r\n module_output.running_var = module.running_var\r\n for name, child in module.named_children():\r\n module_output.add_module(name, convertSyncBNtoBN(child))\r\n del module\r\n return module_output\r\n\r\n\r\nif __name__ == \"__main__\":\r\n cmap = plt.get_cmap('tab20b')\r\n colors = [cmap(i) for i in np.linspace(0, 1, 20)]\r\n\r\n\r\n class Config():\r\n # backbone\r\n pretrained = False\r\n freeze_stage_1 = True\r\n freeze_bn = True\r\n\r\n # fpn\r\n fpn_out_channels = 256\r\n use_p5 = True\r\n\r\n # head\r\n class_num = 80\r\n use_GN_head = True\r\n prior = 0.01\r\n add_centerness = True\r\n cnt_on_reg = False\r\n\r\n # training\r\n strides = [8, 16, 32, 64, 128]\r\n limit_range = [[-1, 64], [64, 128], [128, 256], [256, 512], [512, 999999]]\r\n\r\n # inference\r\n score_threshold = 0.3\r\n nms_iou_threshold = 0.4\r\n max_detection_boxes_num = 300\r\n\r\n\r\n model = FCOSDetector(mode=\"inference\", config=Config)\r\n # model=torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\r\n # print(\"INFO===>success convert BN to SyncBN\")\r\n model = torch.nn.DataParallel(model)\r\n model.load_state_dict(torch.load(\"./checkpoint/voc_77.8.pth\", map_location=torch.device('cpu')))\r\n # model=convertSyncBNtoBN(model)\r\n # print(\"INFO===>success convert SyncBN to BN\")\r\n model = model.eval()\r\n model.to(\"cuda\")\r\n print(\"===>success loading model\")\r\n\r\n import os\r\n\r\n root = \"./test_images/\"\r\n names = os.listdir(root)\r\n for name in names:\r\n img_bgr = cv2.imread(root + name)\r\n img_pad = preprocess_img(img_bgr, [800, 1333])\r\n img = cv2.cvtColor(img_pad.copy(), cv2.COLOR_BGR2RGB)\r\n img1 = transforms.ToTensor()(img)\r\n img1 = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], inplace=True)(img1)\r\n img1 = img1.to(\"cuda\")\r\n\r\n start_t = time.time()\r\n with torch.no_grad():\r\n out = model(img1.unsqueeze_(dim=0))\r\n end_t = time.time()\r\n cost_t = 1000 * (end_t - start_t)\r\n print(\"===>success processing img, cost time %.2f ms\" % cost_t)\r\n # print(out)\r\n scores, classes, 
boxes = out\r\n\r\n boxes = boxes[0].cpu().numpy().tolist()\r\n classes = classes[0].cpu().numpy().tolist()\r\n scores = scores[0].cpu().numpy().tolist()\r\n plt.figure()\r\n fig, ax = plt.subplots(1)\r\n ax.imshow(img)\r\n for i, box in enumerate(boxes):\r\n pt1 = (int(box[0]), int(box[1]))\r\n pt2 = (int(box[2]), int(box[3]))\r\n img_pad = cv2.rectangle(img_pad, pt1, pt2, (0, 255, 0))\r\n b_color = colors[int(classes[i]) - 1]\r\n bbox = patches.Rectangle((box[0], box[1]), width=box[2] - box[0], height=box[3] - box[1], linewidth=1,\r\n facecolor='none', edgecolor=b_color)\r\n ax.add_patch(bbox)\r\n plt.text(box[0], box[1], s=\"%s %.3f\" % (VOCDataset.CLASSES_NAME[int(classes[i])], scores[i]), color='white',\r\n verticalalignment='top',\r\n bbox={'color': b_color, 'pad': 0})\r\n plt.axis('off')\r\n plt.gca().xaxis.set_major_locator(NullLocator())\r\n plt.gca().yaxis.set_major_locator(NullLocator())\r\n plt.savefig('out_images/{}'.format(name), bbox_inches='tight', pad_inches=0.0)\r\n plt.close()\r\n",
"import torch\r\nimport xml.etree.ElementTree as ET\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nfrom torchvision import transforms\r\nfrom PIL import Image\r\nimport random\r\n\r\n\r\ndef flip(img, boxes):\r\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\r\n w = img.width\r\n if boxes.shape[0] != 0:\r\n xmin = w - boxes[:, 2]\r\n xmax = w - boxes[:, 0]\r\n boxes[:, 2] = xmax\r\n boxes[:, 0] = xmin\r\n return img, boxes\r\n\r\n\r\nclass VOCDataset(torch.utils.data.Dataset):\r\n CLASSES_NAME = (\r\n \"__background__ \",\r\n \"aeroplane\",\r\n \"bicycle\",\r\n \"bird\",\r\n \"boat\",\r\n \"bottle\",\r\n \"bus\",\r\n \"car\",\r\n \"cat\",\r\n \"chair\",\r\n \"cow\",\r\n \"diningtable\",\r\n \"dog\",\r\n \"horse\",\r\n \"motorbike\",\r\n \"person\",\r\n \"pottedplant\",\r\n \"sheep\",\r\n \"sofa\",\r\n \"train\",\r\n \"tvmonitor\",\r\n )\r\n\r\n def __init__(self, root_dir, resize_size=[800, 1333], split='trainval', use_difficult=False, is_train=True,\r\n augment=None):\r\n self.root = root_dir\r\n self.use_difficult = use_difficult\r\n self.imgset = split\r\n\r\n self._annopath = os.path.join(self.root, \"Annotations\", \"%s.xml\")\r\n self._imgpath = os.path.join(self.root, \"JPEGImages\", \"%s.jpg\")\r\n self._imgsetpath = os.path.join(self.root, \"ImageSets\", \"Main\", \"%s.txt\")\r\n\r\n with open(self._imgsetpath % self.imgset) as f:\r\n self.img_ids = f.readlines()\r\n self.img_ids = [x.strip() for x in self.img_ids]\r\n self.name2id = dict(zip(VOCDataset.CLASSES_NAME, range(len(VOCDataset.CLASSES_NAME))))\r\n self.id2name = {v: k for k, v in self.name2id.items()}\r\n self.resize_size = resize_size\r\n self.mean = [0.485, 0.456, 0.406]\r\n self.std = [0.229, 0.224, 0.225]\r\n self.train = is_train\r\n self.augment = augment\r\n print(\"INFO=====>voc dataset init finished ! 
!\")\r\n\r\n def __len__(self):\r\n return len(self.img_ids)\r\n\r\n def __getitem__(self, index):\r\n\r\n img_id = self.img_ids[index]\r\n img = Image.open(self._imgpath % img_id)\r\n\r\n anno = ET.parse(self._annopath % img_id).getroot()\r\n\r\n boxes = []\r\n classes = []\r\n for obj in anno.iter(\"object\"):\r\n difficult = int(obj.find(\"difficult\").text) == 1\r\n if not self.use_difficult and difficult:\r\n continue\r\n _box = obj.find(\"bndbox\")\r\n # Make pixel indexes 0-based\r\n # Refer to \"https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211\"\r\n box = [\r\n _box.find(\"xmin\").text,\r\n _box.find(\"ymin\").text,\r\n _box.find(\"xmax\").text,\r\n _box.find(\"ymax\").text,\r\n ]\r\n TO_REMOVE = 1\r\n box = tuple(\r\n map(lambda x: x - TO_REMOVE, list(map(float, box)))\r\n )\r\n boxes.append(box)\r\n\r\n name = obj.find(\"name\").text.lower().strip()\r\n classes.append(self.name2id[name])\r\n\r\n boxes = np.array(boxes, dtype=np.float32)\r\n if self.train:\r\n if random.random() < 0.5:\r\n img, boxes = flip(img, boxes)\r\n if self.augment is not None:\r\n img, boxes = self.augment(img, boxes)\r\n\r\n img = np.array(img)\r\n img, boxes = self.preprocess_img_boxes(img, boxes, self.resize_size)\r\n\r\n img = transforms.ToTensor()(img)\r\n boxes = torch.from_numpy(boxes)\r\n classes = torch.LongTensor(classes)\r\n\r\n return img, boxes, classes\r\n\r\n def preprocess_img_boxes(self, image, boxes, input_ksize):\r\n \"\"\"\r\n resize image and bboxes\r\n Returns\r\n image_paded: input_ksize\r\n bboxes: [None,4]\r\n \"\"\"\r\n min_side, max_side = input_ksize\r\n h, w, _ = image.shape\r\n\r\n smallest_side = min(w, h)\r\n largest_side = max(w, h)\r\n scale = min_side / smallest_side\r\n if largest_side * scale > max_side:\r\n scale = max_side / largest_side\r\n nw, nh = int(scale * w), int(scale * h)\r\n image_resized = cv2.resize(image, (nw, nh))\r\n\r\n pad_w = 32 - nw % 32\r\n pad_h = 32 - nh % 32\r\n\r\n image_paded = np.zeros(shape=[nh + pad_h, nw + pad_w, 3], dtype=np.uint8)\r\n image_paded[:nh, :nw, :] = image_resized\r\n\r\n if boxes is None:\r\n return image_paded\r\n else:\r\n boxes[:, [0, 2]] = boxes[:, [0, 2]] * scale\r\n boxes[:, [1, 3]] = boxes[:, [1, 3]] * scale\r\n return image_paded, boxes\r\n\r\n def collate_fn(self, data):\r\n imgs_list, boxes_list, classes_list = zip(*data)\r\n assert len(imgs_list) == len(boxes_list) == len(classes_list)\r\n batch_size = len(boxes_list)\r\n pad_imgs_list = []\r\n pad_boxes_list = []\r\n pad_classes_list = []\r\n\r\n h_list = [int(s.shape[1]) for s in imgs_list]\r\n w_list = [int(s.shape[2]) for s in imgs_list]\r\n max_h = np.array(h_list).max()\r\n max_w = np.array(w_list).max()\r\n for i in range(batch_size):\r\n img = imgs_list[i]\r\n pad_imgs_list.append(transforms.Normalize(self.mean, self.std, inplace=True)(\r\n torch.nn.functional.pad(img, (0, int(max_w - img.shape[2]), 0, int(max_h - img.shape[1])), value=0.)))\r\n\r\n max_num = 0\r\n for i in range(batch_size):\r\n n = boxes_list[i].shape[0]\r\n if n > max_num: max_num = n\r\n for i in range(batch_size):\r\n pad_boxes_list.append(\r\n torch.nn.functional.pad(boxes_list[i], (0, 0, 0, max_num - boxes_list[i].shape[0]), value=-1))\r\n pad_classes_list.append(\r\n torch.nn.functional.pad(classes_list[i], (0, max_num - classes_list[i].shape[0]), value=-1))\r\n\r\n batch_boxes = torch.stack(pad_boxes_list)\r\n batch_classes = torch.stack(pad_classes_list)\r\n batch_imgs = torch.stack(pad_imgs_list)\r\n\r\n return batch_imgs, batch_boxes, 
batch_classes\r\n\r\n\r\nif __name__ == \"__main__\":\r\n pass\r\n eval_dataset = VOCDataset(root_dir='../VOCdevkit/VOC2012', resize_size=[800, 1333],\r\n split='trainval_demoData', use_difficult=False, is_train=False, augment=None)\r\n print(len(eval_dataset.CLASSES_NAME))\r\n # dataset=VOCDataset(\"/home/data/voc2007_2012/VOCdevkit/VOC2012\",split='trainval')\r\n # for i in range(100):\r\n # img,boxes,classes=dataset[i]\r\n # img,boxes,classes=img.numpy().astype(np.uint8),boxes.numpy(),classes.numpy()\r\n # img=np.transpose(img,(1,2,0))\r\n # print(img.shape)\r\n # print(boxes)\r\n # print(classes)\r\n # for box in boxes:\r\n # pt1=(int(box[0]),int(box[1]))\r\n # pt2=(int(box[2]),int(box[3]))\r\n # img=cv2.rectangle(img,pt1,pt2,[0,255,0],3)\r\n # cv2.imshow(\"test\",img)\r\n # if cv2.waitKey(0)==27:\r\n # break\r\n # imgs,boxes,classes=eval_dataset.collate_fn([dataset[105],dataset[101],dataset[200]])\r\n # print(boxes,classes,\"\\n\",imgs.shape,boxes.shape,classes.shape,boxes.dtype,classes.dtype,imgs.dtype)\r\n # for index,i in enumerate(imgs):\r\n # i=i.numpy().astype(np.uint8)\r\n # i=np.transpose(i,(1,2,0))\r\n # i=cv2.cvtColor(i,cv2.COLOR_RGB2BGR)\r\n # print(i.shape,type(i))\r\n # cv2.imwrite(str(index)+\".jpg\",i)\r\n"
] |
[
[
"torch.device",
"numpy.zeros",
"matplotlib.patches.Rectangle",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.get_cmap",
"torch.nn.BatchNorm2d",
"torch.no_grad",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.axis",
"matplotlib.ticker.NullLocator",
"numpy.linspace",
"torch.nn.DataParallel"
],
[
"numpy.array",
"torch.stack",
"numpy.zeros",
"torch.from_numpy",
"torch.LongTensor",
"torch.nn.functional.pad"
]
] |
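Both preprocess_img and preprocess_img_boxes above compute pad_w/pad_h as 32 - n % 32, which pads a full extra 32 pixels when a side is already a multiple of 32. A sketch of the usual guard (hypothetical helper name, not from this repository):

    def pad_to_multiple(n, block=32):
        # 0 when n is already a multiple of `block`, otherwise the shortfall.
        return (block - n % block) % block

    assert pad_to_multiple(800) == 0   # 32 - 800 % 32 would give 32 here
    assert pad_to_multiple(801) == 31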
Ynakatsuka/nishika-22
|
[
"72994cab16486b3a26686642ad72a29b6761b46d",
"72994cab16486b3a26686642ad72a29b6761b46d"
] |
[
"src/benchmark/app.py",
"src/kvt/losses/lovasz.py"
] |
[
"import warnings\n\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\nfrom benchmark import (\n EXPERIMENT_NAMES,\n get_result,\n load_augmentation,\n load_config,\n load_lightning_module,\n normalize,\n)\nfrom kvt.initialization import initialize as kvt_initialize\nfrom kvt.utils import QueryExpansion\nfrom PIL import Image\n\n\[email protected](allow_output_mutation=True)\ndef load():\n # config\n config = load_config()\n\n # variables\n sample_submission_path = config.competition.sample_submission_path\n save_dir = config.save_dir\n\n # load reference\n sub = pd.read_csv(sample_submission_path)\n cite = pd.read_csv(config.competition.cite_path)\n embedding_paths = [\n f\"{save_dir}/predictions/test/{name}/test_fold_0.npy\"\n for name in EXPERIMENT_NAMES\n ]\n embeddings = np.concatenate(\n [normalize(np.load(path)) for path in embedding_paths], axis=1\n ).astype(\"float32\")\n embeddings = normalize(embeddings)\n n_query = len(sub)\n reference_embeddings = embeddings[n_query:]\n reference_ids = cite[\"gid\"].values\n\n # load models\n models, transforms, preprocessors = [], [], []\n for name in EXPERIMENT_NAMES:\n overrides = name.split(\",\")\n config = load_config(name, overrides=overrides)\n models.append(load_lightning_module(config))\n transforms.append(load_augmentation(config))\n preprocessors.append(lambda x: x)\n\n qe = QueryExpansion(\n alpha=1,\n k=50,\n similarity_threshold=0.7,\n normalize_similarity=True,\n strategy_to_deal_original=\"add\",\n n_query_update_iter=1,\n n_reference_update_iter=0,\n batch_size=10,\n )\n _, reference_embeddings = qe(reference_embeddings[:1], reference_embeddings)\n index = qe.create_index(reference_embeddings)\n\n return (\n config,\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n )\n\n\ndef main(\n config,\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n):\n # draw the page\n st.title(\"Similar Trade Mark Image Search\")\n\n k = 20\n n_cols, n_rows = 5, 4\n assert n_cols * n_rows == k\n\n # search\n uploaded_file = st.sidebar.file_uploader(\"Upload Image File\", type=\"jpg\")\n if uploaded_file is not None:\n image = Image.open(uploaded_file)\n st.sidebar.image(image, caption=\"Query Image\", use_column_width=True)\n\n D, I, _embeddings = get_result(\n np.array(image),\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n k=k,\n )\n assert len(D) == 1\n\n # draw image\n st.header(\"Found Images:\")\n col = st.columns(n_cols)\n for i, (sim, ref_id) in enumerate(zip(D[0], I[0])):\n if (i > 0) and (i % n_cols == 0):\n col = st.columns(n_cols)\n\n with col[i % n_cols]:\n path = f\"{config.input_dir}/cite_images/{ref_id}/{ref_id}.jpg\"\n image = Image.open(path)\n st.image(\n image,\n caption=f\"#{i+1}: Similarity: {sim:.3f}\",\n use_column_width=True,\n )\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n kvt_initialize()\n (\n config,\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n ) = load()\n main(\n config,\n preprocessors,\n transforms,\n models,\n qe,\n index,\n reference_embeddings,\n reference_ids,\n )\n",
"\"\"\"\nLovasz-Softmax and Jaccard hinge loss in PyTorch\nMaxim Berman 2018 ESAT-PSI KU Leuven (MIT License)\n\"\"\"\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\ntry:\n from itertools import ifilterfalse\nexcept ImportError: # py3k\n from itertools import filterfalse as ifilterfalse\n\n\ndef lovasz_grad(gt_sorted):\n \"\"\"\n Computes gradient of the Lovasz extension w.r.t sorted errors\n See Alg. 1 in paper\n \"\"\"\n p = len(gt_sorted)\n gts = gt_sorted.sum()\n intersection = gts - gt_sorted.float().cumsum(0)\n union = gts + (1 - gt_sorted).float().cumsum(0)\n jaccard = 1.0 - intersection / union\n if p > 1: # cover 1-pixel case\n jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]\n return jaccard\n\n\ndef iou_binary(preds, labels, EMPTY=1.0, ignore=None, per_image=True):\n \"\"\"\n IoU for foreground class\n binary: 1 foreground, 0 background\n \"\"\"\n if not per_image:\n preds, labels = (preds,), (labels,)\n ious = []\n for pred, label in zip(preds, labels):\n intersection = ((label == 1) & (pred == 1)).sum()\n union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()\n if not union:\n iou = EMPTY\n else:\n iou = float(intersection) / float(union)\n ious.append(iou)\n iou = mean(ious) # mean accross images if per_image\n return 100 * iou\n\n\ndef iou(preds, labels, C, EMPTY=1.0, ignore=None, per_image=False):\n \"\"\"\n Array of IoU for each (non ignored) class\n \"\"\"\n if not per_image:\n preds, labels = (preds,), (labels,)\n ious = []\n for pred, label in zip(preds, labels):\n iou = []\n for i in range(C):\n if i != ignore:\n # The ignored label is sometimes among predicted classes\n # (ENet - CityScapes)\n intersection = ((label == i) & (pred == i)).sum()\n union = ((label == i) | ((pred == i) & (label != ignore))).sum()\n if not union:\n iou.append(EMPTY)\n else:\n iou.append(float(intersection) / float(union))\n ious.append(iou)\n ious = [mean(iou) for iou in zip(*ious)] # mean accross images if per_image\n return 100 * np.array(ious)\n\n\n# --------------------------- BINARY LOSSES ---------------------------\n\n\ndef lovasz_hinge(logits, labels, per_image=True, ignore=None):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [B, H, W] Variable, logits at each pixel (between -infty and +infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n per_image: compute the loss per image instead of per batch\n ignore: void class id\n \"\"\"\n if per_image:\n loss = mean(\n lovasz_hinge_flat(\n *flatten_binary_scores(\n log.unsqueeze(0), lab.unsqueeze(0), ignore\n )\n )\n for log, lab in zip(logits, labels)\n )\n else:\n loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))\n return loss\n\n\ndef lovasz_hinge_flat(logits, labels):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [P] Variable, logits at each prediction (between -infty and +infty)\n labels: [P] Tensor, binary ground truth labels (0 or 1)\n ignore: label to ignore\n \"\"\"\n if len(labels) == 0:\n # only void pixels, the gradients should be 0\n return logits.sum() * 0.0\n signs = 2.0 * labels.float() - 1.0\n errors = 1.0 - logits * Variable(signs)\n errors_sorted, perm = torch.sort(errors, dim=0, descending=True)\n perm = perm.data\n gt_sorted = labels[perm]\n grad = lovasz_grad(gt_sorted)\n loss = torch.dot(F.relu(errors_sorted), Variable(grad))\n return loss\n\n\ndef flatten_binary_scores(scores, labels, ignore=None):\n \"\"\"\n Flattens predictions in the batch (binary case)\n Remove labels equal to 
'ignore'\n \"\"\"\n scores = scores.view(-1)\n labels = labels.view(-1)\n if ignore is None:\n return scores, labels\n valid = labels != ignore\n vscores = scores[valid]\n vlabels = labels[valid]\n return vscores, vlabels\n\n\nclass StableBCELoss(torch.nn.modules.Module):\n def __init__(self):\n super(StableBCELoss, self).__init__()\n\n def forward(self, input, target):\n neg_abs = -input.abs()\n loss = input.clamp(min=0) - input * target + (1 + neg_abs.exp()).log()\n return loss.mean()\n\n\ndef binary_xloss(logits, labels, ignore=None):\n \"\"\"\n Binary Cross entropy loss\n logits: [B, H, W] Variable, logits at each pixel (between -infty and +infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n ignore: void class id\n \"\"\"\n logits, labels = flatten_binary_scores(logits, labels, ignore)\n loss = StableBCELoss()(logits, Variable(labels.float()))\n return loss\n\n\n# --------------------------- MULTICLASS LOSSES ---------------------------\n\n\ndef lovasz_softmax(\n probas, labels, classes=\"present\", per_image=False, ignore=None\n):\n \"\"\"\n Multi-class Lovasz-Softmax loss\n probas: [B, C, H, W] Variable, class probabilities at each prediction\n (between 0 and 1).\n Interpreted as binary (sigmoid) output with outputs of size [B, H, W].\n labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)\n classes: 'all' for all, 'present' for classes present in labels,\n or a list of classes to average.\n per_image: compute the loss per image instead of per batch\n ignore: void class labels\n \"\"\"\n if per_image:\n loss = mean(\n lovasz_softmax_flat(\n *flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore),\n classes=classes,\n )\n for prob, lab in zip(probas, labels)\n )\n else:\n loss = lovasz_softmax_flat(\n *flatten_probas(probas, labels, ignore), classes=classes\n )\n return loss\n\n\ndef lovasz_softmax_flat(probas, labels, classes=\"present\"):\n \"\"\"\n Multi-class Lovasz-Softmax loss\n probas: [P, C] Variable, class probabilities at each prediction\n (between 0 and 1)\n labels: [P] Tensor, ground truth labels (between 0 and C - 1)\n classes: 'all' for all, 'present' for classes present in labels,\n or a list of classes to average.\n \"\"\"\n if probas.numel() == 0:\n # only void pixels, the gradients should be 0\n return probas * 0.0\n C = probas.size(1)\n losses = []\n class_to_sum = list(range(C)) if classes in [\"all\", \"present\"] else classes\n for c in class_to_sum:\n fg = (labels == c).float() # foreground for class c\n if (classes == \"present\") and (fg.sum() == 0):\n continue\n if C == 1:\n if len(classes) > 1:\n raise ValueError(\"Sigmoid output possible only with 1 class\")\n class_pred = probas[:, 0]\n else:\n class_pred = probas[:, c]\n errors = (Variable(fg) - class_pred).abs()\n errors_sorted, perm = torch.sort(errors, 0, descending=True)\n perm = perm.data\n fg_sorted = fg[perm]\n losses.append(\n torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted)))\n )\n return mean(losses)\n\n\ndef flatten_probas(probas, labels, ignore=None):\n \"\"\"\n Flattens predictions in the batch\n \"\"\"\n if probas.dim() == 3:\n # assumes output of a sigmoid layer\n B, H, W = probas.size()\n probas = probas.view(B, 1, H, W)\n B, C, H, W = probas.size()\n # B * H * W, C = P, C\n probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C)\n labels = labels.view(-1)\n if ignore is None:\n return probas, labels\n valid = labels != ignore\n vprobas = probas[valid.nonzero().squeeze()]\n vlabels = labels[valid]\n return vprobas, vlabels\n\n\ndef 
xloss(logits, labels, ignore=None):\n \"\"\"\n Cross entropy loss\n \"\"\"\n return F.cross_entropy(logits, Variable(labels), ignore_index=255)\n\n\n# --------------------------- HELPER FUNCTIONS ---------------------------\ndef isnan(x):\n return x != x\n\n\ndef mean(list_, ignore_nan=False, empty=0):\n \"\"\"\n nanmean compatible with generators.\n \"\"\"\n list_ = iter(list_)\n if ignore_nan:\n list_ = ifilterfalse(isnan, list_)\n\n n = 1\n try:\n acc = next(list_)\n except StopIteration:\n if empty == \"raise\":\n raise ValueError(\"Empty mean\")\n return empty\n for _, v in enumerate(list_, 2):\n acc += v\n n += 1\n if n == 1:\n return acc\n return acc / n\n\n\nclass LovaszSoftmaxLoss(nn.Module):\n def __init__(self, classes=\"present\", per_image=False, ignore=None):\n super().__init__()\n self.per_image = per_image\n self.classes = classes\n self.ignore = ignore\n\n def forward(self, input, target):\n if self.per_image:\n loss = mean(\n lovasz_softmax_flat(\n *flatten_probas(\n prob.unsqueeze(0), lab.unsqueeze(0), self.ignore\n ),\n classes=self.classes,\n )\n for prob, lab in zip(input, target)\n )\n else:\n loss = lovasz_softmax_flat(\n *flatten_probas(input, target, self.ignore),\n classes=self.classes,\n )\n return loss\n\n\nclass LovaszHingeLoss(nn.Module):\n def __init__(self, per_image=False, ignore=None):\n super().__init__()\n print(\"[LovaszHingeLoss::__init__] per_image:\", per_image)\n self.per_image = per_image\n self.ignore = ignore\n\n def forward(self, input, target):\n return lovasz_hinge(input, target, self.per_image, self.ignore)\n"
] |
[
[
"numpy.array",
"pandas.read_csv",
"numpy.load"
],
[
"torch.autograd.Variable",
"torch.nn.functional.relu",
"numpy.array",
"torch.sort"
]
] |
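A minimal usage sketch for the loss wrappers above, assuming the module is importable under the repository path shown and that only torch is needed; the shapes follow the docstrings ([B, H, W] logits for the hinge loss, [B, C, H, W] probabilities for the softmax variant).

    import torch

    from kvt.losses.lovasz import LovaszHingeLoss, LovaszSoftmaxLoss  # path as in this repo

    logits = torch.randn(2, 8, 8)                           # per-pixel logits
    bin_masks = torch.randint(0, 2, (2, 8, 8))              # binary ground truth
    probas = torch.softmax(torch.randn(2, 3, 8, 8), dim=1)  # class probabilities
    labels = torch.randint(0, 3, (2, 8, 8))                 # class ids in [0, C)

    print(LovaszHingeLoss(per_image=True)(logits, bin_masks))
    print(LovaszSoftmaxLoss(classes="present")(probas, labels))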
mmolnar0/sgillen_research
|
[
"752e09fdf7a996c832e71b0a8296322fe77e9ae3"
] |
[
"torch_lstm_mod/lstm.py"
] |
[
"import math\nimport torch as th\nimport torch.nn as nn\n\n\n\nclass LSTMCell(nn.Module):\n\n def __init__(self, input_size, hidden_size, bias=True):\n super(LSTMCell, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.bias = bias\n self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias)\n self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias)\n self.reset_parameters()\n\n def reset_parameters(self):\n std = 1.0 / math.sqrt(self.hidden_size)\n for w in self.parameters():\n w.data.uniform_(-std, std)\n\n def forward(self, x, hidden):\n\n if hidden is None:\n hidden = self._init_hidden(x)\n\n h, c = hidden\n h = h.view(h.size(1), -1)\n c = c.view(c.size(1), -1)\n x = x.view(x.size(1), -1)\n\n # Linear mappings\n preact = self.i2h(x) + self.h2h(h)\n\n # activations\n gates = preact[:, :3 * self.hidden_size].sigmoid()\n g_t = preact[:, 3 * self.hidden_size:].tanh()\n i_t = gates[:, :self.hidden_size]\n f_t = gates[:, self.hidden_size:2 * self.hidden_size]\n o_t = gates[:, -self.hidden_size:]\n\n c_t = th.mul(c, f_t) + th.mul(i_t, g_t)\n\n h_t = th.mul(o_t, c_t.tanh())\n\n h_t = h_t.view(1, h_t.size(0), -1)\n c_t = c_t.view(1, c_t.size(0), -1)\n return h_t, (h_t, c_t)\n\n @staticmethod\n def _init_hidden(input_):\n #h = th.zeros_like(input_.view(1, input_.size(1), -1))\n #c = th.zeros(1, input_.size(1), self.hidden_size))\n\n #return h, c\n return\n\n\nclass LSTM(nn.Module):\n\n def __init__(self, input_size, hidden_size, bias=True):\n super().__init__()\n self.lstm_cell = LSTMCell(input_size, hidden_size, bias)\n\n def forward(self, input_, hidden=None):\n # input_ is of dimensionalty (1, time, input_size, ...)\n\n outputs = []\n for x in th.unbind(input_, dim=1):\n hidden = self.lstm_cell(x, hidden)\n outputs.append(hidden[0].clone())\n\n return th.stack(outputs, dim=1)\n\n\n\nif __name__ == \"__main__\":\n\n lstm_cell = LSTMCell(input_size = 4, hidden_size = 12, bias=False)\n\n x = th.randn(4,1)\n h = th.randn(12,1)\n c = th.randn(12,1)\n\n yc, (hc,cc) = lstm_cell.forward(x,(h,c))\n\n print(\"yc shape: \" , yc.shape)\n print(\"hc shape: \" , hc.shape)\n print(\"cc shape: \" , cc.shape)\n\n\n lstm = LSTM(input_size = 4, hidden_size = 12, bias=False)\n\n x = th.randn(4,100,1)\n h = th.randn(12,1)\n\n y = lstm.forward(x, hidden = (h,h))\n\n print(\"y shape: \", y.shape)\n\n"
] |
[
[
"torch.nn.Linear",
"torch.mul",
"torch.stack",
"torch.unbind",
"torch.randn"
]
] |
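LSTMCell._init_hidden above is left as a stub; a sketch of the zero initialization its commented-out lines hint at (a hypothetical free function, since a staticmethod cannot read self.hidden_size):

    import torch as th

    def init_hidden(input_, hidden_size):
        # Zero (h, c) pair in the (1, batch, hidden_size) layout that the cell's
        # h.view(h.size(1), -1) / c.view(c.size(1), -1) calls expect.
        batch = input_.size(1)
        return th.zeros(1, batch, hidden_size), th.zeros(1, batch, hidden_size)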
jxhuang0508/CVRN
|
[
"ecbd1bebd43dadfd29536a8f31a65b920346fda6"
] |
[
"cvrn/dataset/datasets_crst.py"
] |
[
"import os\nimport os.path as osp\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport collections\nimport torch\nimport torchvision.transforms as transforms\nimport torchvision\nimport cv2\nfrom torch.utils import data\nimport sys\nfrom PIL import Image\n\npalette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,\n 220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,\n 0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]\nzero_pad = 256 * 3 - len(palette)\nfor i in range(zero_pad):\n palette.append(0)\n\n\ndef colorize_mask(mask):\n # mask: numpy array of the mask\n new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n\n return new_mask\n\nclass GTA5TestDataSet(data.Dataset):\n def __init__(self, root, list_path, max_iters=None, test_size=(1024, 512), test_scale=1.0, mean=(128, 128, 128),\n std=(1, 1, 1), scale=True, mirror=True, ignore_label=255):\n self.root = root\n self.list_path = list_path\n self.test_h, self.test_w = test_size\n self.scale = scale\n self.test_scale = test_scale\n self.ignore_label = ignore_label\n self.mean = mean\n self.std = std\n self.is_mirror = mirror\n # self.mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n self.img_ids = []\n self.label_ids = []\n with open(list_path) as f:\n for item in f.readlines():\n fields = item.strip().split('\\t')\n self.img_ids.append(fields[0])\n self.label_ids.append(fields[1])\n if not max_iters == None:\n self.img_ids = self.img_ids * int(np.ceil(float(max_iters) / len(self.img_ids)))\n self.label_ids = self.label_ids * int(np.ceil(float(max_iters) / len(self.label_ids)))\n self.files = []\n\n for idx in range(len(self.img_ids)):\n img_name = self.img_ids[idx]\n label_name = self.label_ids[idx]\n img_file = osp.join(self.root, img_name)\n label_file = osp.join(self.root, label_name)\n self.files.append({\n \"img\": img_file,\n \"label\": label_file,\n \"img_name\": img_name,\n \"label_name\": label_name\n })\n\n def __len__(self):\n return len(self.files)\n\n def __getitem__(self, index):\n datafiles = self.files[index]\n image = cv2.imread(datafiles[\"img\"], cv2.IMREAD_COLOR) # OpenCV read image as BGR, not RGB\n label = cv2.imread(datafiles[\"label\"], cv2.IMREAD_GRAYSCALE)\n\n img_name = datafiles[\"img_name\"]\n image = cv2.resize(image, None, fx=self.test_scale, fy=self.test_scale, interpolation=cv2.INTER_CUBIC)\n\n image = np.asarray(image, np.float32)\n label = np.asarray(label, np.float32)\n\n image -= self.mean # BGR\n image = image / self.std # np.reshape(self.std,(1,1,3))\n size = image.shape\n image = image.transpose((2, 0, 1))\n\n return image.copy(), label.copy(), np.array(size), img_name\n"
] |
[
[
"numpy.array",
"numpy.asarray"
]
] |
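A small usage sketch for colorize_mask above (the import path is assumed from this repository's layout; mask values index the Cityscapes-style palette defined at the top of the module):

    import numpy as np

    from cvrn.dataset.datasets_crst import colorize_mask  # path as in this repo

    mask = np.random.randint(0, 19, size=(512, 1024), dtype=np.uint8)  # fake 19-class labels
    color = colorize_mask(mask)  # PIL 'P'-mode image with the palette applied
    color.save("mask_color.png")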
yasirabd/deployment-notebook-prescriptive
|
[
"f3e07ee8472be9f2d8c78cfea2990131bdcbe881"
] |
[
"utils/transform.py"
] |
[
"import pandas as pd\nimport numpy as np\nimport time\nfrom datetime import timedelta, date, datetime\n\nclass TransformData(object):\n def __init__(self):\n pass\n\n # get data and preprocessing\n def format_timestamp(self, utc_datetime):\n now_timestamp = time.time()\n offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(now_timestamp)\n return utc_datetime + offset\n\n def reduce_columns(self, df, sensor):\n idx_cols_selected = [i for i in range(df.shape[1]) if i==0 or i%6==0]\n idx_col_timestamp = [1]\n idx = idx_col_timestamp + idx_cols_selected\n\n df = df[df.columns[idx]]\n df.columns = ['date'] + sensor\n\n # format col timestamp\n result = df.copy()\n result['date'] = pd.to_datetime(df['date']).dt.strftime('%Y-%m-%d %H:%M:%S')\n result['date'] = pd.to_datetime(result['date']).apply(self.format_timestamp)\n return result.iloc[0]\n\n def transform(self, data):\n date = data['date'].strftime(\"%Y-%m-%d %H:%M:%S\")\n sensors = data.index.tolist()[1:]\n actuals = []\n for d in data.tolist()[1:]:\n if type(d) == np.int or type(d) == np.int_ or type(d) == np.float64 or type(d) == np.float:\n actuals.append(np.around(d, 6))\n else:\n actuals.append(np.nan)\n return {'date': date, 'sensors': sensors, 'actuals':actuals}"
] |
[
[
"pandas.to_datetime",
"numpy.around"
]
] |
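A minimal usage sketch for TransformData.transform above (hypothetical sensor names and values; the import path is assumed from this repository's layout):

    from datetime import datetime

    import pandas as pd

    from utils.transform import TransformData  # path as in this repo

    row = pd.Series({"date": datetime(2021, 1, 1, 12, 0),
                     "temp": 36.5, "pressure": 101.3, "flow": None})
    print(TransformData().transform(row))
    # expected, roughly:
    # {'date': '2021-01-01 12:00:00', 'sensors': ['temp', 'pressure', 'flow'],
    #  'actuals': [36.5, 101.3, nan]}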
CoAxLab/binding_manuscript
|
[
"fc6c3dabc81b505edb5a79a1835c6f29c494f941"
] |
[
"imaging_code/fMRI_task.py"
] |
[
"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\nfrom __future__ import division # so that 1/3=0.333 instead of 1/3=0\nfrom psychopy import visual, core, data, event, logging, gui\nfrom psychopy.constants import * # things like STARTED, FINISHED\nimport pandas as pd\nimport numpy as np # whole numpy lib is available, prepend 'np.'\nfrom numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray\nfrom numpy.random import random, randint, normal, shuffle\nimport os # handy system and path functions\nimport statsmodels.formula.api as sm\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport scipy.io\n\n# Ensure that relative paths start from the same directory as this script\n_thisDir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(_thisDir)\n\n# Store info about the experiment session\nexpName = u'r2d4_MM' # from the Builder filename that created this script\nexpInfo = {'participant':u'', 'session':u''}\ndlg = gui.DlgFromDict(dictionary=expInfo, title=expName)\nif dlg.OK == False: core.quit() # user pressed cancel\nexpInfo['date'] = data.getDateStr() # add a simple timestamp\nexpInfo['expName'] = expName\n\n# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\nfilename = _thisDir + os.sep + 'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])\n\nout_all_fn = _thisDir + os.sep + 'data/%s_%s_%s_responses.csv' %(expInfo['participant'], expName, expInfo['session'])\ndata_out = pd.DataFrame(columns=('onsetTime','correctResp','keysPressed'))\n\n\n# An ExperimentHandler isn't essential but helps with data saving\nthisExp = data.ExperimentHandler(name=expName, version='',\n extraInfo=expInfo, runtimeInfo=None,\n originPath=None,\n savePickle=True, saveWideText=True,\n dataFileName=filename)\n#save a log file for detail verbose info\nlogFile = logging.LogFile(filename+'.log', level=logging.EXP)\nlogging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\n\nendExpNow = False # flag for 'escape' or other condition => quit the exp\n\n# Start Code - component code to be run before the window creation\n\n# Setup the Window\nwin = visual.Window(size=(500, 500), fullscr=True, screen=0, allowGUI=False, allowStencil=False,\n monitor='testMonitor', color=[-1,-1,-1], colorSpace='rgb',\n blendMode='avg', useFBO=True,\n )\n# store frame rate of monitor if we can measure it successfully\nexpInfo['frameRate']=win.getActualFrameRate()\nif expInfo['frameRate']!=None:\n frameDur = 1.0/round(expInfo['frameRate'])\nelse:\n frameDur = 1.0/60.0 # couldn't get a reliable measure so guess\n\n# Initialize components for Routine \"Instructions\"\nInstructionsClock = core.Clock()\ntext_2 = visual.TextStim(win=win, ori=0, name='text_2',\n text=u'The experiment is about to begin. 
', font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=None,\n color=u'white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n# Initialize components for Routine \"trial\"\ntrialClock = core.Clock()\nISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')\nimage = visual.ImageStim(win=win, name='image',units='pix',\n image='sin', mask=None,\n ori=0, pos=[0, 0], size=[200,200],\n color=[1,1,1], colorSpace='rgb', opacity=1,\n flipHoriz=False, flipVert=False,\n texRes=128, interpolate=True, depth=0.0)\n\nfixation = visual.ShapeStim(win,\n vertices=((0, -0.075), (0, 0.075), (0,0), (-0.05,0), (0.05, 0)),\n lineWidth=3,\n closeShape=False,\n lineColor='white')\n\n\n\nWrong_1 = visual.Circle(win=win, units = 'pix', radius = 100,lineColor='red', fillColor = 'red')\n\n\n# Initialize components for Routine \"End\"\nEndClock = core.Clock()\ntext = visual.TextStim(win=win, ori=0, name='text',\n text=u'Experiment is completed. Thank you for your participation.', font=u'Arial',\n pos=[0, 0], height=0.1, wrapWidth=None,\n color=u'white', colorSpace='rgb', opacity=1,\n depth=0.0)\n\n\n#######################\n#### Set up onsets ####\n#######################\ncorr_thresh = 0.1\ndfStims = pd.DataFrame\nsequence_img_ids = []\nimg_dict = {2: 'image_folder/stim_2.png', 3: 'image_folder/stim_3.png', 4: 'image_folder/stim_4.png', 5: 'image_folder/stim_5.png'}\nkey_dict = {2:'2', 3:'3', 4:'4', 5:'5'}\n\nisDone = 0\nwhile not isDone:\n trial_types = np.asarray([2, 3, 4, 5])\n trial_IDs = np.asarray(range(4))\n trial_freq = np.asarray([12, 12, 12, 12])\n iti_range = np.asarray([2, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7, 8])\n\n n_post = 3\n t_vec = []\n iti_vec = []\n tid_vec = []\n\n for tt in range(0,len(trial_types)):\n t_vec = np.repeat(trial_types,12)\n iti_vec = np.tile(iti_range,4)\n\n np.random.shuffle(t_vec)\n np.random.shuffle(iti_vec)\n vec = [0]\n id_vec = vec\n\n for t in range(0, len(t_vec)):\n vec = vec + [t_vec[t]] + np.repeat(0,iti_vec[t]).tolist()\n vec = vec + [0,0,0]\n dfStims = pd.DataFrame()\n X = np.zeros((len(vec),len(trial_types)))\n ons = np.zeros((12,4))\n for c in trial_types:\n a = np.where(vec==c)[0]\n ons[:,c-2] = a*2\n for indx in range(0, len(a)):\n name = a[indx]\n X[a[indx]][c-2]= 1\n\n df=pd.DataFrame(X)\n cxy = df.corr()\n cxy = abs(np.tril(cxy, k=-1))\n if cxy.max() < corr_thresh:\n isDone = 1\n\nfor x in range(0,len(vec)):\n if vec[x] == 0:\n sequence_img_ids.append('image_folder/skip.png')\n elif vec[x] != 0:\n sequence_img_ids.append(img_dict[vec[x]])\n\nid_vec = vec\nt_vec = range(0,480,2)\ndfStims['trial_img'] = sequence_img_ids\ndfStims['trial_ans'] = vec\n\n\n#######################\n## End Set up onsets ##\n#######################\n\nfilename = _thisDir + os.sep + 'data/%s_%s_%s_onsets.csv' %(expInfo['participant'], expName, expInfo['session'])\nnp.savetxt(filename, ons, '%5.2f',delimiter=\",\")\ndfStims.to_csv('MM_onsets.csv', index= False)\n\n\n#######################\n## Save as mat file for SPM\n#######################\n\n#\n# new_onsets = np.empty((4,), dtype=object)\n# df = pd.read_csv('0273_r2d4_MM_Run1_onsets.csv',header=None)\n# new_onsets[0] = np.array(df[0][:,np.newaxis])/2\n# new_onsets[1] = np.array(df[1][:,np.newaxis])/2\n# new_onsets[2] = np.array(df[2][:,np.newaxis])/2\n# new_onsets[3] = np.array(df[3][:,np.newaxis])/2\n# data={}\n# data['ons'] = new_onsets\n# scipy.io.savemat('0273_r2d4_MM_Run1_onsets.mat', data)\n#\n\n# Create some handy timers\nglobalClock = core.Clock() # to track the time since experiment started\nroutineTimer = 
core.CountdownTimer() # to track time remaining of each (non-slip) routine\n\n#------Prepare to start Routine \"Instructions\"-------\nt = 0\nInstructionsClock.reset() # clock\nframeN = -1\nroutineTimer.add(5.000000)\n# update component parameters for each repeat\n# keep track of which components have finished\nInstructionsComponents = []\nInstructionsComponents.append(text_2)\nfor thisComponent in InstructionsComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n\n#-------Start Routine \"Instructions\"-------\ncontinueRoutine = True\nwhile continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = InstructionsClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n\n # *text_2* updates\n if t >= 0.0 and text_2.status == NOT_STARTED:\n # keep track of start time/frame for later\n text_2.tStart = t # underestimates by a little under one frame\n text_2.frameNStart = frameN # exact frame index\n text_2.setAutoDraw(True)\n if text_2.status == STARTED and t >= (0.0 + (5-win.monitorFramePeriod*0.75)): #most of one frame period left\n text_2.setAutoDraw(False)\n\n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in InstructionsComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n\n # check for quit (the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n\n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#-------Ending Routine \"Instructions\"-------\nfor thisComponent in InstructionsComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n\n# set up handler to look after randomisation of conditions etc\ntrials = data.TrialHandler(nReps=1, method='sequential',\n extraInfo=expInfo, originPath=None,\n trialList=data.importConditions(u'MM_onsets.csv'),\n seed=None, name='trials')\n\n\nthisExp.addLoop(trials) # add the loop to the experiment\nthisTrial = trials.trialList[0] # so we can initialise stimuli with some values\n# abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)\nif thisTrial != None:\n for paramName in thisTrial.keys():\n exec(paramName + '= thisTrial.' + paramName)\nRTclock = core.Clock()\nmax_rt = 1\n\n##### Wait for scanner trigger key #####\nevent.clearEvents(eventType='keyboard')\n\nScannerKey = event.waitKeys([\"^\",\"escape\"])\nif endExpNow or \"escape\" in ScannerKey:\n core.quit()\nglobalClock.reset()\n\n\n\ntrial = -1\nfor thisTrial in trials:\n trial = trial+1\n\n currentLoop = trials\n # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)\n if thisTrial != None:\n for paramName in thisTrial.keys():\n exec(paramName + '= thisTrial.' 
+ paramName)\n\n fixation.setAutoDraw(True)\n win.flip()\n\n\n\n #------Prepare to start Routine \"trial\"-------\n\n frameN = -1\n routineTimer.add(2.000000)\n\n #For Debugging\n #print globalClock.getTime()\n #print t_vec[trial]\n # update component parameters for each repeat\n while globalClock.getTime() < t_vec[trial]:\n core.wait(.001)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n\n\n if trial_img != 'image_folder/skip.png':\n fixation.setAutoDraw(False)\n win.flip()\n image.setImage(trial_img)\n key_response = event.BuilderKeyResponse() # create an object of type KeyResponse\n key_response.status = NOT_STARTED\n # keep track of which components have finished\n trialComponents = []\n trialComponents.append(image)\n trialComponents.append(key_response)\n\n for thisComponent in trialComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n #-------Start Routine \"trial\"-------\n continueRoutine = True\n trialClock.reset() # clock\n # Print routTimer to verify matches correct onset timings.\n # print routineTimer.getTime()\n\n while continueRoutine:\n # get current me\n t = trialClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n\n # *image* updates\n if t >= 0.0 and image.status == NOT_STARTED:\n # keep track of start time/frame for later\n image.tStart = t # underestimates by a little under one frame\n image.frameNStart = frameN # exact frame index\n image.setAutoDraw(True)\n onsetTime = globalClock.getTime()\n if image.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left\n image.setAutoDraw(False)\n continueRoutine = False\n # *key_response* updates\n if t >= 0.0 and key_response.status == NOT_STARTED:\n # keep track of start time/frame for later\n key_response.tStart = t # underestimates by a little under one frame\n key_response.frameNStart = frameN # exact frame index\n key_response.status = STARTED\n # keyboard checking is just starting\n key_response.clock.reset() # now t=0\n event.clearEvents(eventType='keyboard')\n if key_response.status == STARTED and t >= (0.0 + (1-win.monitorFramePeriod*0.75)): #most of one frame period left\n key_response.status = STOPPED\n continueRoutine = False\n if key_response.status == STARTED:\n theseKeys = event.getKeys(keyList=['2', '3', '4', '5'])\n\n # check for quit:\n if \"escape\" in theseKeys:\n endExpNow = True\n if len(theseKeys) > 0: # at least one key was pressed\n key_response.keys.extend(theseKeys) # storing all keys\n key_response.rt.append(key_response.clock.getTime())\n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n\n for thisComponent in trialComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n\n # check for quit (the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n\n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n #-------Ending Routine \"trial\"-------\n for thisComponent in trialComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\n # check responses\n if key_response.keys in ['', [], None]: # No response was made\n key_response.keys=None\n # was no response the correct answer?!\n if 
str(trial_ans).lower() == 'none': key_response.corr = 1 # correct non-response\n else: key_response.corr = 0 # failed to respond (incorrectly)\n # store data for trials (TrialHandler)\n trials.addData('key_response.keys',key_response.keys)\n trials.addData('key_response.corr', key_response.corr)\n if key_response.keys != None: # we had a response\n trials.addData('key_response.rt', key_response.rt)\n thisExp.nextEntry()\n win.flip()\n #Save Data to output File\n\n\n data_out.loc[len(data_out)+1]=[onsetTime,trial_ans, str(key_response.keys).strip('[]')]\n data_out.to_csv(out_all_fn, index=False)\n\n elif trial_img == 'image_folder/skip.png':\n fixation.setAutoDraw(True)\n core.wait(0.5)\n thisExp.nextEntry()\n\n\n# completed all trials\n\n\n#------Prepare to start Routine \"End\"-------\nt = 0\nEndClock.reset() # clock\nframeN = -1\nroutineTimer.add(1.000000)\n# update component parameters for each repeat\n# keep track of which components have finished\nEndComponents = []\nEndComponents.append(text)\nfor thisComponent in EndComponents:\n if hasattr(thisComponent, 'status'):\n thisComponent.status = NOT_STARTED\n\n#-------Start Routine \"End\"-------\ncontinueRoutine = True\nwhile continueRoutine and routineTimer.getTime() > 0:\n # get current time\n t = EndClock.getTime()\n frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n # update/draw components on each frame\n\n # *text* updates\n if t >= 0.0 and text.status == NOT_STARTED:\n # keep track of start time/frame for later\n text.tStart = t # underestimates by a little under one frame\n text.frameNStart = frameN # exact frame index\n text.setAutoDraw(True)\n if text.status == STARTED and t >= (0.0 + (1.0-win.monitorFramePeriod*0.75)): #most of one frame period left\n text.setAutoDraw(False)\n\n # check if all components have finished\n if not continueRoutine: # a component has requested a forced-end of Routine\n break\n continueRoutine = False # will revert to True if at least one component still running\n for thisComponent in EndComponents:\n if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n continueRoutine = True\n break # at least one component has not yet finished\n\n # check for quit (the Esc key)\n if endExpNow or event.getKeys(keyList=[\"escape\"]):\n core.quit()\n\n # refresh the screen\n if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n win.flip()\n\n#-------Ending Routine \"End\"-------\nfor thisComponent in EndComponents:\n if hasattr(thisComponent, \"setAutoDraw\"):\n thisComponent.setAutoDraw(False)\nwin.close()\ncore.quit()\n"
] |
[
[
"numpy.savetxt",
"numpy.asarray",
"numpy.zeros",
"pandas.DataFrame",
"numpy.tile",
"numpy.random.shuffle",
"numpy.where",
"numpy.repeat",
"numpy.tril"
]
] |
NunoEdgarGFlowHub/io
|
[
"242a3be6c375e4f7cf130766c0098cfe4b0fc8d2"
] |
[
"tensorflow_io/kafka/__init__.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Kafka Dataset.\n\n@@KafkaDataset\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow_io.kafka.python.ops.kafka_dataset_ops import KafkaDataset\n\nfrom tensorflow.python.util.all_util import remove_undocumented\n\n_allowed_symbols = [\n \"KafkaDataset\",\n]\n\nremove_undocumented(__name__)\n"
] |
[
[
"tensorflow.python.util.all_util.remove_undocumented"
]
] |
jbbrokaw/matplotlib
|
[
"86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427",
"86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427",
"86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427",
"86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427",
"86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427",
"86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427"
] |
[
"lib/matplotlib/backends/backend_cairo.py",
"examples/mplot3d/trisurf3d_demo.py",
"examples/pylab_examples/print_stdout.py",
"examples/pylab_examples/fancybox_demo.py",
"examples/axes_grid/demo_imagegrid_aspect.py",
"examples/user_interfaces/pylab_with_gtk.py"
] |
[
"\"\"\"\nA Cairo backend for matplotlib\nAuthor: Steve Chaplin\n\nCairo is a vector graphics library with cross-device output support.\nFeatures of Cairo:\n * anti-aliasing\n * alpha channel\n * saves image files as PNG, PostScript, PDF\n\nhttp://cairographics.org\nRequires (in order, all available from Cairo website):\n cairo, pycairo\n\nNaming Conventions\n * classes MixedUpperCase\n * varables lowerUpper\n * functions underscore_separated\n\"\"\"\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\n\nimport os, sys, warnings, gzip\n\nimport numpy as np\n\ndef _fn_name(): return sys._getframe(1).f_code.co_name\n\ntry:\n import cairocffi as cairo\nexcept ImportError:\n try:\n import cairo\n except ImportError:\n raise ImportError(\"Cairo backend requires that cairocffi or pycairo is installed.\")\n else:\n HAS_CAIRO_CFFI = False\nelse:\n HAS_CAIRO_CFFI = True\n\n_version_required = (1,2,0)\nif cairo.version_info < _version_required:\n raise ImportError (\"Pycairo %d.%d.%d is installed\\n\"\n \"Pycairo %d.%d.%d or later is required\"\n % (cairo.version_info + _version_required))\nbackend_version = cairo.version\ndel _version_required\n\nfrom matplotlib.backend_bases import RendererBase, GraphicsContextBase,\\\n FigureManagerBase, FigureCanvasBase\nfrom matplotlib.cbook import is_string_like\nfrom matplotlib.figure import Figure\nfrom matplotlib.mathtext import MathTextParser\nfrom matplotlib.path import Path\nfrom matplotlib.transforms import Bbox, Affine2D\nfrom matplotlib.font_manager import ttfFontProperty\n\n_debug = False\n#_debug = True\n\n# Image::color_conv(format) for draw_image()\nif sys.byteorder == 'little':\n BYTE_FORMAT = 0 # BGRA\nelse:\n BYTE_FORMAT = 1 # ARGB\n\n\nclass RendererCairo(RendererBase):\n fontweights = {\n 100 : cairo.FONT_WEIGHT_NORMAL,\n 200 : cairo.FONT_WEIGHT_NORMAL,\n 300 : cairo.FONT_WEIGHT_NORMAL,\n 400 : cairo.FONT_WEIGHT_NORMAL,\n 500 : cairo.FONT_WEIGHT_NORMAL,\n 600 : cairo.FONT_WEIGHT_BOLD,\n 700 : cairo.FONT_WEIGHT_BOLD,\n 800 : cairo.FONT_WEIGHT_BOLD,\n 900 : cairo.FONT_WEIGHT_BOLD,\n 'ultralight' : cairo.FONT_WEIGHT_NORMAL,\n 'light' : cairo.FONT_WEIGHT_NORMAL,\n 'normal' : cairo.FONT_WEIGHT_NORMAL,\n 'medium' : cairo.FONT_WEIGHT_NORMAL,\n 'semibold' : cairo.FONT_WEIGHT_BOLD,\n 'bold' : cairo.FONT_WEIGHT_BOLD,\n 'heavy' : cairo.FONT_WEIGHT_BOLD,\n 'ultrabold' : cairo.FONT_WEIGHT_BOLD,\n 'black' : cairo.FONT_WEIGHT_BOLD,\n }\n fontangles = {\n 'italic' : cairo.FONT_SLANT_ITALIC,\n 'normal' : cairo.FONT_SLANT_NORMAL,\n 'oblique' : cairo.FONT_SLANT_OBLIQUE,\n }\n\n\n def __init__(self, dpi):\n \"\"\"\n \"\"\"\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n self.dpi = dpi\n self.gc = GraphicsContextCairo (renderer=self)\n self.text_ctx = cairo.Context (\n cairo.ImageSurface (cairo.FORMAT_ARGB32,1,1))\n self.mathtext_parser = MathTextParser('Cairo')\n\n RendererBase.__init__(self)\n\n def set_ctx_from_surface (self, surface):\n self.gc.ctx = cairo.Context (surface)\n\n\n def set_width_height(self, width, height):\n self.width = width\n self.height = height\n self.matrix_flipy = cairo.Matrix (yy=-1, y0=self.height)\n # use matrix_flipy for ALL rendering?\n # - problem with text? 
- will need to switch matrix_flipy off, or do a\n # font transform?\n\n\n def _fill_and_stroke (self, ctx, fill_c, alpha, alpha_overrides):\n if fill_c is not None:\n ctx.save()\n if len(fill_c) == 3 or alpha_overrides:\n ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], alpha)\n else:\n ctx.set_source_rgba (fill_c[0], fill_c[1], fill_c[2], fill_c[3])\n ctx.fill_preserve()\n ctx.restore()\n ctx.stroke()\n\n @staticmethod\n def convert_path(ctx, path, transform):\n for points, code in path.iter_segments(transform):\n if code == Path.MOVETO:\n ctx.move_to(*points)\n elif code == Path.CLOSEPOLY:\n ctx.close_path()\n elif code == Path.LINETO:\n ctx.line_to(*points)\n elif code == Path.CURVE3:\n ctx.curve_to(points[0], points[1],\n points[0], points[1],\n points[2], points[3])\n elif code == Path.CURVE4:\n ctx.curve_to(*points)\n\n\n def draw_path(self, gc, path, transform, rgbFace=None):\n ctx = gc.ctx\n\n transform = transform + \\\n Affine2D().scale(1.0, -1.0).translate(0, self.height)\n\n ctx.new_path()\n self.convert_path(ctx, path, transform)\n\n self._fill_and_stroke(ctx, rgbFace, gc.get_alpha(), gc.get_forced_alpha())\n\n def draw_image(self, gc, x, y, im):\n # bbox - not currently used\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n\n rows, cols, buf = im.color_conv (BYTE_FORMAT)\n surface = cairo.ImageSurface.create_for_data (\n buf, cairo.FORMAT_ARGB32, cols, rows, cols*4)\n ctx = gc.ctx\n y = self.height - y - rows\n\n ctx.save()\n ctx.set_source_surface (surface, x, y)\n if gc.get_alpha() != 1.0:\n ctx.paint_with_alpha(gc.get_alpha())\n else:\n ctx.paint()\n ctx.restore()\n\n def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):\n # Note: x,y are device/display coords, not user-coords, unlike other\n # draw_* methods\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n\n if ismath:\n self._draw_mathtext(gc, x, y, s, prop, angle)\n\n else:\n ctx = gc.ctx\n ctx.new_path()\n ctx.move_to (x, y)\n ctx.select_font_face (prop.get_name(),\n self.fontangles [prop.get_style()],\n self.fontweights[prop.get_weight()])\n\n size = prop.get_size_in_points() * self.dpi / 72.0\n\n ctx.save()\n if angle:\n ctx.rotate (-angle * np.pi / 180)\n ctx.set_font_size (size)\n\n if HAS_CAIRO_CFFI:\n if not isinstance(s, six.text_type):\n s = six.text_type(s)\n else:\n if not six.PY3 and isinstance(s, six.text_type):\n s = s.encode(\"utf-8\")\n\n ctx.show_text(s)\n ctx.restore()\n\n def _draw_mathtext(self, gc, x, y, s, prop, angle):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n\n ctx = gc.ctx\n width, height, descent, glyphs, rects = self.mathtext_parser.parse(\n s, self.dpi, prop)\n\n ctx.save()\n ctx.translate(x, y)\n if angle:\n ctx.rotate (-angle * np.pi / 180)\n\n for font, fontsize, s, ox, oy in glyphs:\n ctx.new_path()\n ctx.move_to(ox, oy)\n\n fontProp = ttfFontProperty(font)\n ctx.save()\n ctx.select_font_face (fontProp.name,\n self.fontangles [fontProp.style],\n self.fontweights[fontProp.weight])\n\n size = fontsize * self.dpi / 72.0\n ctx.set_font_size(size)\n if isinstance(s, six.text_type):\n s = s.encode(\"utf-8\")\n ctx.show_text(s)\n ctx.restore()\n\n for ox, oy, w, h in rects:\n ctx.new_path()\n ctx.rectangle (ox, oy, w, h)\n ctx.set_source_rgb (0, 0, 0)\n ctx.fill_preserve()\n\n ctx.restore()\n\n\n def flipy(self):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n return True\n #return False # tried - all draw objects ok except text (and images?)\n # which comes out 
mirrored!\n\n\n def get_canvas_width_height(self):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n return self.width, self.height\n\n\n def get_text_width_height_descent(self, s, prop, ismath):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n if ismath:\n width, height, descent, fonts, used_characters = self.mathtext_parser.parse(\n s, self.dpi, prop)\n return width, height, descent\n\n ctx = self.text_ctx\n ctx.save()\n ctx.select_font_face (prop.get_name(),\n self.fontangles [prop.get_style()],\n self.fontweights[prop.get_weight()])\n\n # Cairo (says it) uses 1/96 inch user space units, ref: cairo_gstate.c\n # but if /96.0 is used the font is too small\n\n size = prop.get_size_in_points() * self.dpi / 72.0\n\n # problem - scale remembers last setting and font can become\n # enormous causing program to crash\n # save/restore prevents the problem\n ctx.set_font_size (size)\n\n y_bearing, w, h = ctx.text_extents (s)[1:4]\n ctx.restore()\n\n return w, h, h + y_bearing\n\n\n def new_gc(self):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n self.gc.ctx.save()\n self.gc._alpha = 1.0\n self.gc._forced_alpha = False # if True, _alpha overrides A from RGBA\n return self.gc\n\n\n def points_to_pixels(self, points):\n if _debug: print('%s.%s()' % (self.__class__.__name__, _fn_name()))\n return points/72.0 * self.dpi\n\n\nclass GraphicsContextCairo(GraphicsContextBase):\n _joind = {\n 'bevel' : cairo.LINE_JOIN_BEVEL,\n 'miter' : cairo.LINE_JOIN_MITER,\n 'round' : cairo.LINE_JOIN_ROUND,\n }\n\n _capd = {\n 'butt' : cairo.LINE_CAP_BUTT,\n 'projecting' : cairo.LINE_CAP_SQUARE,\n 'round' : cairo.LINE_CAP_ROUND,\n }\n\n\n def __init__(self, renderer):\n GraphicsContextBase.__init__(self)\n self.renderer = renderer\n\n\n def restore(self):\n self.ctx.restore()\n\n\n def set_alpha(self, alpha):\n GraphicsContextBase.set_alpha(self, alpha)\n _alpha = self.get_alpha()\n rgb = self._rgb\n if self.get_forced_alpha():\n self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], _alpha)\n else:\n self.ctx.set_source_rgba (rgb[0], rgb[1], rgb[2], rgb[3])\n\n\n #def set_antialiased(self, b):\n # enable/disable anti-aliasing is not (yet) supported by Cairo\n\n\n def set_capstyle(self, cs):\n if cs in ('butt', 'round', 'projecting'):\n self._capstyle = cs\n self.ctx.set_line_cap (self._capd[cs])\n else:\n raise ValueError('Unrecognized cap style. 
Found %s' % cs)\n\n\n def set_clip_rectangle(self, rectangle):\n if not rectangle: return\n x,y,w,h = rectangle.bounds\n # pixel-aligned clip-regions are faster\n x,y,w,h = round(x), round(y), round(w), round(h)\n ctx = self.ctx\n ctx.new_path()\n ctx.rectangle (x, self.renderer.height - h - y, w, h)\n ctx.clip ()\n\n def set_clip_path(self, path):\n if not path: return\n tpath, affine = path.get_transformed_path_and_affine()\n ctx = self.ctx\n ctx.new_path()\n affine = affine + Affine2D().scale(1.0, -1.0).translate(0.0, self.renderer.height)\n RendererCairo.convert_path(ctx, tpath, affine)\n ctx.clip()\n\n def set_dashes(self, offset, dashes):\n self._dashes = offset, dashes\n if dashes == None:\n self.ctx.set_dash([], 0) # switch dashes off\n else:\n self.ctx.set_dash(\n list(self.renderer.points_to_pixels(np.asarray(dashes))), offset)\n\n\n def set_foreground(self, fg, isRGBA=None):\n GraphicsContextBase.set_foreground(self, fg, isRGBA)\n if len(self._rgb) == 3:\n self.ctx.set_source_rgb(*self._rgb)\n else:\n self.ctx.set_source_rgba(*self._rgb)\n\n def set_graylevel(self, frac):\n GraphicsContextBase.set_graylevel(self, frac)\n if len(self._rgb) == 3:\n self.ctx.set_source_rgb(*self._rgb)\n else:\n self.ctx.set_source_rgba(*self._rgb)\n\n\n def set_joinstyle(self, js):\n if js in ('miter', 'round', 'bevel'):\n self._joinstyle = js\n self.ctx.set_line_join(self._joind[js])\n else:\n raise ValueError('Unrecognized join style. Found %s' % js)\n\n\n def set_linewidth(self, w):\n self._linewidth = w\n self.ctx.set_line_width (self.renderer.points_to_pixels(w))\n\n\ndef new_figure_manager(num, *args, **kwargs): # called by backends/__init__.py\n \"\"\"\n Create a new figure manager instance\n \"\"\"\n if _debug: print('%s()' % (_fn_name()))\n FigureClass = kwargs.pop('FigureClass', Figure)\n thisFig = FigureClass(*args, **kwargs)\n return new_figure_manager_given_figure(num, thisFig)\n\n\ndef new_figure_manager_given_figure(num, figure):\n \"\"\"\n Create a new figure manager instance for the given figure.\n \"\"\"\n canvas = FigureCanvasCairo(figure)\n manager = FigureManagerBase(canvas, num)\n return manager\n\n\nclass FigureCanvasCairo (FigureCanvasBase):\n def print_png(self, fobj, *args, **kwargs):\n width, height = self.get_width_height()\n\n renderer = RendererCairo (self.figure.dpi)\n renderer.set_width_height (width, height)\n surface = cairo.ImageSurface (cairo.FORMAT_ARGB32, width, height)\n renderer.set_ctx_from_surface (surface)\n\n self.figure.draw (renderer)\n surface.write_to_png (fobj)\n\n def print_pdf(self, fobj, *args, **kwargs):\n return self._save(fobj, 'pdf', *args, **kwargs)\n\n def print_ps(self, fobj, *args, **kwargs):\n return self._save(fobj, 'ps', *args, **kwargs)\n\n def print_svg(self, fobj, *args, **kwargs):\n return self._save(fobj, 'svg', *args, **kwargs)\n\n def print_svgz(self, fobj, *args, **kwargs):\n return self._save(fobj, 'svgz', *args, **kwargs)\n\n def _save (self, fo, format, **kwargs):\n # save PDF/PS/SVG\n orientation = kwargs.get('orientation', 'portrait')\n\n dpi = 72\n self.figure.dpi = dpi\n w_in, h_in = self.figure.get_size_inches()\n width_in_points, height_in_points = w_in * dpi, h_in * dpi\n\n if orientation == 'landscape':\n width_in_points, height_in_points = (height_in_points,\n width_in_points)\n\n if format == 'ps':\n if not hasattr(cairo, 'PSSurface'):\n raise RuntimeError ('cairo has not been compiled with PS '\n 'support enabled')\n surface = cairo.PSSurface (fo, width_in_points, height_in_points)\n elif format == 'pdf':\n if not 
hasattr(cairo, 'PDFSurface'):\n raise RuntimeError ('cairo has not been compiled with PDF '\n 'support enabled')\n surface = cairo.PDFSurface (fo, width_in_points, height_in_points)\n elif format in ('svg', 'svgz'):\n if not hasattr(cairo, 'SVGSurface'):\n raise RuntimeError ('cairo has not been compiled with SVG '\n 'support enabled')\n if format == 'svgz':\n filename = fo\n if is_string_like(fo):\n fo = open(fo, 'wb')\n close = True\n else:\n close = False\n try:\n fo = gzip.GzipFile(None, 'wb', fileobj=fo)\n finally:\n if close:\n fo.close()\n surface = cairo.SVGSurface (fo, width_in_points, height_in_points)\n else:\n warnings.warn (\"unknown format: %s\" % format)\n return\n\n # surface.set_dpi() can be used\n renderer = RendererCairo (self.figure.dpi)\n renderer.set_width_height (width_in_points, height_in_points)\n renderer.set_ctx_from_surface (surface)\n ctx = renderer.gc.ctx\n\n if orientation == 'landscape':\n ctx.rotate (np.pi/2)\n ctx.translate (0, -height_in_points)\n # cairo/src/cairo_ps_surface.c\n # '%%Orientation: Portrait' is always written to the file header\n # '%%Orientation: Landscape' would possibly cause problems\n # since some printers would rotate again ?\n # TODO:\n # add portrait/landscape checkbox to FileChooser\n\n self.figure.draw (renderer)\n\n show_fig_border = False # for testing figure orientation and scaling\n if show_fig_border:\n ctx.new_path()\n ctx.rectangle(0, 0, width_in_points, height_in_points)\n ctx.set_line_width(4.0)\n ctx.set_source_rgb(1,0,0)\n ctx.stroke()\n ctx.move_to(30,30)\n ctx.select_font_face ('sans-serif')\n ctx.set_font_size(20)\n ctx.show_text('Origin corner')\n\n ctx.show_page()\n surface.finish()\n\n\nFigureCanvas = FigureCanvasCairo\n",
"from mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nn_angles = 36\nn_radii = 8\n\n# An array of radii\n# Does not include radius r=0, this is to eliminate duplicate points\nradii = np.linspace(0.125, 1.0, n_radii)\n\n# An array of angles\nangles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)\n\n# Repeat all angles for each radius\nangles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)\n\n# Convert polar (radii, angles) coords to cartesian (x, y) coords\n# (0, 0) is added here. There are no duplicate points in the (x, y) plane\nx = np.append(0, (radii*np.cos(angles)).flatten())\ny = np.append(0, (radii*np.sin(angles)).flatten())\n\n# Pringle surface\nz = np.sin(-x*y)\n\nfig = plt.figure()\nax = fig.gca(projection='3d')\n\nax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2)\n\nplt.show()\n",
"#!/usr/bin/env python\n# -*- noplot -*-\n# print png to standard out\n# usage: python print_stdout.py > somefile.png\nimport sys\nimport matplotlib\nmatplotlib.use('Agg')\nfrom pylab import *\n\nplot([1, 2, 3])\n\nsavefig(sys.stdout)\nshow()\n",
"import matplotlib.pyplot as plt\nimport matplotlib.transforms as mtransforms\nfrom matplotlib.patches import FancyBboxPatch\n\n\n# Bbox object around which the fancy box will be drawn.\nbb = mtransforms.Bbox([[0.3, 0.4], [0.7, 0.6]])\n\n\ndef draw_bbox(ax, bb):\n # boxstyle=square with pad=0, i.e. bbox itself.\n p_bbox = FancyBboxPatch((bb.xmin, bb.ymin),\n abs(bb.width), abs(bb.height), \n boxstyle=\"square,pad=0.\",\n ec=\"k\", fc=\"none\", zorder=10.,\n )\n ax.add_patch(p_bbox)\n\n\ndef test1(ax):\n\n # a fancy box with round corners. pad=0.1\n p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),\n abs(bb.width), abs(bb.height),\n boxstyle=\"round,pad=0.1\",\n fc=(1., .8, 1.),\n ec=(1., 0.5, 1.))\n\n ax.add_patch(p_fancy)\n\n ax.text(0.1, 0.8,\n r' boxstyle=\"round,pad=0.1\"',\n size=10, transform=ax.transAxes)\n\n # draws control points for the fancy box.\n #l = p_fancy.get_path().vertices\n #ax.plot(l[:,0], l[:,1], \".\")\n\n # draw the original bbox in black\n draw_bbox(ax, bb)\n\n\ndef test2(ax):\n\n # bbox=round has two optional argument. pad and rounding_size.\n # They can be set during the initialization.\n p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),\n abs(bb.width), abs(bb.height),\n boxstyle=\"round,pad=0.1\",\n fc=(1., .8, 1.),\n ec=(1., 0.5, 1.))\n\n ax.add_patch(p_fancy)\n\n # boxstyle and its argument can be later modified with\n # set_boxstyle method. Note that the old attributes are simply\n # forgotten even if the boxstyle name is same.\n\n p_fancy.set_boxstyle(\"round,pad=0.1, rounding_size=0.2\")\n # or\n #p_fancy.set_boxstyle(\"round\", pad=0.1, rounding_size=0.2)\n\n ax.text(0.1, 0.8,\n ' boxstyle=\"round,pad=0.1\\n rounding\\\\_size=0.2\"',\n size=10, transform=ax.transAxes)\n\n # draws control points for the fancy box.\n #l = p_fancy.get_path().vertices\n #ax.plot(l[:,0], l[:,1], \".\")\n\n draw_bbox(ax, bb)\n\n\ndef test3(ax):\n\n # mutation_scale determine overall scale of the mutation,\n # i.e. 
both pad and rounding_size is scaled according to this\n # value.\n p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),\n abs(bb.width), abs(bb.height),\n boxstyle=\"round,pad=0.1\",\n mutation_scale=2.,\n fc=(1., .8, 1.),\n ec=(1., 0.5, 1.))\n\n ax.add_patch(p_fancy)\n\n ax.text(0.1, 0.8,\n ' boxstyle=\"round,pad=0.1\"\\n mutation\\\\_scale=2',\n size=10, transform=ax.transAxes)\n\n # draws control points for the fancy box.\n #l = p_fancy.get_path().vertices\n #ax.plot(l[:,0], l[:,1], \".\")\n\n draw_bbox(ax, bb)\n\n\ndef test4(ax):\n\n # When the aspect ratio of the axes is not 1, the fancy box may\n # not be what you expected (green)\n\n p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),\n abs(bb.width), abs(bb.height),\n boxstyle=\"round,pad=0.2\",\n fc=\"none\",\n ec=(0., .5, 0.), zorder=4)\n\n ax.add_patch(p_fancy)\n\n # You can compensate this by setting the mutation_aspect (pink).\n p_fancy = FancyBboxPatch((bb.xmin, bb.ymin),\n abs(bb.width), abs(bb.height),\n boxstyle=\"round,pad=0.3\",\n mutation_aspect=.5, \n fc=(1., 0.8, 1.),\n ec=(1., 0.5, 1.))\n\n ax.add_patch(p_fancy)\n\n ax.text(0.1, 0.8,\n ' boxstyle=\"round,pad=0.3\"\\n mutation\\\\_aspect=.5',\n size=10, transform=ax.transAxes)\n\n draw_bbox(ax, bb)\n\n\ndef test_all():\n plt.clf()\n\n ax = plt.subplot(2, 2, 1)\n test1(ax)\n ax.set_xlim(0., 1.)\n ax.set_ylim(0., 1.)\n ax.set_title(\"test1\")\n ax.set_aspect(1.)\n\n ax = plt.subplot(2, 2, 2)\n ax.set_title(\"test2\")\n test2(ax)\n ax.set_xlim(0., 1.)\n ax.set_ylim(0., 1.)\n ax.set_aspect(1.)\n\n ax = plt.subplot(2, 2, 3)\n ax.set_title(\"test3\")\n test3(ax)\n ax.set_xlim(0., 1.)\n ax.set_ylim(0., 1.)\n ax.set_aspect(1)\n\n ax = plt.subplot(2, 2, 4)\n ax.set_title(\"test4\")\n test4(ax)\n ax.set_xlim(-0.5, 1.5)\n ax.set_ylim(0., 1.)\n ax.set_aspect(2.)\n\n plt.draw()\n plt.show()\n\ntest_all()\n",
"import matplotlib.pyplot as plt\n\nfrom mpl_toolkits.axes_grid1 import ImageGrid\nfig = plt.figure(1)\n\ngrid1 = ImageGrid(fig, 121, (2, 2), axes_pad=0.1,\n aspect=True, share_all=True)\n\nfor i in [0, 1]:\n grid1[i].set_aspect(2)\n\n\ngrid2 = ImageGrid(fig, 122, (2, 2), axes_pad=0.1,\n aspect=True, share_all=True)\n\n\nfor i in [1, 3]:\n grid2[i].set_aspect(2)\n\nplt.show()\n",
"\"\"\"\nAn example of how to use pylab to manage your figure windows, but\nmodify the GUI by accessing the underlying gtk widgets\n\"\"\"\nfrom __future__ import print_function\nimport matplotlib\nmatplotlib.use('GTKAgg')\nimport matplotlib.pyplot as plt\n\n\nfig, ax = plt.subplots()\nplt.plot([1, 2, 3], 'ro-', label='easy as 1 2 3')\nplt.plot([1, 4, 9], 'gs--', label='easy as 1 2 3 squared')\nplt.legend()\n\n\nmanager = plt.get_current_fig_manager()\n# you can also access the window or vbox attributes this way\ntoolbar = manager.toolbar\n\n# now let's add a button to the toolbar\nimport gtk\nnext = 8; # where to insert this in the mpl toolbar\nbutton = gtk.Button('Click me')\nbutton.show()\n\n\ndef clicked(button):\n print('hi mom')\nbutton.connect('clicked', clicked)\n\ntoolitem = gtk.ToolItem()\ntoolitem.show()\ntoolitem.set_tooltip(\n toolbar.tooltips,\n 'Click me for fun and profit')\n\ntoolitem.add(button)\ntoolbar.insert(toolitem, next); next += 1\n\n# now let's add a widget to the vbox\nlabel = gtk.Label()\nlabel.set_markup('Drag mouse over axes for position')\nlabel.show()\nvbox = manager.vbox\nvbox.pack_start(label, False, False)\nvbox.reorder_child(manager.toolbar, -1)\n\n\ndef update(event):\n if event.xdata is None:\n label.set_markup('Drag mouse over axes for position')\n else:\n label.set_markup('<span color=\"#ef0000\">x,y=(%f, %f)</span>' % (event.xdata, event.ydata))\n\nplt.connect('motion_notify_event', update)\n\nplt.show()\n"
] |
[
[
"matplotlib.backend_bases.GraphicsContextBase.__init__",
"matplotlib.cbook.is_string_like",
"numpy.asarray",
"matplotlib.mathtext.MathTextParser",
"matplotlib.backend_bases.FigureManagerBase",
"matplotlib.backend_bases.GraphicsContextBase.set_alpha",
"matplotlib.backend_bases.GraphicsContextBase.set_graylevel",
"matplotlib.font_manager.ttfFontProperty",
"matplotlib.backend_bases.RendererBase.__init__",
"matplotlib.backend_bases.GraphicsContextBase.set_foreground",
"matplotlib.transforms.Affine2D"
],
[
"numpy.sin",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.repeat",
"matplotlib.pyplot.show",
"numpy.linspace"
],
[
"matplotlib.use"
],
[
"matplotlib.transforms.Bbox",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.subplot"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"matplotlib.use",
"matplotlib.pyplot.get_current_fig_manager",
"matplotlib.pyplot.connect",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
]
] |
ejnnr/steerable_pdo_experiments
|
[
"17902e56641cefe305b935c8733b45aa066bf068",
"17902e56641cefe305b935c8733b45aa066bf068"
] |
[
"stl_experiments/experiments/plot_exps.py",
"stl_experiments/experiments/utils.py"
] |
[
"\nimport pandas as pd\nimport argparse\nimport os\nimport matplotlib\n\nimport utils\n\nif \"DISPLAY\" not in os.environ:\n matplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\n\nSHOW_PLOT = False\nSAVE_PLOT = True\n\nRESHUFFLE = False\nAUGMENT_TRAIN = False\n\ncolors = {\n \"train\": \"blue\",\n \"valid\": \"green\",\n \"test\": \"red\"\n}\n\n\ndef plot_mean_with_variance(axis, data, label):\n mean = data.mean()\n std = data.std()\n axis.plot(mean, label=label, color=colors[label])\n axis.fill_between(\n mean.index,\n mean - std,\n mean + std,\n color=colors[label],\n alpha=0.1\n )\n\n\ndef plot(logs, plotpath=None, show=False, outfig=None):\n \n if isinstance(logs, str) and os.path.isfile(logs):\n logs = utils.retrieve_logs(logs)\n elif not isinstance(logs, pd.DataFrame):\n raise ValueError()\n \n if outfig is None:\n figure, (loss_axis, acc_axis) = plt.subplots(1, 2, figsize=(10, 4))\n else:\n figure, (loss_axis, acc_axis) = outfig\n\n train = logs[logs.split.str.startswith(\"train\")].groupby(\"iteration\")\n valid = logs[logs.split == \"valid\"].groupby(\"iteration\")\n test = logs[logs.split == \"test\"].groupby(\"iteration\")\n \n #################### Plot Loss trends ####################\n \n loss_axis.cla()\n \n plot_mean_with_variance(loss_axis, train.loss, \"train\")\n if len(valid) > 0:\n plot_mean_with_variance(loss_axis, valid.loss, \"valid\")\n if len(test) > 0:\n plot_mean_with_variance(loss_axis, test.loss, \"test\")\n \n loss_axis.legend()\n loss_axis.set_xlabel('iterations')\n loss_axis.set_ylabel('Loss')\n \n #################### Plot Accuracy trends ####################\n \n acc_axis.cla()\n \n plot_mean_with_variance(acc_axis, train.accuracy, \"train\")\n if len(valid) > 0:\n plot_mean_with_variance(acc_axis, valid.accuracy, \"valid\")\n if len(test) > 0:\n plot_mean_with_variance(acc_axis, test.accuracy, \"test\")\n \n ################## Test score ########################\n \n test = logs[logs.split == \"test\"]\n \n xmax = logs.iteration.max()\n \n if len(test) > 0:\n best_acc = test.accuracy.max()\n acc_axis.hlines(best_acc, xmin=0, xmax=xmax, linewidth=0.5, linestyles='--', label='Max Test Accuracy')\n acc_axis.set_yticks(list(acc_axis.get_yticks()) + [best_acc])\n \n if len(test) > 1:\n mean_acc = test.accuracy.mean()\n mean_std = test.accuracy.std()\n acc_axis.hlines(mean_acc, xmin=0, xmax=xmax, linewidth=0.5, color=colors[\"test\"], label='Mean Test Accuracy')\n acc_axis.fill_between([0, xmax], [mean_acc - mean_std] * 2, [mean_acc + mean_std] * 2, color=colors[\"test\"],\n alpha=0.1)\n acc_axis.set_yticks(list(acc_axis.get_yticks()) + [mean_acc])\n \n acc_axis.legend()\n acc_axis.set_xlabel('iterations')\n acc_axis.set_ylabel('Accuracy')\n \n figure.tight_layout()\n plt.draw()\n \n if plotpath is not None:\n figure.savefig(plotpath, format='svg', dpi=256, bbox_inches=\"tight\")\n \n if show:\n figure.show()\n plt.pause(0.01)\n\n\n################################################################################\n################################################################################\n\n\nif __name__ == \"__main__\":\n # Parse training configuration\n parser = argparse.ArgumentParser()\n \n # Dataset params\n parser.add_argument('--dataset', type=str, help='The name of the dataset to use')\n parser.add_argument('--augment', dest=\"augment\", action=\"store_true\",\n help='Augment the training set with rotated images')\n parser.set_defaults(augment=AUGMENT_TRAIN)\n\n parser.add_argument('--reshuffle', dest=\"reshuffle\", action=\"store_true\",\n 
help='Reshuffle train and valid splits instead of using the default split')\n parser.set_defaults(reshuffle=RESHUFFLE)\n \n # Model params\n parser.add_argument('--model', type=str, help='The name of the model to use')\n parser.add_argument('--N', type=int, help='Size of cyclic group for GCNN and maximum frequency for HNET')\n parser.add_argument('--flip', dest=\"flip\", action=\"store_true\",\n help='Use also reflection equivariance in the EXP model')\n parser.set_defaults(flip=False)\n parser.add_argument('--sgsize', type=int, default=None,\n help='Number of rotations in the subgroup to restrict to in the EXP e2sfcnn models')\n parser.add_argument('--fixparams', dest=\"fixparams\", action=\"store_true\",\n help='Keep the number of parameters of the model fixed by adjusting its topology')\n parser.set_defaults(fixparams=False)\n parser.add_argument('--F', type=float, default=0.8, help='Frequency cut-off: maximum frequency at radius \"r\" is \"F*r\"')\n parser.add_argument('--sigma', type=float, default=0.6, help='Width of the rings building the bases (std of the gaussian window)')\n parser.add_argument('--J', type=int, default=0, help='Number of additional frequencies in the interwiners of finite groups')\n parser.add_argument('--restrict', type=int, default=-1, help='Layer where to restrict SFCNN from E(2) to SE(2)')\n\n # plot configs\n parser.add_argument('--show', dest=\"show\", action=\"store_true\", help='Show the plots during execution')\n parser.set_defaults(show=SHOW_PLOT)\n\n parser.add_argument('--store_plot', dest=\"store_plot\", action=\"store_true\", help='Save the plots in a file or not')\n parser.set_defaults(store_plot=SAVE_PLOT)\n \n config = parser.parse_args()\n \n # Draw the plot\n logs_file = utils.logs_path(config)\n plotpath = utils.plot_path(config)\n plot(logs_file, plotpath, config.show)\n",
"import os.path\nimport sqlite3\nimport pandas as pd\nimport numpy as np\nimport io\n\nfrom typing import List\n\nfrom models import *\n\n# the values of these command line arguments are used to define the name of the experiments\n# you can add more names in this list\nEXPERIMENT_PARAMETERS = [\"model\", \"type\", \"N\", \"flip\", \"restrict\", \"sgsize\", \"fixparams\", \"augment\", \"F\", \"sigma\", \"interpolation\"]\n\n\n########################################################################################################################\n# Utilites to store and retrieve results of the experiments\n########################################################################################################################\n\n\ndef update_logs(logs: pd.DataFrame, path: str):\n conn = sqlite3.connect(path)\n logs.to_sql(\"logs\", conn, if_exists=\"append\")\n conn.close()\n\n\ndef retrieve_logs(path: str) -> pd.DataFrame:\n conn = sqlite3.connect(path)\n logs = pd.read_sql_query(\"select * from logs;\", conn)\n conn.close()\n \n return logs\n\n\n# create data type in sqlite to store numpy arrays\n\n# convert array to binary to store it in sqlite\n\ndef encode_array2binary(x: np.ndarray):\n binary_buffer = io.BytesIO()\n np.save(binary_buffer, x)\n binary_buffer.seek(0)\n y = binary_buffer.read()\n y = sqlite3.Binary(y)\n return y\n\nsqlite3.register_adapter(np.ndarray, encode_array2binary)\n\n# recover array from binary encoding in the sqlite database\n\ndef decode_binary2array(y):\n binary_buffer = io.BytesIO(y)\n binary_buffer.seek(0)\n x = np.load(binary_buffer)\n return x\n\nsqlite3.register_converter(\"array\", decode_binary2array)\n\n##########################################################\n\n\ndef update_confusion(confusion_matrix: np.array, path: str):\n assert confusion_matrix.shape[0] == confusion_matrix.shape[1]\n \n create = '''CREATE TABLE IF NOT EXISTS confusions (matrix array)'''\n \n insert = '''INSERT INTO confusions (matrix) VALUES (?)'''\n \n conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)\n cursor = conn.cursor()\n cursor.execute(create)\n cursor.execute(insert, (confusion_matrix,))\n \n conn.commit()\n conn.close()\n\n\ndef retrieve_confusion(path: str) -> List[np.array]:\n conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)\n cursor = conn.cursor()\n \n cursor.execute(\"select * from confusions\")\n arrays = cursor.fetchall()\n conn.close()\n \n arrays = [a[0] for a in arrays]\n \n return arrays\n\n\n########################################################################################################################\n# Utilites to build paths and names in a standard way\n########################################################################################################################\n\n\ndef exp_name(config):\n config = vars(config)\n return '_'.join([str(config[p]) for p in EXPERIMENT_PARAMETERS])\n\n\ndef out_path(config):\n path = 'results/{}'.format(config.dataset)\n if config.reshuffle:\n path += \"(shuffled)\"\n if config.augment:\n path += \"_(train_augmentation)\"\n if not config.earlystop:\n path += \"_(full_train)\"\n return path\n\n\ndef plot_path(config):\n return os.path.join(out_path(config), exp_name(config) + \".svg\")\n\n\ndef backup_path(config):\n backup_folder = os.path.join(out_path(config), exp_name(config))\n return os.path.join(backup_folder, f\"_{config.seed}.model\")\n\n\ndef logs_path(config):\n return os.path.join(out_path(config), exp_name(config) + \".db\")\n\n\ndef build_model(config, 
n_inputs, n_outputs):\n # SFCNN VARIANTS\n if config.model == 'E2SFCNN':\n model = E2SFCNN(n_inputs, n_outputs, restrict=config.restrict, N=config.N, fco=config.F, J=config.J,\n sigma=config.sigma, fix_param=config.fixparams, sgsize=config.sgsize, flip=config.flip)\n elif config.model == 'E2SFCNN_QUOT':\n model = E2SFCNN_QUOT(n_inputs, n_outputs, restrict=config.restrict, N=config.N, fco=config.F, J=config.J,\n sigma=config.sigma, sgsize=config.sgsize, flip=config.flip)\n elif config.model == 'EXP':\n model = ExpE2SFCNN(n_inputs, n_outputs, layer_type=config.type, restrict=config.restrict, N=config.N,\n fix_param=config.fixparams, fco=config.F, J=config.J, sigma=config.sigma,\n deltaorth=config.deltaorth, antialias=config.antialias, sgsize=config.sgsize,\n flip=config.flip)\n elif config.model == 'CNN':\n model = ExpCNN(n_inputs, n_outputs, fix_param=config.fixparams, deltaorth=config.deltaorth)\n elif config.model == \"wrn16_8_stl\":\n model = wrn16_8_stl(num_classes=n_outputs, deltaorth=config.deltaorth)\n elif config.model == \"e2wrn16_8_stl\":\n model = e2wrn16_8_stl(N=config.N, r=config.restrict, num_classes=n_outputs, sigma=config.sigma, F=config.F,\n deltaorth=config.deltaorth, fixparams=config.fixparams)\n elif config.model == \"diffopwrn16_8_stl\":\n model = diffopwrn16_8_stl(N=config.N, r=config.restrict, num_classes=n_outputs,\n deltaorth=config.deltaorth, fixparams=config.fixparams)\n elif config.model == \"gausswrn16_8_stl\":\n model = gausswrn16_8_stl(N=config.N, r=config.restrict, num_classes=n_outputs,\n deltaorth=config.deltaorth, fixparams=config.fixparams)\n elif config.model == \"e2wrn28_10\":\n model = e2wrn28_10(N=config.N, r=config.restrict, num_classes=n_outputs, sigma=config.sigma, F=config.F,\n deltaorth=config.deltaorth, fixparams=config.fixparams)\n elif config.model == \"e2wrn28_7\":\n model = e2wrn28_7(N=config.N, r=config.restrict, num_classes=n_outputs, sigma=config.sigma, F=config.F,\n deltaorth=config.deltaorth, fixparams=config.fixparams)\n elif config.model == \"e2wrn28_10R\":\n model = e2wrn28_10R(N=config.N, r=config.restrict, num_classes=n_outputs, sigma=config.sigma, F=config.F,\n deltaorth=config.deltaorth, fixparams=config.fixparams)\n elif config.model == \"e2wrn28_7R\":\n model = e2wrn28_7R(N=config.N, r=config.restrict, num_classes=n_outputs, sigma=config.sigma, F=config.F,\n deltaorth=config.deltaorth, fixparams=config.fixparams)\n else:\n raise ValueError(\"Model selected ({}) not recognized!\".format(config.model))\n \n return model\n\n\n########################################################################################################################\n# utilites to build dataloaders\n########################################################################################################################\n\nfrom datasets.mnist_rot import data_loader_mnist_rot\nfrom datasets.mnist_fliprot import data_loader_mnist_fliprot\nfrom datasets.mnist12k import data_loader_mnist12k\nfrom datasets.cifar10 import data_loader_cifar10\nfrom datasets.cifar100 import data_loader_cifar100\nfrom datasets.STL10 import data_loader_stl10\nfrom datasets.STL10 import data_loader_stl10frac\n\n\ndef build_dataloaders(dataset, batch_size, num_workers, augment, validation=True, reshuffle=False,\n eval_batch_size=None, interpolation=2):\n if eval_batch_size is None:\n eval_batch_size = batch_size\n \n if dataset == \"mnist_rot\":\n \n if validation:\n if reshuffle:\n seed = np.random.randint(0, 100000)\n else:\n seed = None\n train_loader, _, _ = 
data_loader_mnist_rot.build_mnist_rot_loader(\"train\",\n batch_size,\n rot_interpol_augmentation=augment,\n interpolation=interpolation,\n reshuffle_seed=seed)\n valid_loader, _, _ = data_loader_mnist_rot.build_mnist_rot_loader(\"valid\",\n eval_batch_size,\n rot_interpol_augmentation=False,\n interpolation=interpolation,\n reshuffle_seed=seed)\n else:\n train_loader, _, _ = data_loader_mnist_rot.build_mnist_rot_loader(\"trainval\",\n batch_size,\n rot_interpol_augmentation=augment,\n interpolation=interpolation,\n reshuffle_seed=None)\n valid_loader = False\n \n test_loader, n_inputs, n_outputs = data_loader_mnist_rot.build_mnist_rot_loader(\"test\",\n eval_batch_size,\n rot_interpol_augmentation=False)\n \n elif dataset == \"mnist_fliprot\":\n \n if validation:\n if reshuffle:\n seed = np.random.randint(0, 100000)\n else:\n seed = None\n \n train_loader, _, _ = data_loader_mnist_fliprot.build_mnist_rot_loader(\"train\",\n batch_size,\n rot_interpol_augmentation=augment,\n interpolation=interpolation,\n reshuffle_seed=seed)\n valid_loader, _, _ = data_loader_mnist_fliprot.build_mnist_rot_loader(\"valid\",\n eval_batch_size,\n rot_interpol_augmentation=False,\n interpolation=interpolation,\n reshuffle_seed=seed)\n else:\n train_loader, _, _ = data_loader_mnist_fliprot.build_mnist_rot_loader(\"trainval\",\n batch_size,\n rot_interpol_augmentation=augment,\n interpolation=interpolation,\n reshuffle_seed=None)\n valid_loader = False\n \n test_loader, n_inputs, n_outputs = data_loader_mnist_fliprot.build_mnist_rot_loader(\"test\",\n eval_batch_size,\n rot_interpol_augmentation=False)\n elif dataset == \"mnist12k\":\n \n if validation:\n if reshuffle:\n seed = np.random.randint(0, 100000)\n else:\n seed = None\n train_loader, _, _ = data_loader_mnist12k.build_mnist12k_loader(\"train\",\n batch_size,\n rot_interpol_augmentation=augment,\n interpolation=interpolation,\n reshuffle_seed=seed)\n valid_loader, _, _ = data_loader_mnist12k.build_mnist12k_loader(\"valid\",\n eval_batch_size,\n rot_interpol_augmentation=False,\n interpolation=interpolation,\n reshuffle_seed=seed)\n else:\n train_loader, _, _ = data_loader_mnist12k.build_mnist12k_loader(\"trainval\",\n batch_size,\n rot_interpol_augmentation=augment,\n interpolation=interpolation,\n reshuffle_seed=None)\n valid_loader = False\n \n test_loader, n_inputs, n_outputs = data_loader_mnist12k.build_mnist12k_loader(\"test\",\n eval_batch_size,\n # rot_interpol_augmentation=False\n # interpolation=interpolation,\n )\n elif dataset == \"STL10\":\n train_loader, valid_loader, test_loader, n_inputs, n_outputs = data_loader_stl10.build_stl10_loaders(\n batch_size,\n eval_batch_size,\n validation=validation,\n augment=augment,\n num_workers=num_workers,\n reshuffle=reshuffle\n )\n elif dataset == \"STL10cif\":\n train_loader, valid_loader, test_loader, n_inputs, n_outputs = data_loader_stl10.build_stl10cif_loaders(\n batch_size,\n eval_batch_size,\n validation=validation,\n augment=augment,\n num_workers=num_workers,\n reshuffle=reshuffle\n )\n elif dataset.startswith(\"STL10|\"):\n size = int(dataset.split(\"|\")[1])\n train_loader, valid_loader, test_loader, n_inputs, n_outputs = data_loader_stl10frac.build_stl10_frac_loaders(\n size,\n batch_size,\n eval_batch_size,\n validation=validation,\n augment=augment,\n num_workers=num_workers,\n reshuffle=reshuffle\n )\n elif dataset.startswith(\"STL10cif|\"):\n size = int(dataset.split(\"|\")[1])\n train_loader, valid_loader, test_loader, n_inputs, n_outputs = 
data_loader_stl10frac.build_stl10cif_frac_loaders(\n size,\n batch_size,\n eval_batch_size,\n validation=validation,\n augment=augment,\n num_workers=num_workers,\n reshuffle=reshuffle\n )\n elif dataset == \"cifar10\":\n train_loader, valid_loader, test_loader, n_inputs, n_outputs = data_loader_cifar10.build_cifar10_loaders(\n batch_size,\n eval_batch_size,\n validation=validation,\n augment=augment,\n num_workers=num_workers,\n reshuffle=reshuffle\n )\n elif dataset == \"cifar100\":\n train_loader, valid_loader, test_loader, n_inputs, n_outputs = data_loader_cifar100.build_cifar100_loaders(\n batch_size,\n eval_batch_size,\n validation=validation,\n augment=augment,\n num_workers=num_workers,\n reshuffle=reshuffle\n )\n else:\n raise ValueError(\"Dataset '{}' not recognized!\".format(dataset))\n \n dataloaders = {\"train\": train_loader, \"valid\": valid_loader, \"test\": test_loader}\n return dataloaders, n_inputs, n_outputs\n\n\n########################################################################################################################\n# utilites to build experiments' args parser\n########################################################################################################################\n\nimport argparse\n\nSHOW_PLOT = False\nSAVE_PLOT = True\n\nRESHUFFLE = False\nAUGMENT_TRAIN = False\n\nLEARNING_RATE = 1e-4\nBATCH_SIZE = 64\nEPOCHS = 40\n\nPLOT_FREQ = 100\n\nEVAL_FREQ = 100\n\nBACKUP = False\nBACKUP_FREQ = -1\n\nADAPT_LR = False\n\n\ndef none_or_float(value):\n if value == 'None':\n return None\n return float(value)\n\n\ndef args_exp_parameters(parser):\n ######## EXPERIMENT'S PARAMETERS ########\n\n # Dataset params\n parser.add_argument('--dataset', type=str, help='The name of the dataset to use')\n parser.add_argument('--augment', dest=\"augment\", action=\"store_true\",\n help='Augment the training set with rotated images')\n parser.set_defaults(augment=AUGMENT_TRAIN)\n parser.add_argument('--interpolation', type=int, default=2,\n help='Type of interpolation to use for data augmentation')\n \n parser.add_argument('--reshuffle', dest=\"reshuffle\", action=\"store_true\",\n help='Reshuffle train and valid splits instead of using the default split')\n parser.set_defaults(reshuffle=RESHUFFLE)\n parser.add_argument('--workers', type=int, default=8, help='Number of jobs to load the dataset')\n parser.add_argument('--time_limit', type=int, default=None, help='Maximum time limit for training (in Minutes)')\n parser.add_argument('--verbose', type=int, default=2, help='Verbose Level')\n \n # Model params\n parser.add_argument('--model', type=str, help='The name of the model to use')\n parser.add_argument('--type', type=str, default=None, help='Type of fiber for the EXP model')\n parser.add_argument('--N', type=int, help='Size of cyclic group for GCNN and maximum frequency for HNET')\n parser.add_argument('--F', type=none_or_float, default=None,\n help='Frequency cut-off: maximum frequency at radius \"r\" is \"F*r\"')\n parser.add_argument('--sigma', type=none_or_float, default=None,\n help='Width of the rings building the bases (std of the gaussian window)')\n parser.add_argument('--J', type=int, default=None,\n help='Number of additional frequencies in the interwiners of finite groups')\n parser.add_argument('--restrict', type=int, default=-1, help='Layer where to restrict SFCNN from E(2) to SE(2)')\n parser.add_argument('--sgsize', type=int, default=None,\n help='Number of rotations in the subgroup to restrict to in the EXP e2sfcnn models')\n 
parser.add_argument('--flip', dest=\"flip\", action=\"store_true\",\n help='Use also reflection equivariance in the EXP model')\n parser.set_defaults(flip=False)\n parser.add_argument('--fixparams', dest=\"fixparams\", action=\"store_true\",\n help='Keep the number of parameters of the model fixed by adjusting its topology')\n parser.set_defaults(fixparams=False)\n parser.add_argument('--deltaorth', dest=\"deltaorth\", action=\"store_true\",\n help='Use delta orthogonal initialization in conv layers')\n parser.set_defaults(deltaorth=False)\n parser.add_argument('--antialias', type=float, default=0.,\n help='Std for the gaussian blur in the max-pool layer. If zero (default), standard maxpooling is performed')\n \n # Training params\n parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,\n help='Number of examples to process in a batch training')\n parser.add_argument('--eval_batch_size', type=int, default=None,\n help='Number of examples to process in a batch during test. By default (None), it is the same as batch_size.')\n parser.add_argument('--lr', type=float, default=LEARNING_RATE, help='Learning rate')\n parser.add_argument('--epochs', type=int, default=EPOCHS, help='Number of training epochs')\n parser.add_argument('--steps_per_epoch', type=int, default=-1,\n help='Number of batches to process during each epoch. By default, it processes all batches')\n parser.add_argument('--no_earlystop', dest=\"earlystop\", action=\"store_false\",\n help=\"Don't split the training set to build a validation set for early stopping but train on the union of validation and training set\")\n parser.set_defaults(earlystop=True)\n parser.add_argument('--valid_metric', type=str, default=\"accuracy\",\n help='Metric on the validation set to use for early stopping')\n parser.add_argument('--eval_test', dest=\"eval_test\", action=\"store_true\",\n help=\"Evaluate and logs model's performance also on the test set when doing validation during training (not used for early stopping)\")\n parser.set_defaults(eval_test=False)\n parser.add_argument('--accumulate', type=int, default=1,\n help='During training, accumulate the gradinet of a number of batches before optimizing the model. Useful for large models when a single batch does not fit in memory. (By default, only 1 batch is accumulated)')\n \n parser.add_argument('--optimizer', type=str, default=\"sfcnn\", choices=[\"sfcnn\", \"SGD\", \"Adam\"],\n help='Optimize to use')\n parser.add_argument('--momentum', type=float, default=0.9, help='Momentum (used only if optimizer = SGD)')\n parser.add_argument('--weight_decay', type=float, default=0.0005,\n help='Weight Decay (used only if optimizer = SGD or Adam)')\n \n parser.add_argument('--adapt_lr', type=str, default=None,\n help='Adaptive learning rate scheduler to sue (default = none). 
'\n 'Available choices: \"exponential\" or \"validation\" (the last one requires earlystop)')\n parser.add_argument('--lr_decay_start', type=int, default=15, help='Starting epoch for the adaptive lr scheduler')\n parser.add_argument('--lr_decay_factor', type=float, default=0.8, help='Decay factor for the adaptive lr scheduler')\n parser.add_argument('--lr_decay_epoch', type=int, default=1,\n help='Period (in number of epochs) for the adaptive lr scheduler')\n parser.add_argument('--lr_decay_schedule', type=int, nargs='+', default=None,\n help='Epochs when lr should be decayed for the adaptive lr scheduler '\n '(alternative for lr_decay_epoch)')\n \n # Regularization params\n parser.add_argument('--l1', dest=\"l1\", action=\"store_true\",\n help=\"Use L1L2 regularization as in SFCNN paper\")\n parser.set_defaults(l1=False)\n \n parser.add_argument('--lamb_conv_L1',\n type=float,\n default=1e-7,\n help='gain of L1 loss for steerable layer variables')\n parser.add_argument('--lamb_conv_L2',\n type=float,\n default=1e-7,\n help='gain of L2 loss for steerable layer variables')\n parser.add_argument('--lamb_fully_L1',\n type=float,\n default=1e-8,\n help='gain of L1 loss for fully connected layer variables')\n parser.add_argument('--lamb_fully_L2',\n type=float,\n default=1e-8,\n help='gain of L2 loss for fully connected layer variables')\n parser.add_argument('--lamb_softmax_L2',\n type=float,\n default=0,\n help='gain of L2 loss for fully connected layer variables')\n parser.add_argument('--lamb_bn_L1',\n type=float,\n default=0,\n help='gain of L1 loss for batchnorm weights')\n parser.add_argument('--lamb_bn_L2',\n type=float,\n default=0,\n help='gain of L2 loss for batchnorm weights')\n \n # Other experiment's parameters\n parser.add_argument('--eval_frequency', type=int, default=EVAL_FREQ,\n help='Evaluation frequency (counts iterations if positive, epochs if negative. Use -1 to evaluate at the end of each epoch)')\n \n parser.add_argument('--backup_model', dest=\"backup_model\", action=\"store_true\", help='Backup the model in a file')\n parser.set_defaults(backup_model=BACKUP)\n \n parser.add_argument('--plot_frequency', type=int, default=PLOT_FREQ,\n help='Plot frequency (counts iterations if positive, epochs if negative. Use -1 to plot at the end of each epoch)')\n parser.add_argument('--store_plot', dest=\"store_plot\", action=\"store_true\",\n help=\"Store the plot\")\n parser.set_defaults(store_plot=False)\n parser.add_argument('--show', dest=\"show\", action=\"store_true\",\n help=\"Show the plot during training\")\n parser.set_defaults(show=False)\n \n parser.add_argument('--backup_frequency', type=int, default=BACKUP_FREQ,\n help='Backup frequency (counts iterations if positive, epochs if negative. Use -1 to backup at the end of each epoch)')\n \n return parser\n\n"
] |
[
[
"matplotlib.use",
"matplotlib.pyplot.draw",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots"
],
[
"numpy.random.randint",
"numpy.load",
"numpy.save",
"pandas.read_sql_query"
]
] |
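A minimal sketch of how the "exponential" adaptive-lr flags defined by the parser above could drive a scheduler. Assumptions: the builder above is exposed under some name such as `build_parser`, and the helper name `adjust_lr` plus the PyTorch optimizer are purely illustrative, not part of the repo:

import torch

def adjust_lr(optimizer, args, epoch):
    # "exponential" mode: multiply the lr by lr_decay_factor every
    # lr_decay_epoch epochs once lr_decay_start has been reached
    if args.adapt_lr != "exponential" or epoch < args.lr_decay_start:
        return
    if (epoch - args.lr_decay_start) % args.lr_decay_epoch == 0:
        for group in optimizer.param_groups:
            group["lr"] *= args.lr_decay_factor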
Abdiel-EMT/segnet
|
[
"474a68079000a85d1e62ad9723d316074bb1eb8d"
] |
[
"segnet/models/multiresunet.py"
] |
[
"from tensorflow import keras as K\n\n\ndef conv2d(x, filters, shape, padding=\"same\", strides=(1, 1), activation=\"relu\"):\n \"\"\"\n 2D Convolutional layers with Batch Normalization\n \n Arguments:\n x: Keras layer, the input to the feature map\n filters: Int representing the number of filters to use\n shape: Tuple with two integer values (number of rows, number of columns)\n padding: String that determines the padding mode\n strides: Tuple of two integer values that represent the strides\n activation: String that defines the activation function\n \n Returns:\n x: A Keras layer\n \"\"\"\n\n x = K.layers.Conv2D(\n filters, shape, strides=strides, padding=padding, use_bias=False\n )(x)\n x = K.layers.BatchNormalization(scale=False)(x)\n\n if activation is None:\n return x\n\n x = K.layers.Activation(activation)(x)\n\n return x\n\n\ndef MultiResBlock(u_val, input, alpha=1.67):\n \"\"\"\n MultiRes Block, as defined in the paper. Alpha is a parameter that controls\n the number of parameters in the block.\n \n Arguments:\n U: Integer value for the number of filters.\n input: A Keras layer.\n \n Returns:\n out: A Keras layer.\n \"\"\"\n # Calculate the value of W as defined in the paper.\n weight = u_val * alpha\n # The first 1x1 map, to preserve dimensions\n dimension_conservation = conv2d(\n input,\n int(weight * 0.167) + int(weight * 0.333) + int(weight * 0.5),\n (1, 1),\n activation=None,\n padding=\"same\",\n )\n # First 3x3 map, adjusted with W / 6\n conv3x3 = conv2d(\n input, int(weight * 0.167), (3, 3), activation=\"relu\", padding=\"same\"\n )\n # Second 3x3 map, adjusted with W / 3\n conv5x5 = conv2d(\n conv3x3, int(weight * 0.333), (3, 3), activation=\"relu\", padding=\"same\"\n )\n # Third 3x3 map, adjusted with W / 2\n conv7x7 = conv2d(\n conv5x5, int(weight * 0.5), (3, 3), activation=\"relu\", padding=\"same\"\n )\n # Concatenate all three 3x3 maps\n out = K.layers.Concatenate()([conv3x3, conv5x5, conv7x7])\n out = K.layers.BatchNormalization()(out)\n # And add the new 7x7 map with the 1x1 map, batch normalized\n out = K.layers.add([dimension_conservation, out])\n out = K.layers.Activation(\"relu\")(out)\n out = K.layers.BatchNormalization()(out)\n\n return out\n\n\ndef ResPath(filters, input, length=None):\n \"\"\"\n ResPath, to mitigate the semantic gap in the architecture.\n This function creates a path with just one combination of residual\n and feature maps, and this can easily be extended with the length\n argument.\n \n Arguments:\n filters: Integer value corresponding to the number of filters.\n length: Integer value with the length of the path, number of maps.\n input: Keras layer.\n \n Returns:\n out: Keras layer.\n \"\"\"\n # First residual connection\n residual = conv2d(input, filters, (1, 1), activation=None, padding=\"same\")\n # And first feature map\n out = conv2d(input, filters, (3, 3), activation=\"relu\", padding=\"same\")\n # Add the layers and batch normalize\n out = K.layers.add([residual, out])\n out = K.layers.Activation(\"relu\")(out)\n out = K.layers.BatchNormalization()(out)\n # If there is more maps to add, we add them with this loop\n if not length is None:\n for _ in range(length - 1):\n\n residual = out\n residual = conv2d(\n residual, filters, (1, 1), activation=None, padding=\"same\"\n )\n\n out = conv2d(out, filters, (3, 3), activation=\"relu\", padding=\"same\")\n\n out = K.layers.add([residual, out])\n out = K.layers.Activation(\"relu\")(out)\n out = K.layers.BatchNormalization()(out)\n\n return out\n\n\ndef MultiResUnet(input_size=(256, 256, 
3)):\n \"\"\"\n A TensorFlow implementation of the MultiResUNet architecture as defined in the\n following paper:\n https://arxiv.org/abs/1902.04049\n \n This is a variant of the U-Net, with additional blocks and paths to help mitigate\n semantic gaps and to obtain better characteristics from the images and maps.\n \n Arguments:\n input_size: Tuple of three integers (height, width, number of channels) that\n describe the input images.\n \n Returns:\n model: A Keras model instance.\n \"\"\"\n\n inputs = K.layers.Input((input_size))\n\n mresblock_1 = MultiResBlock(32, inputs)\n pool_1 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_1)\n mresblock_1 = ResPath(32, mresblock_1, 4)\n\n mresblock_2 = MultiResBlock(64, pool_1)\n pool_2 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_2)\n mresblock_2 = ResPath(64, mresblock_2, 3)\n\n mresblock_3 = MultiResBlock(128, pool_2)\n pool_3 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_3)\n mresblock_3 = ResPath(128, mresblock_3, 2)\n\n mresblock_4 = MultiResBlock(256, pool_3)\n pool_4 = K.layers.MaxPooling2D(pool_size=(2, 2))(mresblock_4)\n mresblock_4 = ResPath(256, mresblock_4)\n\n mresblock5 = MultiResBlock(512, pool_4)\n\n up_6 = K.layers.Conv2DTranspose(256, (2, 2), strides=(2, 2), padding=\"same\")(\n mresblock5\n )\n up_6 = K.layers.Concatenate()([up_6, mresblock_4])\n mresblock_6 = MultiResBlock(256, up_6)\n\n up_7 = K.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding=\"same\")(\n mresblock_6\n )\n up_7 = K.layers.Concatenate()([up_7, mresblock_3])\n mresblock7 = MultiResBlock(128, up_7)\n\n up_8 = K.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding=\"same\")(\n mresblock7\n )\n up_8 = K.layers.Concatenate()([up_8, mresblock_2])\n mresblock8 = MultiResBlock(64, up_8)\n\n up_9 = K.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding=\"same\")(\n mresblock8\n )\n up_9 = K.layers.Concatenate()([up_9, mresblock_1])\n mresblock9 = MultiResBlock(32, up_9)\n\n conv_10 = conv2d(mresblock9, 1, (1, 1), activation=\"sigmoid\")\n\n model = K.models.Model(inputs=[inputs], outputs=[conv_10])\n\n return model\n\n"
] |
[
[
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Concatenate"
]
] |
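A usage sketch for the model above. Assumptions: TensorFlow 2.x with tf.keras; the optimizer and loss choices are illustrative, though binary cross-entropy does match the single-channel sigmoid output of conv_10:

from tensorflow import keras as K

model = MultiResUnet(input_size=(256, 256, 3))
model.compile(optimizer=K.optimizers.Adam(1e-4),
              loss="binary_crossentropy",
              metrics=["accuracy"])
model.summary()  # encoder-decoder with MultiRes blocks and ResPath skip connections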
iankhr/armagarch
|
[
"5d292b54cde992cca47024aaeb8d4120f0665a7d"
] |
[
"armagarch/tStudent.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jun 26 12:46:36 2020\r\nThis class defines t-Student distribution for ARMA-GARCH models\r\n\r\n@author: Ian Khrashchevskyi\r\n\"\"\"\r\n\r\nfrom .distModel import DistModel\r\nimport numpy as np\r\nimport scipy.stats as stats\r\nfrom scipy.special import gamma\r\n\r\nclass tStudent(DistModel):\r\n \"\"\"\r\n INPUT:\r\n data - innovations\r\n params -dict with mean and Var and other as a parameter nu\r\n \"\"\"\r\n def _giveName(self):\r\n if self._params is None:\r\n self._params = {'Mean':0,'Var':1, 'other':3}\r\n \r\n self._name = 'Student'\r\n self._startingValues = 3\r\n self._varnames = ['nu']\r\n \r\n \r\n def _setConstraints(self, data=None):\r\n self._constraints = {'Mean':[(-np.Inf, np.inf),], 'Var':[(0,np.inf),],\\\r\n 'other':[(3,np.Inf),]}\r\n \r\n \r\n def lls(self, data =None, params = None):\r\n if data is None:\r\n data = self._data\r\n \r\n if params is None:\r\n params = self._params\r\n \r\n mu = params['Mean']\r\n var = params['Var']\r\n nu = params['other']\r\n ells = np.log(gamma((nu+1)/2)/(np.sqrt(np.pi*(nu-2))*gamma(nu/2)))\\\r\n - 0.5*np.log(var.values) \\\r\n - (nu+1)/2*np.log(1+(data.values-mu)**2/(var.values*(nu-2))) \r\n return -ells\r\n\r\n\r\n def simulate(self, nobs= 1):\r\n \"\"\"\r\n Use built in simulator for now\r\n \"\"\"\r\n return stats.t.rvs(df = self._params['other'],\\\r\n loc = self._params['Mean'],\\\r\n scale = self._params['Var'],\\\r\n size = nobs)"
] |
[
[
"scipy.special.gamma",
"numpy.sqrt",
"numpy.log",
"scipy.stats.t.rvs"
]
] |
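The log-likelihood above is that of a Student-t standardized to variance `var`. A quick check against scipy confirms the parameterization (assumption: scalar inputs here, rather than the pandas Series the class expects):

import numpy as np
import scipy.stats as stats
from scipy.special import gamma

mu, var, nu, x = 0.0, 2.0, 5.0, 0.7
ell = (np.log(gamma((nu + 1) / 2) / (np.sqrt(np.pi * (nu - 2)) * gamma(nu / 2)))
       - 0.5 * np.log(var)
       - (nu + 1) / 2 * np.log(1 + (x - mu) ** 2 / (var * (nu - 2))))
# a t with df=nu and scale sqrt(var*(nu-2)/nu) has variance exactly `var`
assert np.isclose(ell, stats.t.logpdf(x, df=nu, loc=mu,
                                      scale=np.sqrt(var * (nu - 2) / nu)))

One caveat worth flagging: simulate() above passes params['Var'] straight to scipy's scale argument; under the parameterization checked here the consistent scale would arguably be sqrt(Var*(nu-2)/nu), so simulated draws deserve a double check before use.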
ishine/sudo_rm_rf
|
[
"ec3fae1e2c9d85710f933a600f3ab93f92468dee"
] |
[
"sudo_rm_rf/dnn/experiments/run_fuss_separation.py"
] |
[
"\"\"\"!\n@brief Running an experiment with the improved version of SuDoRmRf on\nuniversal source separation with multiple sources.\n\n@author Efthymios Tzinis {[email protected]}\n@copyright University of Illinois at Urbana-Champaign\n\"\"\"\n\nimport os\nimport sys\ncurrent_dir = os.path.dirname(os.path.abspath('__file__'))\nroot_dir = os.path.abspath(os.path.join(current_dir, '../../../'))\nsys.path.append(root_dir)\n\nfrom __config__ import API_KEY\nfrom comet_ml import Experiment, OfflineExperiment\n\nimport torch\nfrom torch.nn import functional as F\nfrom tqdm import tqdm\nfrom pprint import pprint\nimport sudo_rm_rf.dnn.experiments.utils.improved_cmd_args_parser_v2 as parser\nimport sudo_rm_rf.dnn.experiments.utils.mixture_consistency \\\n as mixture_consistency\nimport sudo_rm_rf.dnn.experiments.utils.dataset_setup as dataset_setup\nimport sudo_rm_rf.dnn.losses.sisdr as sisdr_lib\nimport sudo_rm_rf.dnn.losses.snr as snr_lib\nimport sudo_rm_rf.dnn.losses.norm as norm_lib\nimport sudo_rm_rf.dnn.models.improved_sudormrf as improved_sudormrf\nimport sudo_rm_rf.dnn.models.groupcomm_sudormrf_v2 as sudormrf_gc_v2\nimport sudo_rm_rf.dnn.models.causal_improved_sudormrf_v3 as \\\n causal_improved_sudormrf\nimport sudo_rm_rf.dnn.models.sudormrf as initial_sudormrf\nimport sudo_rm_rf.dnn.utils.cometml_loss_report as cometml_report\nimport sudo_rm_rf.dnn.utils.cometml_log_audio as cometml_audio_logger\nimport sudo_rm_rf.dnn.utils.log_audio as offline_audio_logger\n\n# torch.backends.cudnn.enabled = False\nargs = parser.get_args()\nhparams = vars(args)\ngenerators = dataset_setup.setup(hparams)\n# Hardcode n_sources for all the experiments with musdb\nassert hparams['n_channels'] == 1, 'Mono source separation is available for now'\n\naudio_loggers = dict(\n [(n_src,\n cometml_audio_logger.AudioLogger(fs=hparams[\"fs\"],\n bs=1,\n n_sources=n_src))\n for n_src in range(1, hparams['max_num_sources'] + 1)])\n\n# offline_savedir = os.path.join('/home/thymios/offline_exps',\n# hparams[\"project_name\"],\n# '_'.join(hparams['cometml_tags']))\n# if not os.path.exists(offline_savedir):\n# os.makedirs(offline_savedir)\n# audio_logger = offline_audio_logger.AudioLogger(dirpath=offline_savedir,\n# fs=hparams[\"fs\"], bs=hparams[\"batch_size\"], n_sources=4)\n\n# Hardcode the test generator for each one of the number of sources\nfor n_src in range(hparams['min_num_sources'], hparams['max_num_sources']+1):\n for split_name in ['val', 'test']:\n loader = dataset_setup.create_loader_for_simple_dataset(\n dataset_name='FUSS',\n separation_task=hparams['separation_task'],\n data_split=split_name, sample_rate=hparams['fs'],\n n_channels=hparams['n_channels'], min_or_max=hparams['min_or_max'],\n zero_pad=hparams['zero_pad_audio'],\n timelegth=hparams['audio_timelength'],\n normalize_audio=hparams['normalize_audio'],\n n_samples=0, min_num_sources=n_src, max_num_sources=n_src)\n\n gen_name = '{}_{}_srcs'.format(split_name, n_src)\n generators[gen_name] = loader.get_generator(\n batch_size=hparams['batch_size'], num_workers=hparams['n_jobs'])\n\n# experiment = OfflineExperiment(API_KEY, offline_directory=offline_savedir)\nexperiment = Experiment(API_KEY, project_name=hparams['project_name'])\nexperiment.log_parameters(hparams)\nexperiment_name = '_'.join(hparams['cometml_tags'])\nfor tag in hparams['cometml_tags']:\n experiment.add_tag(tag)\nif hparams['experiment_name'] is not None:\n experiment.set_name(hparams['experiment_name'])\nelse:\n experiment.set_name(experiment_name)\n\nos.environ['CUDA_VISIBLE_DEVICES'] 
= ','.join(\n [cad for cad in hparams['cuda_available_devices']])\n\nback_loss_tr_loss_name, back_loss_tr_loss = (\n 'tr_back_loss_SNR',\n # norm_lib.L1(return_individual_results=False)\n # norm_lib.PermInvariantL1(n_sources=hparams[\"n_sources\"],\n # weighted_norm=True)\n # 'tr_back_loss_SISDRi',\n snr_lib.PermInvariantSNRwithZeroRefs(\n n_sources=hparams[\"max_num_sources\"],\n zero_mean=False,\n backward_loss=True,\n inactivity_threshold=-40.)\n)\n\nval_losses = {}\nall_losses = []\nfor val_set in [x for x in generators if not x == 'train']:\n if generators[val_set] is None:\n continue\n\n n_actual_sources = int(val_set.split('_')[1])\n if n_actual_sources == 1:\n single_source = False\n improvement = False\n metric_name = 'SISDR'\n n_estimated_sources = 1\n else:\n single_source = False\n improvement = True\n n_estimated_sources = hparams['max_num_sources']\n metric_name = 'SISDRi'\n val_losses[val_set] = {}\n all_losses.append(val_set + '_{}'.format(metric_name))\n val_losses[val_set][val_set + '_{}'.format(metric_name)] = \\\n sisdr_lib.StabilizedPermInvSISDRMetric(\n zero_mean=True,\n single_source=single_source,\n n_estimated_sources=n_estimated_sources,\n n_actual_sources=n_actual_sources,\n backward_loss=False,\n improvement=improvement,\n return_individual_results=True)\nall_losses.append(back_loss_tr_loss_name)\n\nif hparams['model_type'] == 'relu':\n model = improved_sudormrf.SuDORMRF(out_channels=hparams['out_channels'],\n in_channels=hparams['in_channels'],\n num_blocks=hparams['num_blocks'],\n upsampling_depth=hparams['upsampling_depth'],\n enc_kernel_size=hparams['enc_kernel_size'],\n enc_num_basis=hparams['enc_num_basis'],\n num_sources=hparams['max_num_sources'])\nelif hparams['model_type'] == 'causal':\n model = causal_improved_sudormrf.CausalSuDORMRF(\n in_audio_channels=1,\n out_channels=hparams['out_channels'],\n in_channels=hparams['in_channels'],\n num_blocks=hparams['num_blocks'],\n upsampling_depth=hparams['upsampling_depth'],\n enc_kernel_size=hparams['enc_kernel_size'],\n enc_num_basis=hparams['enc_num_basis'],\n num_sources=hparams['max_num_sources'])\nelif hparams['model_type'] == 'softmax':\n model = initial_sudormrf.SuDORMRF(out_channels=hparams['out_channels'],\n in_channels=hparams['in_channels'],\n num_blocks=hparams['num_blocks'],\n upsampling_depth=hparams['upsampling_depth'],\n enc_kernel_size=hparams['enc_kernel_size'],\n enc_num_basis=hparams['enc_num_basis'],\n num_sources=hparams['max_num_sources'])\nelif hparams['model_type'] == 'groupcomm_v2':\n model = sudormrf_gc_v2.GroupCommSudoRmRf(\n in_audio_channels=hparams['n_channels'],\n out_channels=hparams['out_channels'],\n in_channels=hparams['in_channels'],\n num_blocks=hparams['num_blocks'],\n upsampling_depth=hparams['upsampling_depth'],\n enc_kernel_size=hparams['enc_kernel_size'],\n enc_num_basis=hparams['enc_num_basis'],\n num_sources=hparams['max_num_sources'],\n group_size=16)\nelse:\n raise ValueError('Invalid model: {}.'.format(hparams['model_type']))\n\nnumparams = 0\nfor f in model.parameters():\n if f.requires_grad:\n numparams += f.numel()\nexperiment.log_parameter('Parameters', numparams)\nprint('Trainable Parameters: {}'.format(numparams))\n\nmodel = torch.nn.DataParallel(model).cuda()\nopt = torch.optim.Adam(model.parameters(), lr=hparams['learning_rate'])\n# lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n# optimizer=opt, mode='max', factor=1. 
/ hparams['divide_lr_by'],\n# patience=hparams['patience'], verbose=True)\n\n\ndef normalize_tensor_wav(wav_tensor, eps=1e-8, std=None):\n mean = wav_tensor.mean(-1, keepdim=True)\n if std is None:\n std = wav_tensor.std(-1, keepdim=True)\n return (wav_tensor - mean) / (std + eps)\n\n\ndef online_augment(clean_sources):\n # clean_sources: (batch, n_sources, time)\n # Online mixing over samples of the batch. (This might cause to get\n # mixtures from the same type of sound but it's highly improbable).\n # Keep the exact same SNR distribution with the initial mixtures.\n n_sources = clean_sources.shape[1]\n batch_size = clean_sources.shape[0]\n\n initial_biases = torch.mean(clean_sources, dim=-1, keepdim=True)\n initial_energies = torch.std(clean_sources, dim=-1, keepdim=True)\n\n augmented_wavs_l = []\n for i in range(n_sources):\n augmented_wavs_l.append(clean_sources[torch.randperm(batch_size), i])\n augmented_wavs = torch.stack(augmented_wavs_l, 1)\n # augmented_wavs = normalize_tensor_wav(augmented_wavs)\n # augmented_wavs = (augmented_wavs * initial_energies) + initial_biases\n augmented_wavs = augmented_wavs[:, torch.randperm(n_sources)]\n augmented_wavs *= (torch.rand(batch_size, n_sources).unsqueeze(-1) + 0.5)\n\n return augmented_wavs\n\n\ntr_step = 0\nval_step = 0\nprev_epoch_val_loss = 0.\nfor i in range(hparams['n_epochs']):\n res_dic = {}\n for loss_name in all_losses:\n res_dic[loss_name] = {'mean': 0., 'std': 0., 'median': 0., 'acc': []}\n print(\"FUSS Sudo-RM-RF: {} - {} || Epoch: {}/{}\".format(\n experiment.get_key(), experiment.get_tags(), i+1, hparams['n_epochs']))\n model.train()\n\n sum_loss = 0.\n train_tqdm_gen = tqdm(generators['train'], desc='Training')\n for cnt, data in enumerate(train_tqdm_gen):\n opt.zero_grad()\n # data shape: (batch, n_sources, time_samples)\n clean_wavs = online_augment(data)\n clean_wavs = clean_wavs.cuda()\n\n input_mixture = torch.sum(clean_wavs, -2, keepdim=True)\n # input_mixture = normalize_tensor_wav(input_mixture)\n\n input_mix_std = input_mixture.std(-1, keepdim=True)\n input_mix_mean = input_mixture.mean(-1, keepdim=True)\n input_mixture = (input_mixture - input_mix_mean) / (\n input_mix_std + 1e-9)\n\n # input_mix_std = input_mixture.std(-1, keepdim=True)\n # input_mix_mean = input_mixture.mean(-1, keepdim=True)\n # input_mixture = (input_mixture - input_mix_mean) / (input_mix_std + 1e-9)\n # clean_wavs = normalize_tensor_wav(clean_wavs, std=input_mix_std)\n\n rec_sources_wavs = model(input_mixture)\n # rec_sources_wavs = (rec_sources_wavs * input_mix_std) + input_mix_mean\n rec_sources_wavs = mixture_consistency.apply(rec_sources_wavs,\n input_mixture)\n\n # l = back_loss_tr_loss(normalize_tensor_wav(rec_sources_wavs),\n # normalize_tensor_wav(clean_wavs))\n l = back_loss_tr_loss(rec_sources_wavs,\n clean_wavs)\n l.backward()\n\n if hparams['clip_grad_norm'] > 0:\n torch.nn.utils.clip_grad_norm_(model.parameters(),\n hparams['clip_grad_norm'])\n\n opt.step()\n sum_loss += l.detach().item()\n train_tqdm_gen.set_description(\n \"Training, Running Avg Loss: {}\".format(sum_loss / (cnt + 1)))\n\n if hparams['patience'] > 0:\n if tr_step % hparams['patience'] == 0:\n new_lr = (hparams['learning_rate']\n / (hparams['divide_lr_by'] ** (tr_step // hparams['patience'])))\n print('Reducing Learning rate to: {}'.format(new_lr))\n for param_group in opt.param_groups:\n param_group['lr'] = new_lr\n tr_step += 1\n\n for val_set in [x for x in generators if not x == 'train']:\n if generators[val_set] is not None:\n n_actual_sources = 
int(val_set.split('_')[1])\n model.eval()\n n_songs_written = 10\n with torch.no_grad():\n for data in tqdm(generators[val_set],\n desc='Validation on {}'.format(val_set)):\n clean_wavs = data.cuda()\n input_mixture = torch.sum(clean_wavs, -2, keepdim=True)\n # input_mixture = normalize_tensor_wav(input_mixture)\n input_mix_std = input_mixture.std(-1, keepdim=True)\n input_mix_mean = input_mixture.mean(-1, keepdim=True)\n input_mixture = (input_mixture - input_mix_mean) / (\n input_mix_std + 1e-9)\n\n rec_sources_wavs = model(input_mixture)\n # rec_sources_wavs = (rec_sources_wavs * input_mix_std) + input_mix_mean\n rec_sources_wavs = mixture_consistency.apply(\n rec_sources_wavs,\n input_mixture)\n\n for loss_name, loss_func in val_losses[val_set].items():\n # l, best_perm = loss_func(\n # normalize_tensor_wav(rec_sources_wavs),\n # normalize_tensor_wav(clean_wavs),\n # return_best_permutation=True)\n l, best_perm = loss_func(\n rec_sources_wavs,\n clean_wavs,\n return_best_permutation=True)\n res_dic[loss_name]['acc'] += l.tolist()\n\n audio_loggers[n_actual_sources].log_batch(\n rec_sources_wavs[:, best_perm.long().cuda()][0, 0].unsqueeze(0),\n clean_wavs[0].unsqueeze(0),\n input_mixture[0].unsqueeze(0),\n experiment, step=val_step, tag=val_set)\n\n val_step += 1\n\n res_dic = cometml_report.report_losses_mean_and_std(res_dic,\n experiment,\n tr_step,\n val_step)\n\n for loss_name in res_dic:\n res_dic[loss_name]['acc'] = []\n pprint(res_dic)\n"
] |
[
[
"torch.rand",
"torch.stack",
"torch.no_grad",
"torch.std",
"torch.randperm",
"torch.mean",
"torch.nn.DataParallel",
"torch.sum"
]
] |
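mixture_consistency.apply above is the repo's own utility; conceptually it projects the estimated sources so they sum back to the input mixture. A sketch of the unweighted projection (assumption: this mirrors the standard mixture-consistency projection of Wisdom et al.; the repo's implementation may differ in weighting details):

import torch

def mixture_consistency_projection(est_sources, mixture):
    # est_sources: (batch, n_src, time); mixture: (batch, 1, time)
    residual = mixture - est_sources.sum(dim=1, keepdim=True)
    # spread the residual evenly so the estimates add up to the mixture
    return est_sources + residual / est_sources.shape[1]

est = torch.randn(2, 4, 8000)
mix = torch.randn(2, 1, 8000)
proj = mixture_consistency_projection(est, mix)
assert torch.allclose(proj.sum(dim=1, keepdim=True), mix, atol=1e-5)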
niekh-13/Textmining_NPA-reports
|
[
"0453f2f12e7d0745ac59076a1d255f4de79fc85c"
] |
[
"util/outlier.py"
] |
[
"##############################################################\n# #\n# Niek Huijsmansen #\n# Textmining medical notes for cognition #\n# Outlier Z score #\n# #\n##############################################################\n\nimport scipy\nimport math\nimport numpy as np\n\n# Class for outlier detection algorithms based on some distribution of the data. They\n# all consider only single points per row (i.e. one column).\nclass OutlierDetection:\n\n # Finds outliers in the specified columns of datatable and removes outliers\n def chauvenet(self, data_table, col):\n # take only the column\n data = data_table[col]\n #remove nans\n data.dropna(inplace=True)\n # Taken partly from: https://www.astro.rug.nl/software/kapteyn/\n # Computer the mean and standard deviation.\n mean = data.mean()\n std = data.std()\n N = len(data.index)\n criterion = 1.0 / (2 * N)\n\n # Consider the deviation for the data points.\n deviation = abs(data - mean) / std\n\n # Express the upper and lower bounds.\n low = -deviation / math.sqrt(2)\n high = deviation / math.sqrt(2)\n mask = []\n\n # Pass all rows in the dataset.\n for i in data.index.tolist():\n # Determine the probability of observing the point\n prob = 1.0 - 0.5 * (scipy.special.erf(high.loc[i]) - scipy.special.erf(low.loc[i]))\n # And mark as an outlier when the probability is below our criterion.\n if prob < criterion:\n mask.append(i)\n else:\n continue\n print(data_table.loc[mask, col])\n data_table.loc[mask, col] = np.nan\n return data_table\n\n\n\n"
] |
[
[
"scipy.special.erf"
]
] |
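The same Chauvenet test on a plain numpy array, for reference (assumptions: synthetic data; note that 1 - 0.5*(erf(high) - erf(low)) in the class above simplifies to erfc(deviation/sqrt(2))):

import numpy as np
from scipy.special import erfc

rng = np.random.default_rng(0)
data = np.append(rng.normal(0.0, 1.0, 1000), 8.0)  # plant one outlier
deviation = np.abs(data - data.mean()) / data.std()
prob = erfc(deviation / np.sqrt(2))        # two-sided tail probability
mask = prob < 1.0 / (2 * len(data))        # Chauvenet criterion
print(np.flatnonzero(mask))                # flags the planted point (index 1000)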
knit-pk/AI-Section-2017
|
[
"a744b130defe58050264a37d88732af66ecabf40"
] |
[
"SARSA/SARSA.py"
] |
[
"'''\nExample implementation of SARSA algorithm for learning the path through frozen lake.\nThe is_slippery flag lets us change the rules of the game, if True the probability of\nchanging the chosen direction is 4 out of 10.\n'''\n\nimport gym\nimport numpy as np\nimport time\nimport pygame\n\n\nclass Game:\n stan = 0;\n\n def __init__(self, field):\n self.field = field\n\n def step(self, action):\n reward = -0.04\n done = False\n info = False\n\n if (action == 0) and ((self.stan % 4) != 0):\n self.stan -= 1\n if (action == 1) and (self.stan < 12):\n self.stan += 4\n if (action == 2) and ((self.stan % 4) != 3):\n self.stan += 1\n if (action == 3) and (self.stan > 3):\n self.stan -= 4\n\n if self.field[self.stan] == 'H':\n reward = -5\n done = True\n\n if self.field[self.stan] == 'G':\n reward = 1\n done = True\n\n return self.stan, reward, done, info;\n\n def reset(self):\n self.stan = 0\n return self.stan;\n\n\ndef drawGridWorld(Q, field, player, action):\n # Grid world init\n pygame.init()\n font = pygame.font.SysFont(\"monospace\", 30, True)\n surface = pygame.display.set_mode((860, 860)) # width x height\n pygame.display.set_caption('GridWorld')\n sleep_time = 0.02;\n\n surface.fill((0, 0, 0))\n wiersz = 0\n kolumna = 0\n offset = 10\n size = 200\n # print(action)\n for pole in range(len(Q)): # Y # pola pionowo\n if pole != 0 and (pole % len(Q[0]) == 0):\n wiersz += 1\n kolumna = 0\n x_cord = offset + offset * kolumna + kolumna * size\n y_cord = offset + offset * wiersz + wiersz * size\n # Field\n field_color = (189, 189, 189)\n if field[pole] == 'H':\n field_color = (33, 33, 33)\n if field[pole] == 'S':\n field_color = (255, 179, 0)\n if field[pole] == 'G':\n field_color = (118, 255, 3)\n pygame.draw.rect(surface, field_color, (x_cord, y_cord, size, size))\n # Player\n if pole == player:\n field_color = (3, 169, 244)\n pygame.draw.circle(surface, field_color, (\n int(round(x_cord + size / 2)), int(round(y_cord + size / 2))),\n int(round(size / 2)))\n if action == 0:\n move_action = font.render(\"<\", False, (255, 0, 0))\n if action == 1:\n move_action = font.render(\"\\/\", False, (255, 0, 0))\n if action == 2:\n move_action = font.render(\">\", False, (255, 0, 0))\n if action == 3:\n move_action = font.render(\"/\\\\\", False, (255, 0, 0))\n\n surface.blit(move_action, (0, 0))\n # QMatrix\n\n color = (255, 255, 255)\n\n best = Q[pole].argmax()\n for i in range(4):\n # print(best)\n if i == best:\n color = (255, 0, 0)\n x_label_cord = 0\n y_label_cord = 0\n if i == 0: # left\n x_label_cord = x_cord\n y_label_cord = y_cord\n direction = 'left'\n # color = (0, 0, 255) # blue\n\n if i == 1: # down\n x_label_cord = x_cord\n y_label_cord = y_cord + size / 4\n direction = 'down'\n # color = (0, 255, 0) # green\n\n if i == 2: # right\n x_label_cord = x_cord\n y_label_cord = y_cord + size / 4 * 2\n direction = 'right'\n # color = (0, 255, 255) # green blue\n\n if i == 3: # up\n x_label_cord = x_cord\n y_label_cord = y_cord + size / 2 + size / 4\n direction = 'up'\n # color = (255, 0, 0) # red\n\n label = font.render(\"{}:{}\".format(direction, round(Q[pole][i], 3)), False,\n color)\n surface.blit(label, (x_label_cord, y_label_cord))\n kolumna += 1\n pygame.display.update()\n time.sleep(sleep_time)\n\n\ndef learn(is_slippery):\n if is_slippery:\n env = gym.make('FrozenLake-v0')\n Q = np.zeros([env.observation_space.n, env.action_space.n])\n else:\n field = ['S', 'F', 'F', 'F',\n 'F', 'H', 'F', 'H',\n 'F', 'F', 'F', 'H',\n 'H', 'F', 'F', 'G'\n ]\n env = Game(field)\n Q = np.zeros([16, 4])\n\n a 
= .8 # alpha\n y = .95 # gamma\n num_episodes = 2000\n\n for i in range(num_episodes):\n\n current_state = env.reset()\n current_action = np.argmax(Q[current_state, :])\n for j in range(100):\n\n next_state, reward, done, _ = env.step(current_action)\n\n if is_slippery:\n next_action = np.argmax(\n Q[next_state, :] + np.random.randn(1, env.action_space.n) * (\n 1. / (i + 1)))\n else:\n next_action = np.argmax(Q[next_state, :] + np.random.randn(1, 4) * (\n 1. / (i + 1)))\n\n Q[current_state, current_action] += a * (\n reward + y * Q[next_state, next_action] - Q[\n current_state, current_action])\n\n current_state = next_state\n current_action = next_action\n\n if done == True:\n break\n\n return Q\n\n\ndef play(inQ, is_slippery):\n field = ['S', 'F', 'F', 'F',\n 'F', 'H', 'F', 'H',\n 'F', 'F', 'F', 'H',\n 'H', 'F', 'F', 'G'\n ]\n\n if is_slippery:\n env = gym.make('FrozenLake-v0')\n else:\n env = Game(field)\n\n num_episodes = 2000\n Q = inQ\n rList = [] # reward list\n\n for i in range(num_episodes):\n total_reward = 0\n\n state = env.reset()\n\n drawGridWorld(Q, field, state, 0)\n\n action = np.argmax(Q[state, :])\n for j in range(100):\n\n drawGridWorld(Q, field, state, action)\n\n state, reward, done, _ = env.step(action)\n\n action = np.argmax(Q[state, :])\n\n total_reward += reward\n\n if done == True:\n break\n rList.append(total_reward)\n\n print(\"Score over time: \" + str(sum(rList) / num_episodes))\n\n\nif __name__ == '__main__':\n is_slippery = False\n Q = learn(is_slippery)\n play(Q, is_slippery)\n"
] |
[
[
"numpy.random.randn",
"numpy.argmax",
"numpy.zeros"
]
] |
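The heart of learn() above is the on-policy SARSA update; isolated here for clarity with the same alpha and gamma (assumption: this standalone helper is illustrative, not part of the repo):

import numpy as np

def sarsa_update(Q, s, a, r, s_next, a_next, alpha=0.8, gamma=0.95):
    # the on-policy TD target bootstraps from the action actually taken next,
    # unlike Q-learning, which would take the max over Q[s_next, :]
    Q[s, a] += alpha * (r + gamma * Q[s_next, a_next] - Q[s, a])
    return Q

Q = np.zeros((16, 4))
Q = sarsa_update(Q, s=0, a=1, r=-0.04, s_next=4, a_next=1)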
RCheese/gumpy
|
[
"c5d602122bef87827dae4abeace6c867c65eb1cb"
] |
[
"gumpy/split.py"
] |
[
"import sklearn.model_selection\nfrom sklearn.model_selection import (ShuffleSplit, StratifiedKFold,\n StratifiedShuffleSplit)\n\n\ndef normal(X, labels, test_size):\n \"\"\"Split a dataset into training and test parts.\n Args:\n X (numpy.ndarray): 2D features matrix \n labels: labels vector \n test_size: size of the split\n \n Returns:\n A 2D CSP features matrix \n \"\"\"\n Y = labels\n X_train, X_test, Y_train, Y_test = \\\n sklearn.model_selection.train_test_split(X, Y,\n test_size=test_size,\n random_state=0)\n return X_train, X_test, Y_train, Y_test\n\n\ndef time_series_split(features, labels, n_splits):\n \"\"\"Split a dataset into n splits.\n\n \"\"\"\n xx = sklearn.model_selection.TimeSeriesSplit(n_splits)\n for train_index, test_index in xx.split(features):\n X_train, X_test = features[train_index], features[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n\n return X_train, X_test, y_train, y_test\n\n\ndef stratified_KFold(features, labels, n_splits):\n \"\"\"Stratified K-Folds cross-validator\n Stratification is the process of rearranging the data as to ensure each fold is a good representative of the whole\n and by also keeping the balance of classes\n \"\"\"\n skf = StratifiedKFold(n_splits)\n skf.get_n_splits(features, labels)\n for train_index, test_index in skf.split(features, labels):\n X_train, X_test = features[train_index], features[test_index]\n Y_train, Y_test = labels[train_index], labels[test_index]\n return X_train, X_test, Y_train, Y_test\n\n\n# Stratified ShuffleSplit cross-validator\ndef stratified_shuffle_Split(features, labels, n_splits, test_size, random_state):\n \"\"\"Stratified ShuffleSplit cross-validator\n \"\"\"\n cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state)\n for train_index, test_index in cv.split(features, labels):\n X_train = features[train_index]\n X_test = features[test_index]\n Y_train = labels[train_index]\n Y_test = labels[test_index]\n return X_train, X_test, Y_train, Y_test\n\n\n# Random permutation cross-validator\ndef shuffle_Split(features, labels, n_splits, test_size, random_state):\n \"\"\"ShuffleSplit: Random permutation cross-validator\n \"\"\"\n cv = ShuffleSplit(n_splits, test_size, random_state=random_state)\n for train_index, test_index in cv.split(features):\n X_train = features[train_index]\n X_test = features[test_index]\n Y_train = labels[train_index]\n Y_test = labels[test_index]\n return X_train, X_test, Y_train, Y_test\n"
] |
[
[
"sklearn.model_selection.StratifiedShuffleSplit",
"sklearn.model_selection.StratifiedKFold",
"sklearn.model_selection.ShuffleSplit"
]
] |
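A usage sketch with synthetic data (assumption: random features and labels). One caveat worth knowing: the fold-based helpers above iterate over every fold but, because the return sits after the loop, they hand back only the split from the final iteration:

import numpy as np

X = np.random.randn(100, 8)          # 100 trials, 8 features
y = np.random.randint(0, 2, 100)
X_train, X_test, Y_train, Y_test = normal(X, y, test_size=0.2)
X_train, X_test, Y_train, Y_test = stratified_KFold(X, y, n_splits=5)  # last fold only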
XiSHEN0220/SSR
|
[
"50f473b690f6c28e8c828c8ec65de7680400b011"
] |
[
"transductive_few_shot/src/dataset.py"
] |
[
"## settings of different datasets\nimport numpy as np\nimport torchvision.transforms as transforms\n\ndef dataset_setting(dataset, nSupport, nQuery=15):\n\n if 'miniImageNet' in dataset :\n mean = [x/255.0 for x in [120.39586422, 115.59361427, 104.54012653]]\n std = [x/255.0 for x in [70.68188272, 68.27635443, 72.54505529]]\n normalize = transforms.Normalize(mean=mean, std=std)\n trainTransform = transforms.Compose([transforms.RandomCrop(80, padding=8),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize\n ])\n \n \n valTransform = transforms.Compose([transforms.CenterCrop(80),\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize])\n \n\n inputW, inputH, nbCls = 80, 80, 64\n\n trainDir = '../data/Mini-ImageNet/train/'\n valDir = '../data/Mini-ImageNet/val/'\n testDir = '../data/Mini-ImageNet/test/'\n episodeJson = '../data/Mini-ImageNet/val1000Episode_5_way_{:d}_shot_{:d}_query.json'.format(nSupport, nQuery)\n \n ## the preprocessing is the same as https://gitlab.mpi-klsb.mpg.de/yaoyaoliu/e3bm/-/blob/inductive/dataloader/tiered_imagenet.py \n elif 'tieredImageNet' in dataset :\n mean = [x / 255.0 for x in [125.3, 123.0, 113.9]]\n std = [x / 255.0 for x in [63.0, 62.1, 66.7]]\n normalize = transforms.Normalize(mean=mean, std=std)\n trainTransform = transforms.Compose([\n transforms.RandomResizedCrop(84),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize\n ])\n \n\n valTransform = transforms.Compose([ transforms.Resize([92, 92]),\n transforms.CenterCrop(84),\n transforms.ToTensor(),\n normalize])\n \n \n\n inputW, inputH, nbCls = 84, 84, 351\n\n trainDir = '../data/tiered_imagenet/train/'\n valDir = '../data/tiered_imagenet/val/'\n testDir = '../data/tiered_imagenet/test/'\n episodeJson = '../data/tiered_imagenet/val1000Episode_5_way_{:d}_shot_{:d}_query.json'.format(nSupport, nQuery)\n\n elif dataset == 'Cifar':\n mean = [x/255.0 for x in [129.37731888, 124.10583864, 112.47758569]]\n std = [x/255.0 for x in [68.20947949, 65.43124043, 70.45866994]]\n normalize = transforms.Normalize(mean=mean, std=std)\n trainTransform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize\n ])\n \n\n valTransform = transforms.Compose([lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize])\n \n \n inputW, inputH, nbCls = 32, 32, 64\n\n trainDir = '../data/cifar-fs/train/'\n valDir = '../data/cifar-fs/val/'\n testDir = '../data/cifar-fs/test/'\n episodeJson = '../data/cifar-fs/val1000Episode_5_way_{:d}_shot_{:d}_query.json'.format(nSupport, nQuery)\n\n else:\n raise ValueError('Do not support other datasets yet.')\n\n return trainTransform, valTransform, inputW, inputH, trainDir, valDir, testDir, episodeJson, nbCls\n"
] |
[
[
"numpy.asarray"
]
] |
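A usage sketch (assumptions: the relative '../data/...' directories exist as laid out above; 5-way episodes with the default 15 queries):

(trainTransform, valTransform, inputW, inputH,
 trainDir, valDir, testDir, episodeJson, nbCls) = dataset_setting('miniImageNet', nSupport=1)
print(inputW, inputH, nbCls)  # -> 80 80 64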
jlee-ds/meshcnn
|
[
"6a3c9efa18f00786e2c71f56934d101a1895e9c2"
] |
[
"data/autoencoder_data.py"
] |
[
"import os\nimport torch\nfrom data.base_dataset import BaseDataset\nfrom util.util import is_mesh_file, pad, pad_vertices\nimport numpy as np\nfrom models.layers.mesh import Mesh\n\nclass AutoEncoderData(BaseDataset):\n\n def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.opt = opt\n self.device = torch.device('cuda:{}'.format(opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')\n self.root = opt.dataroot\n self.dir = os.path.join(opt.dataroot)\n self.classes, self.class_to_idx = self.find_classes(self.dir)\n print(self.classes, self.class_to_idx)\n self.paths = self.make_dataset_by_class(self.dir, self.class_to_idx, opt.phase)\n self.nclasses = len(self.classes)\n self.size = len(self.paths)\n self.get_mean_std()\n # # modify for network later.\n opt.nclasses = self.nclasses\n opt.input_nc = self.ninput_channels\n\n def __getitem__(self, index):\n path = self.paths[index][0]\n #print(path)\n mesh = Mesh(file=path, opt=self.opt, hold_history=True, export_folder=self.opt.export_folder)\n meta = {}\n mesh.vs = (mesh.vs - np.mean(mesh.vs, 0)) / np.std(mesh.vs, 0)\n meta['mesh'] = mesh\n meta['export_folder'] = mesh.export_folder\n meta['filename'] = mesh.filename\n # get edge features\n edge_features = mesh.extract_features()\n edge_features = pad(edge_features, self.opt.ninput_edges)\n vs, pad_iter = pad_vertices(mesh.vs, 1402)\n meta['edge_features'] = (edge_features - self.mean) / self.std\n meta['label'] = vs.astype(np.float)\n meta['init_faces'] = mesh.init_faces\n meta['pad_iter'] = pad_iter\n return meta\n\n def __len__(self):\n return self.size\n\n @staticmethod\n def make_dataset(path):\n meshes = []\n assert os.path.isdir(path), '%s is not a valid directory' % path\n\n for root, _, fnames in sorted(os.walk(path)):\n for fname in fnames:\n if is_mesh_file(fname):\n path = os.path.join(root, fname)\n meshes.append(path)\n\n return meshes\n \n @staticmethod \n def find_classes(dir):\n classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]\n classes.sort()\n if '.ipynb_checkpoints' in classes:\n classes.remove('.ipynb_checkpoints')\n class_to_idx = {classes[i]: i for i in range(len(classes))}\n return classes, class_to_idx\n\n @staticmethod\n def make_dataset_by_class(dir, class_to_idx, phase):\n meshes = []\n dir = os.path.expanduser(dir)\n for target in sorted(os.listdir(dir)):\n d = os.path.join(dir, target)\n if not os.path.isdir(d):\n continue\n for root, _, fnames in sorted(os.walk(d)):\n ## LEE\n #file_num = 0\n for fname in sorted(fnames):\n if is_mesh_file(fname) and (root.count(phase)==1):\n path = os.path.join(root, fname)\n item = (path, class_to_idx[target])\n meshes.append(item)\n \n ## LEE\n #file_num += 1\n #if file_num == 100 :\n # break\n return meshes"
] |
[
[
"numpy.std",
"torch.device",
"numpy.mean"
]
] |
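util.util.pad_vertices itself is not shown in this dump; a hypothetical sketch of the padding step that __getitem__ relies on (assumption: it zero-pads a (n_vertices, 3) array to a fixed row count and reports how many rows were added; the real helper may differ):

import numpy as np

def pad_vertices_sketch(vs, target_rows):
    pad_iter = target_rows - vs.shape[0]              # rows to add
    padded = np.pad(vs, ((0, pad_iter), (0, 0)), mode="constant")
    return padded, pad_iter

vs, pad_iter = pad_vertices_sketch(np.random.rand(1200, 3), 1402)
assert vs.shape == (1402, 3) and pad_iter == 202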
hegland/cmepy
|
[
"fa8cdf2fad779badbcb629bf6ee33316724ec4a4"
] |
[
"cmepy/lexarrayset.py"
] |
[
"\"\"\"\nlexical array set operations\n\nthese operations are based upon the one dimensional array set operations\nfrom numpy.lib.arraysetops, but generalised to work for sets of m-tuples,\nwhere each element is stored as a row of a 2d m by n array, using numpy's\n'lexsort' lexical sorting function.\n\"\"\"\n\nimport numpy\n\ndef unique(las, return_inverse=False):\n \"\"\"\n returns a sorted vector of unique states\n \n if the optional flag return_inverse is set to True,\n additionally returns an index vector used to\n inverse the unique operation and recover the\n original vector\n \"\"\"\n \n # argsort the array via lexical sorting using the keys\n # las[0, :] to las[-1, :], in increasing priority\n order = numpy.lexsort(las)\n if numpy.size(order) == 0:\n return las\n slas = las[:, order]\n # then figure out the indices of the first instance of each row\n not_equal_adj = numpy.logical_or.reduce(slas[:, :-1] != slas[:, 1:])\n not_equal_adj = numpy.concatenate(([True], not_equal_adj))\n \n uslas = slas[:, not_equal_adj]\n if return_inverse:\n order_inverse = order.argsort(kind='mergesort')\n # compute the unique inverse indices by summing over the\n # boolean array. because we want to compute 0-based indices\n # it is necessary to set the first boolean to False.\n # (alternatively, we could have subtracted 1 from the entire\n # result after summing)\n not_equal_adj[0] = False\n unique_inverse = numpy.add.accumulate(not_equal_adj)[order_inverse]\n return uslas, unique_inverse\n else:\n return uslas\n\ndef nonunique_member(arr1, las2):\n \"\"\"\n vectorised set membership operation for lexical array arr1 and\n lexical array set las2\n \n in general, the rows of array arr1 can be non-unique\n \n returns a boolean array 'mask' such that\n arr1[:, mask] is the subset of rows of arr1 that are also\n rows of las2\n \"\"\"\n las1, unique_inverse = unique(arr1, return_inverse=True)\n return member(las1, las2)[unique_inverse]\n\ndef member(las1, las2):\n \"\"\"\n vectorised set membership operation for lexical array sets las1, las2\n \n returns a boolean array 'mask' such that\n \n las1[:, mask] is the subset of rows of las1 that\n are also rows of las2\n \"\"\"\n \n las = numpy.hstack((las1, las2))\n \n las1_n = numpy.shape(las1)[1]\n \n # since the 'in' operation is non-commutative we must\n # use a stable sort algorithm. 
this ensures that\n # if slas[i] == slas[i+1], then slas[i] is the element\n # from las1, while slas[i+1] is the element from las2\n \n # by grepping through the numpy source it seems that lexsort\n # should be a stable sort (is this officially documented anywhere?)\n \n order = numpy.lexsort(las, )\n \n if numpy.size(order) == 0:\n return numpy.zeros((las1_n, ), dtype=bool)\n slas = las[:, order]\n equal_adj = numpy.logical_and.reduce(slas[:, :-1] == slas[:, 1:])\n mask = numpy.concatenate((equal_adj, [False]))\n \n inverse_order = order.argsort(kind='mergesort')[:las1_n] \n return mask[inverse_order]\n\ndef split(las1, las2):\n \"\"\"\n returns (las1 intersect las2, las1 difference las2)\n \"\"\"\n if numpy.size(las1) == 0:\n # if las1 is empty, return a couple of copies of las1\n return (numpy.array(las1), numpy.array(las1))\n mask = member(las1, las2)\n return (numpy.array(las1[:, mask]),\n numpy.array(las1[:, numpy.logical_not(mask)]))\n\ndef difference(las1, las2):\n \"\"\"\n returns las1 difference las2\n \"\"\"\n if numpy.size(las1) == 0:\n # if las1 is empty, return a copy of las1\n return numpy.array(las1)\n return numpy.array(las1[:, numpy.logical_not(member(las1, las2))])\n\ndef intersection(las1, las2):\n \"\"\"\n intersection of las1 with las2\n \"\"\"\n las = numpy.hstack((las1, las2))\n order = numpy.lexsort(las)\n if numpy.size(order) == 0:\n return las\n slas = las[:, order]\n equal_adj = numpy.logical_and.reduce(slas[:, :-1] == slas[:, 1:])\n return slas[:, :-1][:, equal_adj]\n\ndef union(las1, las2):\n \"\"\"\n union of las1 with las2\n \"\"\"\n return unique(numpy.hstack((las1, las2)))\n\ndef shift(las, offset):\n \"\"\"\n shifts all states in las by offset\n \"\"\"\n offset = numpy.asarray(offset)[:, numpy.newaxis]\n return las + offset\n\ndef empty(dim):\n \"\"\"\n returns an empty LexArraySet of dimension dim.\n \"\"\"\n empty_data = numpy.zeros((dim, 0), dtype=int)\n return LexArraySet(empty_data)\n\ndef create(data, unique_data=False):\n \"\"\"\n returns a new LexArraySet for the given data\n \"\"\"\n return LexArraySet(data, unique_data)\n\nclass LexArraySet(object):\n \"\"\"\n LexArraySet is an implementation of a set as a 2d array, where\n the members of the set are the columns of the array. 
The columns\n are ordered using lexical ordering.\n \"\"\"\n \n def __init__(self, data, unique_data=False):\n \"\"\"\n data can either be another LexArraySet instance, in which\n case a copy is made of that instance's data, or a\n two-dimensional numpy array, where each column is interpreted\n as a tuple belonging to the set.\n \n If data is a two-dimensional numpy array, then the optional\n unique_data flag may be set to True to indicate that the\n columns of data are already unique.\n \"\"\"\n if type(data) is LexArraySet:\n self.data = numpy.array(data.data)\n else:\n data = numpy.asarray(data)\n if not unique_data:\n self.data = unique(data)\n else:\n self.data = data\n \n @property\n def size(self):\n \"\"\"\n number of elements in set (equal to number of columns of the lexical array)\n \"\"\"\n shape = numpy.shape(self.data)\n if len(shape) < 2:\n return 0\n else:\n return shape[1]\n \n def member(self, rhs):\n \"\"\"\n las1.member(las2) -> mask; mask[i] True iff column i of las1 is in las2\n \"\"\"\n return member(self.data, rhs.data)\n \n def split(self, rhs):\n \"\"\"\n las1.split(las2) -> (las1.intersect(las2), las1.difference(las2))\n \"\"\"\n intersect, diff = split(self.data, rhs.data)\n las_intersect = LexArraySet(intersect, unique_data=True)\n las_diff = LexArraySet(diff, unique_data=True)\n return las_intersect, las_diff\n \n def difference(self, rhs):\n \"\"\"\n las1.difference(las2) -> diff; diff's columns are those of las1 not in las2\n \"\"\"\n return LexArraySet(difference(self.data, rhs.data), unique_data=True)\n \n def intersection(self, rhs):\n \"\"\"\n las1.intersection(las2) -> isect; isect's columns common to las1, las2\n \"\"\"\n return LexArraySet(intersection(self.data, rhs.data), unique_data=True)\n \n def union(self, rhs):\n \"\"\"\n las1.union(las2) -> u; u's columns are the union of columns in las1, las2\n \"\"\"\n return LexArraySet(union(self.data, rhs.data), unique_data=True)\n \n def shift(self, offset):\n \"\"\"\n las.shift(offset) -> slas; where columns of slas are columns of las + offset\n \n offset must be of compatible shape to the columns of las.\n \"\"\"\n return LexArraySet(shift(self.data, offset), unique_data=True)\n \n def difference_update(self, rhs):\n \"\"\"\n in place difference\n \"\"\"\n self.data = difference(self.data, rhs.data)\n \n def intersection_update(self, rhs):\n \"\"\"\n in place intersection\n \"\"\"\n self.data = intersection(self.data, rhs.data)\n \n def union_update(self, rhs):\n \"\"\"\n in place union\n \"\"\"\n self.data = union(self.data, rhs.data)\n \n def shift_update(self, shift):\n \"\"\"\n in place shift\n \"\"\"\n shift = numpy.asarray(shift)[:, numpy.newaxis]\n self.data += shift\n"
] |
[
[
"numpy.concatenate",
"numpy.logical_not",
"numpy.array",
"numpy.asarray",
"numpy.lexsort",
"numpy.zeros",
"numpy.add.accumulate",
"numpy.shape",
"numpy.logical_or.reduce",
"numpy.size",
"numpy.hstack",
"numpy.logical_and.reduce"
]
] |
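A small demo of the column-wise set semantics above (assumption: toy data). Each column of the 2-D array is one set element, and lexsort orders columns with the last row as the primary key:

import numpy

las1 = numpy.array([[1, 0, 1, 0],
                    [0, 2, 0, 3]])   # elements (1,0), (0,2), (1,0), (0,3)
las2 = numpy.array([[1], [0]])       # the single element (1,0)
print(unique(las1))                  # duplicate column (1,0) collapsed
print(member(unique(las1), las2))    # one True entry, for the column (1,0)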
remorsecs/Kaggle-plant-seedlings-classification
|
[
"2fb837fb09ad07c3950684a8179052aa14a745e9"
] |
[
"libs/model.py"
] |
[
"import torch.nn as nn\n\n\nclass VGG11(nn.Module):\n def __init__(self):\n super().__init__()\n conv_layers = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n\n nn.Conv2d(64, 128, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n\n nn.Conv2d(128, 256, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.Conv2d(256, 256, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n\n nn.Conv2d(256, 512, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.Conv2d(512, 512, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n\n nn.Conv2d(512, 512, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.Conv2d(512, 512, kernel_size=3, padding=1),\n nn.ReLU(True),\n nn.MaxPool2d((2, 2), 2),\n )\n fc_layers = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(0.5),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(0.5),\n )\n self.feature = nn.Sequential(\n conv_layers,\n nn.Flatten(),\n fc_layers,\n )\n self.classifier = nn.Linear(4096, 12)\n\n def forward(self, x):\n feature = self.feature(x)\n score = self.classifier(feature)\n return score\n"
] |
[
[
"torch.nn.Linear",
"torch.nn.Dropout",
"torch.nn.MaxPool2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.nn.Flatten"
]
] |
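A shape sanity check for the model above (assumption: dummy input). Five 2x2 max-pools reduce a 224x224 image to 7x7, which is exactly the 512*7*7 fan-in of the first linear layer:

import torch

model = VGG11()
x = torch.randn(2, 3, 224, 224)
print(model(x).shape)  # torch.Size([2, 12]): one score per seedling class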
VinAIResearch/PointSWD
|
[
"ea676926e21286185e836ab355fee7937540ce69"
] |
[
"criteria_comparing_sets_pcs/all_metrics_calculator.py"
] |
[
"import os.path as osp\nimport sys\n\nimport torch\nimport torch.nn as nn\n\n\nsys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))\nfrom criteria_comparing_sets_pcs.jsd_calculator import JsdCalculator\nfrom metrics_from_point_flow.evaluation_metrics import compute_all_metrics\n\n\nclass AllMetricsCalculator(nn.Module):\n def __init__(self):\n super(AllMetricsCalculator, self).__init__()\n\n @staticmethod\n def forward(sample_pcs, ref_pcs, batch_size, **kwargs):\n results = {}\n results.update(compute_all_metrics(sample_pcs, ref_pcs, batch_size, **kwargs))\n for key, value in results.items():\n if torch.is_tensor(value):\n results[key] = value.item()\n if \"save_file\" in kwargs.keys():\n log = \"{}: {}\\n\"\n with open(kwargs[\"save_file\"], \"a\") as fp:\n for key, value in results.items():\n fp.write(log.format(key, value))\n # end for\n # end with\n # end if\n print(\"\\n\")\n log = \"{}: {}\\n\"\n for key, value in results.items():\n print(log.format(key, value))\n # end for\n jsd = JsdCalculator.forward(sample_pcs, ref_pcs, **kwargs)\n return jsd\n\n\nif __name__ == \"__main__\":\n sample_pcs = torch.empty(10, 2048, 3).uniform_(0, 1).cuda()\n ref_pcs = torch.empty(10, 2048, 3).uniform_(0, 1).cuda()\n batch_size = 10\n print(AllMetricsCalculator.forward(sample_pcs, ref_pcs, batch_size))\n"
] |
[
[
"torch.is_tensor",
"torch.empty"
]
] |
nden/photutils
|
[
"87879b2464ccfcd160f6a0c53ea4c0869a6e1cc2",
"87879b2464ccfcd160f6a0c53ea4c0869a6e1cc2"
] |
[
"photutils/detection/tests/test_findstars.py",
"photutils/segmentation/detect.py"
] |
[
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport os.path as op\nimport itertools\nimport warnings\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\nimport pytest\n\nfrom astropy.table import Table\nfrom astropy.utils.exceptions import AstropyUserWarning\n\nfrom ..findstars import DAOStarFinder, IRAFStarFinder\nfrom ...datasets import make_100gaussians_image\n\ntry:\n import scipy # noqa\n HAS_SCIPY = True\nexcept ImportError:\n HAS_SCIPY = False\n\n\nDATA = make_100gaussians_image()\nTHRESHOLDS = [8.0, 10.0]\nFWHMS = [1.0, 1.5, 2.0]\nwarnings.simplefilter('always', AstropyUserWarning)\n\n\[email protected]('not HAS_SCIPY')\nclass TestDAOStarFinder:\n @pytest.mark.parametrize(('threshold', 'fwhm'),\n list(itertools.product(THRESHOLDS, FWHMS)))\n def test_daofind(self, threshold, fwhm):\n starfinder = DAOStarFinder(threshold, fwhm, sigma_radius=1.5)\n t = starfinder(DATA)\n datafn = ('daofind_test_thresh{0:04.1f}_fwhm{1:04.1f}'\n '.txt'.format(threshold, fwhm))\n datafn = op.join(op.dirname(op.abspath(__file__)), 'data', datafn)\n t_ref = Table.read(datafn, format='ascii')\n\n assert t.colnames == t_ref.colnames\n for col in t.colnames:\n assert_allclose(t[col], t_ref[col])\n\n def test_daofind_threshold_fwhm_inputs(self):\n with pytest.raises(TypeError):\n DAOStarFinder(threshold=np.ones((2, 2)), fwhm=3.)\n\n with pytest.raises(TypeError):\n DAOStarFinder(threshold=3., fwhm=np.ones((2, 2)))\n\n def test_daofind_include_border(self):\n starfinder = DAOStarFinder(threshold=10, fwhm=2, sigma_radius=1.5,\n exclude_border=False)\n t = starfinder(DATA)\n assert len(t) == 20\n\n def test_daofind_exclude_border(self):\n starfinder = DAOStarFinder(threshold=10, fwhm=2, sigma_radius=1.5,\n exclude_border=True)\n t = starfinder(DATA)\n assert len(t) == 19\n\n def test_daofind_nosources(self):\n data = np.ones((3, 3))\n starfinder = DAOStarFinder(threshold=10, fwhm=1)\n t = starfinder(data)\n assert len(t) == 0\n\n def test_daofind_sharpness(self):\n \"\"\"Sources found, but none pass the sharpness criteria.\"\"\"\n starfinder = DAOStarFinder(threshold=50, fwhm=1.0, sharplo=1.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_daofind_roundness(self):\n \"\"\"Sources found, but none pass the roundness criteria.\"\"\"\n starfinder = DAOStarFinder(threshold=50, fwhm=1.0, roundlo=1.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_daofind_flux_negative(self):\n \"\"\"Test handling of negative flux (here created by large sky).\"\"\"\n data = np.ones((5, 5))\n data[2, 2] = 10.\n starfinder = DAOStarFinder(threshold=0.1, fwhm=1.0, sky=10)\n t = starfinder(data)\n assert not np.isfinite(t['mag'])\n\n def test_daofind_negative_fit_peak(self):\n \"\"\"\n Regression test that sources with negative fit peaks (i.e.\n hx/hy<=0) are excluded.\n \"\"\"\n\n starfinder = DAOStarFinder(threshold=7., fwhm=1.5, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf)\n t = starfinder(DATA)\n assert len(t) == 102\n\n def test_daofind_peakmax_filtering(self):\n \"\"\"\n Regression test that objects with ``peak`` >= ``peakmax`` are\n filtered out.\n \"\"\"\n\n peakmax = 20\n starfinder = DAOStarFinder(threshold=7., fwhm=1.5, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, peakmax=peakmax)\n t = starfinder(DATA)\n assert len(t) == 37\n assert all(t['peak'] < peakmax)\n\n def test_daofind_brightest_filtering(self):\n \"\"\"\n Regression test that only top ``brightest`` objects are\n selected.\n \"\"\"\n\n brightest = 40\n peakmax = 
20\n starfinder = DAOStarFinder(threshold=7., fwhm=1.5, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, brightest=brightest)\n t = starfinder(DATA)\n # combined with peakmax\n assert len(t) == brightest\n starfinder = DAOStarFinder(threshold=7., fwhm=1.5, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, brightest=brightest,\n peakmax=peakmax)\n t = starfinder(DATA)\n assert len(t) == 37\n\n def test_daofind_mask(self):\n \"\"\"Test DAOStarFinder with a mask.\"\"\"\n\n starfinder = DAOStarFinder(threshold=10, fwhm=1.5)\n mask = np.zeros_like(DATA, dtype=bool)\n mask[100:200] = True\n tbl1 = starfinder(DATA)\n tbl2 = starfinder(DATA, mask=mask)\n assert len(tbl1) > len(tbl2)\n\n\[email protected]('not HAS_SCIPY')\nclass TestIRAFStarFinder:\n @pytest.mark.parametrize(('threshold', 'fwhm'),\n list(itertools.product(THRESHOLDS, FWHMS)))\n def test_irafstarfind(self, threshold, fwhm):\n starfinder = IRAFStarFinder(threshold, fwhm, sigma_radius=1.5)\n t = starfinder(DATA)\n datafn = ('irafstarfind_test_thresh{0:04.1f}_fwhm{1:04.1f}'\n '.txt'.format(threshold, fwhm))\n datafn = op.join(op.dirname(op.abspath(__file__)), 'data', datafn)\n t_ref = Table.read(datafn, format='ascii')\n\n assert t.colnames == t_ref.colnames\n for col in t.colnames:\n assert_allclose(t[col], t_ref[col])\n\n def test_irafstarfind_threshold_fwhm_inputs(self):\n with pytest.raises(TypeError):\n IRAFStarFinder(threshold=np.ones((2, 2)), fwhm=3.)\n\n with pytest.raises(TypeError):\n IRAFStarFinder(threshold=3., fwhm=np.ones((2, 2)))\n\n def test_irafstarfind_nosources(self):\n data = np.ones((3, 3))\n starfinder = IRAFStarFinder(threshold=10, fwhm=1)\n t = starfinder(data)\n assert len(t) == 0\n\n def test_irafstarfind_sharpness(self):\n \"\"\"Sources found, but none pass the sharpness criteria.\"\"\"\n starfinder = IRAFStarFinder(threshold=50, fwhm=1.0, sharplo=2.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_irafstarfind_roundness(self):\n \"\"\"Sources found, but none pass the roundness criteria.\"\"\"\n starfinder = IRAFStarFinder(threshold=50, fwhm=1.0, roundlo=1.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_irafstarfind_sky(self):\n starfinder = IRAFStarFinder(threshold=25.0, fwhm=2.0, sky=10.)\n t = starfinder(DATA)\n assert len(t) == 4\n\n def test_irafstarfind_largesky(self):\n starfinder = IRAFStarFinder(threshold=25.0, fwhm=2.0, sky=100.)\n t = starfinder(DATA)\n assert len(t) == 0\n\n def test_irafstarfind_peakmax_filtering(self):\n \"\"\"\n Regression test that objects with ``peak`` >= ``peakmax`` are\n filtered out.\n \"\"\"\n peakmax = 20\n starfinder = IRAFStarFinder(threshold=7., fwhm=2, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, peakmax=peakmax)\n t = starfinder(DATA)\n assert len(t) == 117\n assert all(t['peak'] < peakmax)\n\n def test_irafstarfind_brightest_filtering(self):\n \"\"\"\n Regression test that only top ``brightest`` objects are selected.\n \"\"\"\n brightest = 40\n starfinder = IRAFStarFinder(threshold=7., fwhm=2, roundlo=-np.inf,\n roundhi=np.inf, sharplo=-np.inf,\n sharphi=np.inf, brightest=brightest)\n t = starfinder(DATA)\n assert len(t) == brightest\n\n def test_irafstarfind_mask(self):\n \"\"\"Test IRAFStarFinder with a mask.\"\"\"\n\n starfinder = IRAFStarFinder(threshold=10, fwhm=1.5)\n mask = np.zeros_like(DATA, dtype=bool)\n mask[100:200] = True\n tbl1 = starfinder(DATA)\n tbl2 = starfinder(DATA, mask=mask)\n assert len(tbl1) > len(tbl2)\n",
"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport numpy as np\nfrom astropy.stats import gaussian_fwhm_to_sigma\nfrom astropy.convolution import Gaussian2DKernel\n\nfrom .core import SegmentationImage\nfrom ..detection import detect_threshold\nfrom ..utils.convolution import filter_data\n\n\n__all__ = ['detect_sources', 'make_source_mask']\n\n\ndef detect_sources(data, threshold, npixels, filter_kernel=None,\n connectivity=8, mask=None):\n \"\"\"\n Detect sources above a specified threshold value in an image and\n return a `~photutils.segmentation.SegmentationImage` object.\n\n Detected sources must have ``npixels`` connected pixels that are\n each greater than the ``threshold`` value. If the filtering option\n is used, then the ``threshold`` is applied to the filtered image.\n The input ``mask`` can be used to mask pixels in the input data.\n Masked pixels will not be included in any source.\n\n This function does not deblend overlapping sources. First use this\n function to detect sources followed by\n :func:`~photutils.segmentation.deblend_sources` to deblend sources.\n\n Parameters\n ----------\n data : array_like\n The 2D array of the image.\n\n threshold : float or array-like\n The data value or pixel-wise data values to be used for the\n detection threshold. A 2D ``threshold`` must have the same\n shape as ``data``. See `~photutils.detection.detect_threshold`\n for one way to create a ``threshold`` image.\n\n npixels : int\n The number of connected pixels, each greater than ``threshold``,\n that an object must have to be detected. ``npixels`` must be a\n positive integer.\n\n filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional\n The 2D array of the kernel used to filter the image before\n thresholding. Filtering the image will smooth the noise and\n maximize detectability of objects with a shape similar to the\n kernel.\n\n connectivity : {4, 8}, optional\n The type of pixel connectivity used in determining how pixels\n are grouped into a detected source. The options are 4 or 8\n (default). 4-connected pixels touch along their edges.\n 8-connected pixels touch along their edges or corners. For\n reference, SExtractor uses 8-connected pixels.\n\n mask : array_like (bool)\n A boolean mask, with the same shape as the input ``data``, where\n `True` values indicate masked pixels. Masked pixels will not be\n included in any source.\n\n Returns\n -------\n segment_image : `~photutils.segmentation.SegmentationImage`\n A 2D segmentation image, with the same shape as ``data``, where\n sources are marked by different positive integer values. A\n value of zero is reserved for the background.\n\n See Also\n --------\n :func:`photutils.detection.detect_threshold`,\n :class:`photutils.segmentation.SegmentationImage`,\n :func:`photutils.segmentation.source_properties`\n :func:`photutils.segmentation.deblend_sources`\n\n Examples\n --------\n\n .. 
plot::\n :include-source:\n\n # make a table of Gaussian sources\n from astropy.table import Table\n table = Table()\n table['amplitude'] = [50, 70, 150, 210]\n table['x_mean'] = [160, 25, 150, 90]\n table['y_mean'] = [70, 40, 25, 60]\n table['x_stddev'] = [15.2, 5.1, 3., 8.1]\n table['y_stddev'] = [2.6, 2.5, 3., 4.7]\n table['theta'] = np.array([145., 20., 0., 60.]) * np.pi / 180.\n\n # make an image of the sources with Gaussian noise\n from photutils.datasets import make_gaussian_sources_image\n from photutils.datasets import make_noise_image\n shape = (100, 200)\n sources = make_gaussian_sources_image(shape, table)\n noise = make_noise_image(shape, type='gaussian', mean=0.,\n stddev=5., random_state=12345)\n image = sources + noise\n\n # detect the sources\n from photutils import detect_threshold, detect_sources\n threshold = detect_threshold(image, snr=3)\n from astropy.convolution import Gaussian2DKernel\n sigma = 3.0 / (2.0 * np.sqrt(2.0 * np.log(2.0))) # FWHM = 3\n kernel = Gaussian2DKernel(sigma, x_size=3, y_size=3)\n kernel.normalize()\n segm = detect_sources(image, threshold, npixels=5,\n filter_kernel=kernel)\n\n # plot the image and the segmentation image\n import matplotlib.pyplot as plt\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(8, 8))\n ax1.imshow(image, origin='lower', interpolation='nearest')\n ax2.imshow(segm.data, origin='lower', interpolation='nearest')\n \"\"\"\n\n from scipy import ndimage\n\n if (npixels <= 0) or (int(npixels) != npixels):\n raise ValueError('npixels must be a positive integer, got '\n '\"{0}\"'.format(npixels))\n\n image = (filter_data(data, filter_kernel, mode='constant', fill_value=0.0,\n check_normalization=True) > threshold)\n\n if mask is not None:\n if mask.shape != image.shape:\n raise ValueError('mask must have the same shape as the input '\n 'image.')\n image &= ~mask\n\n if connectivity == 4:\n selem = ndimage.generate_binary_structure(2, 1)\n elif connectivity == 8:\n selem = ndimage.generate_binary_structure(2, 2)\n else:\n raise ValueError('Invalid connectivity={0}. '\n 'Options are 4 or 8'.format(connectivity))\n\n segm_img, nobj = ndimage.label(image, structure=selem)\n\n # remove objects with less than npixels\n # NOTE: for typical data, making the cutout images is ~10x faster\n # than using segm_img directly\n segm_slices = ndimage.find_objects(segm_img)\n for i, slices in enumerate(segm_slices):\n cutout = segm_img[slices]\n segment_mask = (cutout == (i+1))\n if np.count_nonzero(segment_mask) < npixels:\n cutout[segment_mask] = 0\n\n # now relabel to make consecutive label indices\n segm_img, nobj = ndimage.label(segm_img, structure=selem)\n\n return SegmentationImage(segm_img)\n\n\ndef make_source_mask(data, snr, npixels, mask=None, mask_value=None,\n filter_fwhm=None, filter_size=3, filter_kernel=None,\n sigclip_sigma=3.0, sigclip_iters=5, dilate_size=11):\n \"\"\"\n Make a source mask using source segmentation and binary dilation.\n\n Parameters\n ----------\n data : array_like\n The 2D array of the image.\n\n snr : float\n The signal-to-noise ratio per pixel above the ``background`` for\n which to consider a pixel as possibly being part of a source.\n\n npixels : int\n The number of connected pixels, each greater than ``threshold``,\n that an object must have to be detected. 
``npixels`` must be a\n positive integer.\n\n mask : array_like, bool, optional\n A boolean mask with the same shape as ``data``, where a `True`\n value indicates the corresponding element of ``data`` is masked.\n Masked pixels are ignored when computing the image background\n statistics.\n\n mask_value : float, optional\n An image data value (e.g., ``0.0``) that is ignored when\n computing the image background statistics. ``mask_value`` will\n be ignored if ``mask`` is input.\n\n filter_fwhm : float, optional\n The full-width at half-maximum (FWHM) of the Gaussian kernel to\n filter the image before thresholding. ``filter_fwhm`` and\n ``filter_size`` are ignored if ``filter_kernel`` is defined.\n\n filter_size : float, optional\n The size of the square Gaussian kernel image. Used only if\n ``filter_fwhm`` is defined. ``filter_fwhm`` and ``filter_size``\n are ignored if ``filter_kernel`` is defined.\n\n filter_kernel : array-like (2D) or `~astropy.convolution.Kernel2D`, optional\n The 2D array of the kernel used to filter the image before\n thresholding. Filtering the image will smooth the noise and\n maximize detectability of objects with a shape similar to the\n kernel. ``filter_kernel`` overrides ``filter_fwhm`` and\n ``filter_size``.\n\n sigclip_sigma : float, optional\n The number of standard deviations to use as the clipping limit\n when calculating the image background statistics.\n\n sigclip_iters : int, optional\n The number of iterations to perform sigma clipping, or `None` to\n clip until convergence is achieved (i.e., continue until the last\n iteration clips nothing) when calculating the image background\n statistics.\n\n dilate_size : int, optional\n The size of the square array used to dilate the segmentation\n image.\n\n Returns\n -------\n mask : 2D `~numpy.ndarray`, bool\n A 2D boolean image containing the source mask.\n \"\"\"\n\n from scipy import ndimage\n\n threshold = detect_threshold(data, snr, background=None, error=None,\n mask=mask, mask_value=None,\n sigclip_sigma=sigclip_sigma,\n sigclip_iters=sigclip_iters)\n\n kernel = None\n if filter_kernel is not None:\n kernel = filter_kernel\n if filter_fwhm is not None:\n sigma = filter_fwhm * gaussian_fwhm_to_sigma\n kernel = Gaussian2DKernel(sigma, x_size=filter_size,\n y_size=filter_size)\n if kernel is not None:\n kernel.normalize()\n\n segm = detect_sources(data, threshold, npixels, filter_kernel=kernel)\n\n selem = np.ones((dilate_size, dilate_size))\n return ndimage.binary_dilation(segm.data.astype(np.bool), selem)\n"
] |
[
[
"numpy.testing.assert_allclose",
"numpy.zeros_like",
"numpy.ones",
"numpy.isfinite"
],
[
"numpy.count_nonzero",
"scipy.ndimage.label",
"scipy.ndimage.find_objects",
"numpy.ones",
"scipy.ndimage.generate_binary_structure"
]
] |
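The photutils record above documents a two-step detection workflow: build a pixel-wise threshold image, then segment connected pixels above it. Below is a minimal usage sketch of that workflow, assuming only the photutils/astropy APIs the file itself imports; the synthetic image and all of its values are made up for illustration.

import numpy as np
from astropy.stats import gaussian_fwhm_to_sigma
from astropy.convolution import Gaussian2DKernel
from photutils import detect_threshold, detect_sources

# synthetic image: Gaussian noise plus one bright 10x10 patch (illustrative values)
rng = np.random.RandomState(12345)
image = rng.normal(0.0, 5.0, size=(100, 200))
image[40:50, 90:100] += 50.0

# pixel-wise threshold at snr=3, as in the detect_sources docstring example
threshold = detect_threshold(image, snr=3)

# smooth with a FWHM=3 Gaussian kernel before thresholding, as the docstring recommends
kernel = Gaussian2DKernel(3.0 * gaussian_fwhm_to_sigma, x_size=3, y_size=3)
kernel.normalize()

segm = detect_sources(image, threshold, npixels=5, filter_kernel=kernel)
print('detected sources:', np.max(segm.data))  # labels are consecutive positive integers

make_source_mask, the second function in the record, chains the same detect_threshold and detect_sources calls and then binary-dilates the segmentation, so a one-call masking equivalent would be make_source_mask(image, snr=3, npixels=5, dilate_size=11).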
kjdavidson/NoisePy
|
[
"a7445dd2f68f64cb562d6a87096e5f12a2c3b612",
"a7445dd2f68f64cb562d6a87096e5f12a2c3b612"
] |
[
"src/application_modules/measure_dvv.py",
"test/data_check/check_linearity_fft.py"
] |
[
"import sys\nimport time\nimport obspy\nimport pyasdf\nimport os, glob\nimport datetime\nimport numpy as np\nimport pandas as pd\nfrom mpi4py import MPI\nimport matplotlib.pyplot as plt\nfrom obspy.signal.filter import bandpass\n\nsys.path.insert(1,'../')\nimport noise_module\n\n# register datetime converter\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\n\n'''\nthis application script of NoisePy is to perform dv/v analysis on the resulted cross-correlation\nfunctions from S2. Note that, to use this script, the `keep_substack` parameter in S2 has to be turned\n`True` when running S2. So the sub-stacked waveforms can be saved and further to be compared with the \nall-stacked waveforms to measure dv/v.\n\nAuthors: Chengxin Jiang ([email protected])\n Marine Denolle ([email protected])\n\nNOTE:\n 0) this script is only showing an example of how dv/v can be measured on the resulted file from S2, and \n the users need to expand/modify this script in order to apply for regional studies;\n 1) See Yuan et al., (2019) for more details on the comparison of different methods for mesuring dv/v as\n well as the numerical validation. \n'''\n\n############################################\n############ PAMAETER SECTION ##############\n############################################\n\n# input data and targeted component\nrootpath = '/Users/chengxin/Documents/NoisePy_example/SCAL/' # root path for this data processing\nsfile = os.path.join(rootpath,'STACK_month/CI.BLC/CI.BLC_CI.MPI.h5') # ASDF file containing stacked data\noutdir = os.path.join(rootpath,'figures/monitoring') # dir where to output dispersive image and extracted dispersion\nif not os.path.isdir(outdir):\n os.mkdir(outdir)\n\n# targeted component\nstack_method = 'linear' # which stacked data to measure dispersion info\nccomp = 'ZZ' # cross component\n\n# pre-defined group velocity to window direct and code waves\nvmin = 0.8 # minimum velocity of the direct waves -> start of the coda window\nlwin = 150 # window length in sec for the coda waves\n\n# basic parameters \nfreq = [0.1,0.2,0.3,0.5] # targeted frequency band for waveform monitoring\nnfreq = len(freq)-1\nonelag = False # make measurement one one lag or two \nnorm_flag = True # whether to normalize the cross-correlation waveforms\ndo_stretch = True # use strecthing method or not\ndo_dtw = False # use dynamic time warping method or not\ndo_mwcs = True # use moving-window cross spectrum method or not\ndo_mwcc = False # use moving-window cross correlation method or not\ndo_wts = True # use wavelet streching method or not\ndo_wxs = True # use wavelet cross spectrum method or not\n\n# parameters for stretching method\nepsilon = 2/100 # limit for dv/v (in decimal)\nnbtrial = 50 # number of increment of dt [-epsilon,epsilon] for the streching\n\n# parameters for DTW\nmlag = 50 # maxmum points to move (times dt gives the maximum time shifts)\nb = 5 # strain limit (to be tested)\ndirect = 1 # direction to accumulate errors (1=forward, -1=backward)\n\n# parameters for MWCS & MWCC\nmove_win_sec = 1.2*int(1/np.min(freq)) # moving window length (in sec)\nstep_sec = 0.3*move_win_sec # step for moving window sliding (in sec)\n\n# parameters for wavelet domain methods\ndj =1/12 # Spacing between discrete scales. Default value is 1/12.\ns0 =-1 # Smallest scale of the wavelet. 
Default value is 2*dt.\nJ =-1 # Number of scales less one.\nwvn='morlet' # wavelet class\n\n##############################################\n############ LOAD WAVEFORM DATA ##############\n##############################################\n\n# load stacked and sub-stacked waveforms \nwith pyasdf.ASDFDataSet(sfile,mode='r') as ds:\n dtype = 'Allstack0'+stack_method\n substacks = ds.auxiliary_data.list()\n nwin = len(substacks)-2\n try:\n dt = ds.auxiliary_data[dtype][ccomp].parameters['dt']\n dist = ds.auxiliary_data[dtype][ccomp].parameters['dist']\n maxlag = ds.auxiliary_data[dtype][ccomp].parameters['maxlag']\n tdata = ds.auxiliary_data[dtype][ccomp].data[:]\n except Exception:\n raise ValueError('cannot open %s to read'%sfile)\n\n# make coda window based on vmin\ntwin = [int(dist/vmin),int(dist/vmin)+lwin]\nif twin[1] > maxlag:\n raise ValueError('proposed window exceeds limit! reduce %d'%lwin)\n\n# ref and tvec\nref = tdata\ntvec_all = np.arange(-maxlag,maxlag+dt,dt)\n# add 20 s to the coda window for plotting purpose\ndisp_indx = np.where(np.abs(tvec_all)<=np.max(twin)+20)[0]\n# causal and acausal coda windows \npwin_indx = np.where((tvec_all>=np.min(twin))&(tvec_all<np.max(twin)))[0]\nnwin_indx = np.where((tvec_all<=-np.min(twin))&(tvec_all>=-np.max(twin)))[0]\ntvec_disp = tvec_all[disp_indx]\n# npts for the win and raw\nnpts_all = len(tvec_all)\nnpts_win = len(pwin_indx)\n\n# save parameters as a dictionary\npara = {'twin':twin,'freq':freq,'dt':dt,'ccomp':ccomp,'onelag':onelag,'norm_flag':norm_flag,'npts_all':npts_all,'npts_win':npts_win}\n\n# allocate matrix for cur and ref waveforms and corr coefficient\ncur = np.zeros(shape=(nwin,npts_all),dtype=np.float32)\ntcur = np.zeros(shape=(nwin,npts_all),dtype=np.float32)\npcor_cc = np.zeros(shape=(nwin),dtype=np.float32)\nncor_cc = np.zeros(shape=(nwin),dtype=np.float32)\ntimestamp = np.empty(nwin,dtype='datetime64[s]')\n\n# tick inc for plotting \nif nwin>100:\n tick_inc = int(nwin/10)\nelif nwin>10:\n tick_inc = int(nwin/5) \nelse:\n tick_inc = 2\n\n# load all current waveforms and get corr-coeff\nwith pyasdf.ASDFDataSet(sfile,mode='r') as ds:\n\n # loop through each freq band\n for ifreq in range(nfreq):\n \n # freq parameters\n freq1 = freq[ifreq]\n freq2 = freq[ifreq+1]\n para['freq'] = [freq1,freq2]\n move_win_sec = 1.2*int(1/freq1)\n\n # reference waveform\n tref = bandpass(ref,freq1,freq2,int(1/dt),corners=4,zerophase=True)\n if norm_flag:\n tref = tref/np.max(np.abs(tref))\n\n # loop through each cur waveform and do filtering\n igood = 0\n for ii in range(nwin):\n try:\n cur[igood] = ds.auxiliary_data[substacks[ii+2]][ccomp].data[:]\n except Exception:\n continue\n timestamp[igood] = obspy.UTCDateTime(np.float(substacks[ii+2][1:]))\n tcur[igood] = bandpass(cur[igood],freq1,freq2,int(1/dt),corners=4,zerophase=True)\n if norm_flag:\n tcur[igood] /= np.max(np.abs(tcur[igood]))\n \n # get cc coefficient\n pcor_cc[igood] = np.corrcoef(tref[pwin_indx],tcur[igood,pwin_indx])[0,1]\n ncor_cc[igood] = np.corrcoef(tref[nwin_indx],tcur[igood,nwin_indx])[0,1]\n igood += 1 \n nwin = igood\n\n ############ PLOT WAVEFORM DATA AND CC ##############\n # plot the raw waveforms and the correlation coefficient\n plt.figure(figsize=(11,12))\n ax0 = plt.subplot(311)\n # 2D waveform matrix\n ax0.matshow(tcur[:igood,disp_indx],cmap='seismic',extent=[tvec_disp[0],tvec_disp[-1],nwin,0],aspect='auto')\n ax0.plot([0,0],[0,nwin],'k--',linewidth=2)\n ax0.set_title('%s, dist:%5.2fkm, filter @%4.2f-%4.2fHz' % (sfile.split('/')[-1],dist,freq1,freq2))\n 
ax0.set_xlabel('time [s]')\n ax0.set_ylabel('waveforms')\n ax0.set_yticks(np.arange(0,nwin,step=tick_inc))\n # shade the coda part\n ax0.fill(np.concatenate((tvec_all[nwin_indx],np.flip(tvec_all[nwin_indx],axis=0)),axis=0), \\\n np.concatenate((np.ones(len(nwin_indx))*0,np.ones(len(nwin_indx))*nwin),axis=0),'c', alpha=0.3,linewidth=1)\n ax0.fill(np.concatenate((tvec_all[pwin_indx],np.flip(tvec_all[pwin_indx],axis=0)),axis=0), \\\n np.concatenate((np.ones(len(nwin_indx))*0,np.ones(len(nwin_indx))*nwin),axis=0),'y', alpha=0.3)\n ax0.xaxis.set_ticks_position('bottom')\n # reference waveform\n ax1 = plt.subplot(613)\n ax1.plot(tvec_disp,tref[disp_indx],'k-',linewidth=1)\n ax1.autoscale(enable=True, axis='x', tight=True)\n ax1.grid(True)\n ax1.legend(['reference'],loc='upper right')\n # the cross-correlation coefficient\n ax2 = plt.subplot(614)\n ax2.plot(timestamp[:igood],pcor_cc[:igood],'yo-',markersize=2,linewidth=1)\n ax2.plot(timestamp[:igood],ncor_cc[:igood],'co-',markersize=2,linewidth=1)\n ax2.set_xticks(timestamp[0:nwin:tick_inc])\n ax2.set_ylabel('cc coeff')\n ax2.legend(['positive','negative'],loc='upper right')\n\n ###############################################\n ############ MONITORING PROCESSES #############\n ###############################################\n \n # allocate matrix for dvv and its unc\n dvv_stretch = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_dtw = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_mwcs = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_wcc = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_wts = np.zeros(shape=(nwin,4),dtype=np.float32)\n dvv_wxs = np.zeros(shape=(nwin,4),dtype=np.float32)\n\n # loop through each win again\n for ii in range(nwin):\n\n # causal and acausal lags for both ref and cur waveforms\n pcur = tcur[ii,pwin_indx]\n ncur = tcur[ii,nwin_indx]\n pref = tref[pwin_indx]\n nref = tref[nwin_indx]\n\n # functions working in time domain\n if do_stretch:\n dvv_stretch[ii,0],dvv_stretch[ii,1],cc,cdp = noise_module.stretching(pref,pcur,epsilon,nbtrial,para)\n dvv_stretch[ii,2],dvv_stretch[ii,3],cc,cdp = noise_module.stretching(nref,ncur,epsilon,nbtrial,para)\n if do_dtw:\n dvv_dtw[ii,0],dvv_dtw[ii,1],dist = noise_module.dtw_dvv(pref,pcur,para,mlag,b,direct)\n dvv_dtw[ii,2],dvv_dtw[ii,3],dist = noise_module.dtw_dvv(nref,ncur,para,mlag,b,direct)\n\n # check parameters for mwcs\n if move_win_sec > 0.5*(np.max(twin)-np.min(twin)):\n raise IOError('twin too small for MWCS')\n\n # functions with moving window \n if do_mwcs:\n dvv_mwcs[ii,0],dvv_mwcs[ii,1] = noise_module.mwcs_dvv(pref,pcur,move_win_sec,step_sec,para)\n dvv_mwcs[ii,2],dvv_mwcs[ii,3] = noise_module.mwcs_dvv(nref,ncur,move_win_sec,step_sec,para)\n if do_mwcc:\n dvv_wcc[ii,0],dvv_wcc[ii,1] = noise_module.WCC_dvv(pref,pcur,move_win_sec,step_sec,para)\n # note: use the acausal (negative-lag) pair here, mirroring the other methods\n dvv_wcc[ii,2],dvv_wcc[ii,3] = noise_module.WCC_dvv(nref,ncur,move_win_sec,step_sec,para)\n\n allfreq = False # average dv/v over the frequency band for wts and wxs\n if do_wts:\n dvv_wts[ii,0],dvv_wts[ii,1] = noise_module.wts_allfreq(pref,pcur,allfreq,para,epsilon,nbtrial,dj,s0,J,wvn)\n dvv_wts[ii,2],dvv_wts[ii,3] = noise_module.wts_allfreq(nref,ncur,allfreq,para,epsilon,nbtrial,dj,s0,J,wvn)\n if do_wxs:\n dvv_wxs[ii,0],dvv_wxs[ii,1] = noise_module.wxs_allfreq(pref,pcur,allfreq,para,dj,s0,J)\n dvv_wxs[ii,2],dvv_wxs[ii,3] = noise_module.wxs_allfreq(nref,ncur,allfreq,para,dj,s0,J)\n\n '''\n allfreq = True # look at all frequency range\n para['freq'] = freq\n\n # functions in wavelet domain to compute dvv for all frequencies\n if do_wts:\n 
dfreq,dv_wts1,unc1 = noise_module.wts_allfreq(ref[pwin_indx],cur[pwin_indx],allfreq,para,epsilon,nbtrial,dj,s0,J,wvn)\n dfreq,dv_wts2,unc2 = noise_module.wts_allfreq(ref[nwin_indx],cur[nwin_indx],allfreq,para,epsilon,nbtrial,dj,s0,J,wvn)\n if do_wxs:\n dfreq,dv_wxs1,unc1 = noise_module.wxs_allfreq(ref[pwin_indx],cur[pwin_indx],allfreq,para,dj,s0,J)\n dfreq,dv_wxs2,unc2 = noise_module.wxs_allfreq(ref[nwin_indx],cur[nwin_indx],allfreq,para,dj,s0,J)\n '''\n\n ###############################################\n ############ PLOTTING SECTION #################\n ###############################################\n\n # dv/v at each filtered frequency band\n ax3 = plt.subplot(313)\n legend_mark = []\n if do_stretch:\n ax3.plot(timestamp[:igood],dvv_stretch[:,0],'yo-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_stretch[:,2],'co-',markersize=6,linewidth=0.5)\n legend_mark.append('str+')\n legend_mark.append('str-')\n if do_dtw:\n ax3.plot(timestamp[:igood],dvv_dtw[:,0],'yv-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_dtw[:,2],'cv-',markersize=6,linewidth=0.5)\n legend_mark.append('dtw+')\n legend_mark.append('dtw-')\n if do_mwcs:\n ax3.plot(timestamp[:igood],dvv_mwcs[:,0],'ys-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_mwcs[:,2],'cs-',markersize=6,linewidth=0.5)\n legend_mark.append('mwcs+')\n legend_mark.append('mwcs-')\n if do_mwcc:\n ax3.plot(timestamp[:igood],dvv_wcc[:,0],'y*-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_wcc[:,2],'c*-',markersize=6,linewidth=0.5)\n legend_mark.append('wcc+')\n legend_mark.append('wcc-')\n if do_wts:\n ax3.plot(timestamp[:igood],dvv_wts[:,0],'yx-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_wts[:,2],'cx-',markersize=6,linewidth=0.5)\n legend_mark.append('wts+')\n legend_mark.append('wts-')\n if do_wxs:\n ax3.plot(timestamp[:igood],dvv_wxs[:,0],'yp-',markersize=6,linewidth=0.5)\n ax3.plot(timestamp[:igood],dvv_wxs[:,2],'cp-',markersize=6,linewidth=0.5)\n legend_mark.append('wxs+')\n legend_mark.append('wxs-')\n ax3.legend(legend_mark,loc='upper right')\n #ax3.grid('true')\n ax3.set_ylabel('dv/v [%]')\n\n # save figure or just show\n outfname = outdir+'/{0:s}_{1:4.2f}_{2:4.2f}Hz.pdf'.format(sfile.split('/')[-1],freq1,freq2)\n plt.savefig(outfname, format='pdf', dpi=400)\n plt.close()\n",
"import pyasdf \nimport numpy as np \nimport scipy.fftpack\nimport matplotlib.pyplot as plt \n\n'''\nthis script takes a chunk of noise spectrum for a station pair and \ncompare their cross-correlation functions computed using two schemes:\none is averaging the frequency domain and the other is in the time\ndomain\n'''\n\ndef cross_correlation1(fft1,fft2,maxlag,dt,Nfft):\n #------convert all 2D arrays into 1D to speed up--------\n corr = np.zeros(fft1.shape,dtype=np.complex64)\n corr = np.conj(fft1) * fft2\n\n ncorr = np.zeros((fft1.shape[0],Nfft),dtype=np.complex64)\n ncorr[:,:Nfft//2] = corr[:,:]\n ncorr[:,-(Nfft//2)+1:]=np.flip(np.conj(ncorr[:,1:(Nfft//2)]),axis=1)\n ncorr[:,0]=complex(0,0)\n ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=1)))\n\n tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt\n ind = np.where(np.abs(tcorr) <= maxlag)[0]\n ncorr = ncorr[:,ind]\n ncorr = np.mean(ncorr,axis=0)\n return ncorr\n\ndef cross_correlation2(fft1,fft2,maxlag,dt,Nfft):\n #------convert all 2D arrays into 1D to speed up--------\n corr = np.zeros(fft1.shape,dtype=np.complex64)\n corr = np.conj(fft1) * fft2\n\n ncorr = np.zeros(shape=Nfft,dtype=np.complex64)\n ncorr[:Nfft//2] = np.mean(corr,axis=0)\n ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)\n ncorr[0]=complex(0,0)\n ncorr = np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0))\n print(ncorr.real,ncorr.imag)\n\n tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt\n ind = np.where(np.abs(tcorr) <= maxlag)[0]\n ncorr = ncorr[ind]\n return ncorr\n\n\n#-----common parameters------\niday = '2010_01_10'\nicomp = 'EHZ'\ndt = 0.05\nmaxlag = 800\n\nsfile1 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.AC2H.h5'\nsfile2 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.CHHH.h5'\n\n#-----------reading the data------------\nds1 = pyasdf.ASDFDataSet(sfile1,mode='r')\nds2 = pyasdf.ASDFDataSet(sfile2,mode='r')\n\nspect1 = ds1.auxiliary_data[icomp][iday].data[:]\nspect2 = ds2.auxiliary_data[icomp][iday].data[:]\nstd1 = ds1.auxiliary_data[icomp][iday].parameters['std']\nstd2 = ds2.auxiliary_data[icomp][iday].parameters['std']\nnwin = spect1.shape[0]\nnfft = spect1.shape[1]*2\n\nprint('data dimension for spect1 and spect2 are %d and %d' % (spect1.ndim,spect2.ndim))\n\n#------select the sections-------\nindx1 = np.where(std1<10)[0]\nindx2 = np.where(std2<10)[0]\nbb=np.intersect1d(indx1,indx2)\nprint(spect1[bb,:],spect2[bb,:])\n\ncorr1=cross_correlation1(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)\ncorr2=cross_correlation2(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)\n\n#---plotting----\nplt.subplot(311)\nplt.plot(corr1)\nplt.subplot(312)\nplt.plot(corr2)\nplt.subplot(313)\nplt.plot(corr2)\nplt.plot(corr1)\nplt.show()"
] |
[
[
"numpy.max",
"numpy.empty",
"numpy.zeros",
"numpy.float",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"numpy.min",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.abs",
"numpy.flip",
"numpy.corrcoef",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.subplot"
],
[
"numpy.zeros",
"numpy.round",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.where",
"numpy.conj",
"numpy.arange",
"numpy.intersect1d",
"numpy.abs",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplot"
]
] |
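The check_linearity_fft.py record above compares two routes to a stacked cross-correlation: inverse-FFT every window and average in the time domain, versus averaging the cross-spectra first and taking a single inverse FFT. Since the inverse FFT is linear, the two routes must agree to floating-point precision. Here is a self-contained numpy sketch of that identity, using synthetic spectra in place of the ASDF input and dropping the script's ifftshift/negative-frequency bookkeeping; all names and sizes are illustrative.

import numpy as np
import scipy.fftpack

# synthetic complex spectra for nwin windows (stand-ins for the ASDF data)
rng = np.random.RandomState(0)
nwin, nfft = 16, 256
fft1 = rng.randn(nwin, nfft) + 1j * rng.randn(nwin, nfft)
fft2 = rng.randn(nwin, nfft) + 1j * rng.randn(nwin, nfft)

corr = np.conj(fft1) * fft2  # per-window cross spectra

# scheme 1: inverse FFT each window, then average in the time domain
c1 = np.mean(np.real(scipy.fftpack.ifft(corr, nfft, axis=1)), axis=0)

# scheme 2: average the spectra first, then a single inverse FFT
c2 = np.real(scipy.fftpack.ifft(np.mean(corr, axis=0), nfft))

# linearity of the inverse FFT: both schemes give the same stack
assert np.allclose(c1, c2)

The measure_dvv.py record in the same row relies on the same per-window bookkeeping: each substack is bandpassed, correlated against the all-stack reference, and only then handed to the dv/v estimators.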
kanesp/keras
|
[
"7f8c62b90274f9c5a261984c098312ff8fab3d66"
] |
[
"keras/layers/preprocessing/index_lookup_test.py"
] |
[
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras text vectorization preprocessing layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport itertools\nimport os\nimport random\nimport string\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport keras\nfrom keras import keras_parameterized\nfrom keras import testing_utils\nfrom keras.layers.preprocessing import index_lookup\nfrom keras.layers.preprocessing import index_lookup_v1\nfrom keras.layers.preprocessing import preprocessing_test_utils\nfrom keras.saving import save\nfrom keras.utils.generic_utils import CustomObjectScope\n\n\ndef get_layer_class():\n if tf.executing_eagerly():\n return index_lookup.IndexLookup\n else:\n return index_lookup_v1.IndexLookup\n\n\ndef _get_end_to_end_test_cases():\n test_cases = (\n {\n \"testcase_name\":\n \"test_strings_soft_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [3], [4], [5], [5], [4], [2], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_inverse_strings_soft_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. 
This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[2], [3], [4], [1], [1], [4], [2], [5]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n \"invert\": True\n },\n \"expected_output\":\n np.array([[b\"earth\"], [b\"wind\"], [b\"and\"], [b\"[OOV]\"], [b\"[OOV]\"],\n [b\"and\"], [b\"earth\"], [b\"fire\"]]),\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_with_special_tokens\",\n # Mask and oov values in the vocab data should be dropped, and mapped\n # to 0 and 1 respectively when calling the layer.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"\"], [\"\"], [\"\"], [\"[OOV]\"], [\"[OOV]\"], [\"[OOV]\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"\"], [\"wind\"], [\"[OOV]\"], [\"and\"], [\"\"],\n [\"fire\"], [\"and\"], [\"[OOV]\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_ints_soft_vocab_cap\",\n # Create an array where 1138 is the most frequent term, followed by\n # 1729, then 725, then 42. This ensures that the vocab accumulator\n # is sorting by frequency.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [3], [4], [5], [5], [4], [2], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_with_special_tokens\",\n # Mask and oov values in the vocab data should be dropped, and mapped\n # to 0 and 1 respectively when calling the layer.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [0], [0], [0],\n [-1], [-1], [-1], [1729], [1729], [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [0], [1729], [-1], [725], [0], [42], [725],\n [-1], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_hard_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. 
This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [3], [4], [1], [1], [4], [2], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_inverse_strings_hard_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[2], [3], [4], [1], [1], [4], [2], [5]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n \"invert\": True\n },\n \"expected_output\":\n np.array([[b\"earth\"], [b\"wind\"], [b\"and\"], [b\"[OOV]\"], [b\"[OOV]\"],\n [b\"and\"], [b\"earth\"], [b\"[OOV]\"]]),\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_hard_vocab_cap\",\n # Create an array where 1138 is the most frequent term, followed by\n # 1729, then 725, then 42. This ensures that the vocab accumulator\n # is sorting by frequency.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [3], [4], [1], [1], [4], [2], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_tf_idf_output\",\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]]),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"output_mode\": index_lookup.TFIDF,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],\n [0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],\n [0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],\n [0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 0]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_tf_idf_output\",\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"output_mode\": index_lookup.TFIDF,\n \"dtype\": tf.string,\n },\n \"expected_output\": [[0, 1.098612, 0, 0, 0], [0, 0, 1.252763, 0, 0],\n [0, 0, 0, 1.466337, 0], [0, 0, 0, 0, 1.7917595],\n [0, 0, 0, 0, 1.7917595], [0, 0, 0, 1.4663371, 0],\n [0, 1.098612, 0, 0, 0], [1.402368, 0, 0, 0, 
0]],\n \"input_dtype\":\n tf.string\n },\n )\n\n crossed_test_cases = []\n # Cross above test cases with use_dataset in (True, False)\n for use_dataset in (True, False):\n for case in test_cases:\n case = case.copy()\n if use_dataset:\n case[\"testcase_name\"] = case[\"testcase_name\"] + \"_with_dataset\"\n case[\"use_dataset\"] = use_dataset\n crossed_test_cases.append(case)\n\n return crossed_test_cases\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupLayerTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n @parameterized.named_parameters(*_get_end_to_end_test_cases())\n def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,\n use_dataset, expected_output,\n input_dtype):\n cls = get_layer_class()\n if \"invert\" in kwargs and kwargs[\"invert\"]:\n expected_output_dtype = kwargs[\"dtype\"]\n elif \"output_mode\" in kwargs and kwargs[\"output_mode\"] != index_lookup.INT:\n expected_output_dtype = tf.float32\n else:\n expected_output_dtype = tf.int64\n\n input_shape = input_data.shape\n\n if use_dataset:\n # Keras APIs expect batched datasets.\n # TODO(rachelim): `model.predict` predicts the result on each\n # dataset batch separately, then tries to concatenate the results\n # together. When the results have different shapes on the non-concat\n # axis (which can happen in the output_mode = INT case for\n # IndexLookup), the concatenation fails. In real use cases, this may\n # not be an issue because users are likely to pipe the preprocessing layer\n # into other keras layers instead of predicting it directly. A workaround\n # for these unit tests is to have the dataset only contain one batch, so\n # no concatenation needs to happen with the result. For consistency with\n # numpy input, we should make `predict` join differently shaped results\n # together sensibly, with 0 padding.\n input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(\n input_shape[0])\n vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(\n input_shape[0])\n\n with CustomObjectScope({\"IndexLookup\": cls}):\n output_data = testing_utils.layer_test(\n cls,\n kwargs=kwargs,\n input_shape=input_shape,\n input_data=input_data,\n input_dtype=input_dtype,\n expected_output_dtype=expected_output_dtype,\n validate_training=False,\n adapt_data=vocab_data)\n if \"invert\" in kwargs and kwargs[\"invert\"]:\n self.assertAllEqual(expected_output, output_data)\n else:\n self.assertAllClose(expected_output, output_data)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingInputTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=[\"fire\", \"michigan\"],\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, 
output_data.dense_shape)\n\n def test_sparse_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 32], dtype=np.int64),\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int64)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int32_input_with_int64_keys(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int32)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingMultiOOVTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_string_input_multi_bucket(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=[\"fire\", \"ohio\"],\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [6, 2]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=2,\n mask_token=\"\",\n 
oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_sparse_int_input_multi_bucket(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 133], dtype=np.int64),\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [6, 2]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=2,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input_multi_bucket(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"ohio\"]])\n expected_output = [[3, 4, 6], [6, 5, 3, 2]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=2,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input_multi_bucket(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 133]],\n dtype=np.int64)\n expected_output = [[3, 4, 6], [6, 5, 3, 2]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=2,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingAdaptTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_adapt(self):\n vocab_data = tf.SparseTensor(\n indices=[[0, 0], [0, 1], [1, 2]],\n values=[\"michigan\", \"fire\", \"michigan\"],\n dense_shape=[3, 4])\n vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)\n\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.adapt(vocab_dataset)\n expected_vocabulary = [\"\", \"[OOV]\", \"michigan\", \"fire\"]\n self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())\n\n def test_ragged_adapt(self):\n vocab_data = tf.ragged.constant([[\"michigan\"],\n [\"fire\", \"michigan\"]])\n vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)\n\n layer = 
get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.adapt(vocab_dataset)\n expected_vocabulary = [\"\", \"[OOV]\", \"michigan\", \"fire\"]\n self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())\n\n def test_sparse_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 32], dtype=np.int64),\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int64)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_single_string_generator_dataset(self):\n\n def word_gen():\n for _ in itertools.count(1):\n yield \"\".join(random.choice(string.ascii_letters) for i in range(2))\n\n ds = tf.data.Dataset.from_generator(word_gen, tf.string,\n tf.TensorShape([]))\n batched_ds = ds.take(2)\n input_t = keras.Input(shape=(), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=0,\n mask_token=None,\n oov_token=None,\n dtype=tf.string)\n _ = layer(input_t)\n layer.adapt(batched_ds)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupOutputTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def _write_to_temp_file(self, file_name, vocab_list):\n vocab_path = os.path.join(self.get_temp_dir(), file_name + \".txt\")\n with tf.io.gfile.GFile(vocab_path, \"w\") as writer:\n for vocab in vocab_list:\n writer.write(vocab + \"\\n\")\n writer.flush()\n writer.close()\n return vocab_path\n\n def test_int_output(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n 
input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n self.assertAllEqual(int_data.shape.as_list(), [16, 4])\n\n def test_int_output_no_reserved_zero(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=None,\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_no_oov(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"ohio\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[1, 2, 3, -1], [4, 3, 1, -1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=0,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_hard_maximum(self):\n \"\"\"Check binary output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\", \"\"],\n [\"fire\", \"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [\n [0, 1, 1, 1, 1, 0],\n [1, 1, 0, 1, 1, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=6,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n pad_to_max_tokens=True,\n 
dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n binary_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=binary_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_no_oov(self):\n \"\"\"Check binary output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\", \"ohio\"],\n [\"fire\", \"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [\n [1, 1, 1, 1, 0],\n [1, 0, 1, 1, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=0,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n binary_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=binary_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_hard_maximum_multiple_adapts(self):\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"and\", \"earth\", \"michigan\"]])\n adapt_data = [\"earth\", \"earth\", \"earth\", \"earth\", \"wind\", \"wind\", \"wind\"]\n first_expected_output = [\n [1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0],\n ]\n second_adapt_data = [\n \"earth\", \"earth\", \"earth\", \"earth\", \"wind\", \"wind\", \"wind\", \"and\",\n \"and\", \"fire\"\n ]\n second_expected_output = [\n [0, 1, 1, 1, 0],\n [1, 1, 0, 1, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n pad_to_max_tokens=True,\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n\n # Test the first adapt\n layer.adapt(adapt_data)\n first_output = model.predict(input_array)\n # Test the second adapt\n layer.adapt(second_adapt_data)\n second_output = model.predict(input_array)\n self.assertAllEqual(first_expected_output, first_output)\n self.assertAllEqual(second_expected_output, second_output)\n\n def test_binary_output_soft_maximum(self):\n \"\"\"Check binary output when pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\", \"\"],\n [\"fire\", \"and\", \"earth\", \"michigan\", \"\"]])\n expected_output = [\n [0, 1, 1, 1, 1],\n [1, 1, 0, 1, 1],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n binary_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=binary_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n binary_data = layer(input_data)\n self.assertAllEqual(binary_data.shape.as_list(), [16, 2])\n\n def test_count_output_hard_maxiumum(self):\n \"\"\"Check count output 
when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"wind\", \"\"],\n [\"fire\", \"fire\", \"fire\", \"michigan\", \"\"]])\n expected_output = [\n [0, 1, 2, 1, 0, 0],\n [1, 0, 0, 0, 3, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=6,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n count_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=count_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_count_output_soft_maximum(self):\n \"\"\"Check count output when pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"wind\", \"\"],\n [\"fire\", \"fire\", \"fire\", \"michigan\", \"\"]])\n expected_output = [\n [0, 1, 2, 1, 0],\n [1, 0, 0, 0, 3],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n count_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=count_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_count_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n count_data = layer(input_data)\n self.assertAllEqual(count_data.shape.as_list(), [16, 2])\n\n def test_ifidf_output_hard_maximum(self):\n \"\"\"Check tf-idf output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # OOV idf weight (bucket 0) should 0.5, the average of passed weights.\n idf_weights = [.4, .25, .75, .6]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\", \"\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\", \"\"]])\n expected_output = [\n [0.00, 0.80, 0.25, 0.75, 0.00, 0.00],\n [1.00, 0.40, 0.00, 0.00, 0.60, 0.00],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=6,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data, idf_weights=idf_weights)\n layer_output = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=layer_output)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_ifidf_output_soft_maximum(self):\n \"\"\"Check tf-idf output when pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # OOV idf weight (bucket 0) should 0.5, the average of passed weights.\n idf_weights = [.4, .25, .75, .6]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\", \"\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\", \"\"]])\n expected_output = [\n [0.00, 0.80, 0.25, 0.75, 0.00],\n [1.00, 0.40, 0.00, 0.00, 0.60],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n 
max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data, idf_weights=idf_weights)\n layer_output = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=layer_output)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_ifidf_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n layer_output = layer(input_data)\n self.assertAllEqual(layer_output.shape.as_list(), [16, 2])\n\n def test_int_output_file_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]\n\n vocab_file = self._write_to_temp_file(\"temp\", vocab_data)\n vocabulary_initializer = tf.lookup.TextFileInitializer(\n filename=vocab_file,\n key_dtype=tf.string,\n key_index=tf.lookup.TextFileIndex.WHOLE_LINE,\n value_dtype=tf.int64,\n value_index=tf.lookup.TextFileIndex.LINE_NUMBER,\n value_index_offset=2)\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocabulary_initializer,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_int_file_vocab(self):\n vocab_data = [\"10\", \"20\", \"30\", \"40\"]\n input_array = np.array([[10, 20, 30, 40], [40, 0, 10, 42]])\n expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]\n\n vocab_file = self._write_to_temp_file(\"temp\", vocab_data)\n vocabulary_initializer = tf.lookup.TextFileInitializer(\n filename=vocab_file,\n key_dtype=tf.int64,\n key_index=tf.lookup.TextFileIndex.WHOLE_LINE,\n value_dtype=tf.int64,\n value_index=tf.lookup.TextFileIndex.LINE_NUMBER,\n value_index_offset=2)\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = get_layer_class()(\n vocabulary=vocabulary_initializer,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupVocabularyTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def 
test_int_output_explicit_vocab_with_special_tokens(self):\n vocab_data = [\"\", \"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_vocab_with_max_cap(self):\n vocab_data = [\"\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n self.assertAllEqual(layer.vocab_size(), 5)\n\n def test_int_vocab_with_max_cap(self):\n vocab_data = [0, -1, 42, 1276, 1138]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n self.assertAllEqual(layer.vocab_size(), 5)\n\n def test_vocab_with_multiple_oov_indices(self):\n vocab_data = [\"\", \"[OOV]\", \"[OOV]\", \"[OOV]\", \"wind\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=3,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_int_vocab_with_multiple_oov_indices(self):\n vocab_data = [0, -1, -1, -1, 42]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=3,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_non_unique_vocab_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\", \"fire\"]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*fire.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n\n def test_vocab_with_oov_and_wrong_mask_fails(self):\n vocab_data = [\"custom_mask\", \"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*does not have the mask token.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_oov_and_no_mask_fails(self):\n vocab_data = [\"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved OOV.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_mask_but_no_oov_fails(self):\n vocab_data = [\"\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*does not 
have the OOV token.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_repeated_element_fails(self):\n vocab_data = [\"earth\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*earth.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_oov_element_fails(self):\n vocab_data = [\"earth\", \"test\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved OOV.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [\"earth\", \"mask_token\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"mask_token\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved mask.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_set_after_call_pad_to_max_false_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n pad_to_max_tokens=False,\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n # Calling the layer should lock the vocabulary.\n _ = layer([[\"earth\"]])\n with self.assertRaisesRegex(RuntimeError, \"vocabulary cannot be changed\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_idf_weights_non_tfidf_output_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n weight_data = [1, 1, 1, 1, 1]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError,\n \"`idf_weights` should only be set if\"):\n layer.set_vocabulary(vocab_data, idf_weights=weight_data)\n\n def test_vocab_with_idf_weights_length_mismatch_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n weight_data = [1, 1, 1, 1, 1] # too long\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n with self.assertRaisesRegex(\n ValueError, \"`idf_weights` must be the same length as vocab\"):\n layer.set_vocabulary(vocab_data, idf_weights=weight_data)\n\n def test_vocab_without_idf_weights_tfidf_output_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n with self.assertRaisesRegex(\n ValueError, \"`idf_weights` must be set if output_mode is TFIDF\"):\n layer.set_vocabulary(vocab_data)\n\n def test_non_unique_int_vocab_fails(self):\n vocab_data = [12, 13, 14, 15, 15]\n with self.assertRaisesRegex(ValueError, \"repeated term.*15\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n\n def test_int_vocab_with_oov_and_wrong_mask_fails(self):\n vocab_data = [1234, -1, 11, 21, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n 
oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"does not have the mask token `0`\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_oov_and_no_mask_fails(self):\n vocab_data = [-1, 11, 12, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"Reserved OOV\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_mask_but_no_oov_fails(self):\n vocab_data = [0, 11, 12, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"does not have the OOV token `-1`\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_repeated_element_fails(self):\n vocab_data = [11, 11, 34, 23, 124]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"repeated term.*11\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_reserved_oov_element_fails(self):\n vocab_data = [14, 38, -1, 34, 3, 84]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"Reserved OOV\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [125, 0, 3, 4, 94]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \"Reserved mask\"):\n layer.set_vocabulary(vocab_data)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupInverseVocabularyTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"\", \"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 1]])\n expected_output = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"[OOV]\"]])\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_vocab_with_max_cap(self):\n vocab_data = [\"\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_int_vocab_with_max_cap(self):\n vocab_data = [0, -1, 42, 1276, 1138]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_non_unique_vocab_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\", \"fire\"]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*fire.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n 
max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n\n def test_non_int_output_fails(self):\n with self.assertRaisesRegex(ValueError, \"`output_mode` must be int\"):\n _ = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n output_mode=index_lookup.COUNT,\n invert=True)\n\n def test_vocab_with_repeated_element_fails(self):\n vocab_data = [\"earth\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*earth.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [\"earth\", \"mask_token\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"mask_token\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*Reserved mask.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_non_unique_int_vocab_fails(self):\n vocab_data = [12, 13, 14, 15, 15]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*15.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n\n def test_int_vocab_with_repeated_element_fails(self):\n vocab_data = [11, 11, 34, 23, 124]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*11.*\"):\n layer.set_vocabulary(vocab_data)\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_eager=True)\nclass IndexLookupSaveableTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_ops_are_not_added_with_multiple_get_set_weights(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n weights = model.get_weights()\n model.set_weights(weights)\n keras.backend.get_session().graph.finalize()\n weights = model.get_weights()\n model.set_weights(weights)\n\n def test_layer_saving_with_h5(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n path = os.path.join(self.get_temp_dir(), \"model\")\n with self.assertRaisesRegex(NotImplementedError,\n \"Save or restore weights that is not.*\"):\n save.save_model(model, path, save_format=\"h5\")\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupErrorTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_too_long_vocab_fails_in_single_setting(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n layer = get_layer_class()(\n max_tokens=4,\n num_oov_indices=1,\n 
mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError,\n \"vocabulary larger than the maximum vocab.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_zero_max_tokens_fails(self):\n with self.assertRaisesRegex(ValueError, \".*max_tokens.*\"):\n _ = get_layer_class()(\n max_tokens=0,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupSavingTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_vocabulary_persistence_across_saving(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n # Build and validate a golden model.\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(output_dataset, expected_output)\n\n # Save the model to disk.\n output_path = os.path.join(self.get_temp_dir(), \"tf_keras_saved_model\")\n model.save(output_path, save_format=\"tf\")\n\n # Delete the session and graph to ensure that the loaded model is generated\n # from scratch.\n # TODO(b/149526183): Can't clear session when TF2 is disabled.\n if tf.__internal__.tf2.enabled():\n keras.backend.clear_session()\n\n loaded_model = keras.models.load_model(\n output_path, custom_objects={\"IndexLookup\": get_layer_class()})\n\n # Ensure that the loaded model is unique (so that the save/load is real)\n self.assertIsNot(model, loaded_model)\n\n # Validate correctness of the new model.\n new_output_dataset = loaded_model.predict(input_array)\n self.assertAllEqual(new_output_dataset, expected_output)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupStringCombinerTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def compare_text_accumulators(self, a, b, msg=None):\n if a is None or b is None:\n self.assertAllEqual(a, b, msg=msg)\n\n self.assertAllEqual(a.count_dict, b.count_dict, msg=msg)\n\n compare_accumulators = compare_text_accumulators\n\n def update_accumulator(self, accumulator, data):\n accumulator.count_dict.update(dict(zip(data[\"vocab\"], data[\"counts\"])))\n\n return accumulator\n\n def test_combiner_api_compatibility_int_mode(self):\n data = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"earth\", \"wind\", \"and\", \"michigan\"]])\n combiner = index_lookup._IndexLookupCombiner()\n expected_accumulator_output = {\n \"vocab\": np.array([\"and\", \"earth\", \"wind\", \"fire\", \"michigan\"]),\n \"counts\": np.array([2, 2, 2, 1, 1]),\n }\n expected_extract_output = {\n \"vocab\": np.array([\"wind\", \"earth\", \"and\", \"michigan\", \"fire\"]),\n \"idf_weights\": None,\n }\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_serialize_and_deserialize(combiner, data,\n expected_accumulator)\n self.validate_accumulator_uniqueness(combiner, data)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n # 
TODO(askerryryan): Add tests confirming equivalence to behavior of\n # existing tf.keras.preprocessing.text.Tokenizer.\n @parameterized.named_parameters(\n {\n \"testcase_name\":\n \"top_k_smaller_than_full_vocab\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\":\n \"top_k_larger_than_full_vocab\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n 10,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\":\n \"no_top_k\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n None,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"single_element_per_row\",\n \"data\": np.array([[\"earth\"], [\"wind\"], [\"fire\"], [\"wind\"], [\"and\"]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"and\", \"earth\", \"fire\"]),\n \"counts\": np.array([2, 1, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n },\n # Which tokens are retained are based on global frequency, and thus are\n # sensitive to frequency within a document. 
In contrast, because idf only\n # considers the presence of a token in a document, it is insensitive\n # to the frequency of the token within the document.\n {\n \"testcase_name\":\n \"retained_tokens_sensitive_to_within_document_frequency\",\n \"data\":\n np.array([[\"earth\", \"earth\"], [\"wind\", \"wind\"], [\"fire\", \"fire\"],\n [\"wind\", \"wind\"], [\"and\", \"michigan\"]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"earth\", \"fire\", \"and\", \"michigan\"]),\n \"counts\": np.array([4, 2, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n })\n def test_combiner_computation(self, data, vocab_size,\n expected_accumulator_output,\n expected_extract_output):\n combiner = index_lookup._IndexLookupCombiner(vocab_size=vocab_size)\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_computation(combiner, data, expected_accumulator)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupIntCombinerTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def compare_text_accumulators(self, a, b, msg=None):\n if a is None or b is None:\n self.assertAllEqual(a, b, msg=msg)\n\n self.assertAllEqual(a.count_dict, b.count_dict, msg=msg)\n\n compare_accumulators = compare_text_accumulators\n\n def update_accumulator(self, accumulator, data):\n accumulator.count_dict.update(dict(zip(data[\"vocab\"], data[\"counts\"])))\n\n return accumulator\n\n def test_combiner_api_compatibility_int_mode(self):\n data = np.array([[42, 1138, 725, 1729], [42, 1138, 725, 203]])\n combiner = index_lookup._IndexLookupCombiner()\n expected_accumulator_output = {\n \"vocab\": np.array([1138, 725, 42, 1729, 203]),\n \"counts\": np.array([2, 2, 2, 1, 1]),\n }\n expected_extract_output = {\n \"vocab\": np.array([1138, 725, 42, 1729, 203]),\n \"idf_weights\": None,\n }\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_serialize_and_deserialize(combiner, data,\n expected_accumulator)\n self.validate_accumulator_uniqueness(combiner, data)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n # TODO(askerryryan): Add tests confirming equivalence to behavior of\n # existing tf.keras.preprocessing.text.Tokenizer.\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"top_k_smaller_than_full_vocab\",\n \"data\": np.array([[42, 1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"top_k_larger_than_full_vocab\",\n \"data\": np.array([[42, 1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": 10,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"no_top_k\",\n \"data\": np.array([[42, 
1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": None,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"single_element_per_row\",\n \"data\": np.array([[42], [1138], [1729], [1138], [725]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([2, 1, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725]),\n \"idf_weights\": None,\n },\n },\n # Which tokens are retained are based on global frequency, and thus are\n # sensitive to frequency within a document. In contrast, because idf only\n # considers the presence of a token in a document, it is insensitive\n # to the frequency of the token within the document.\n {\n \"testcase_name\":\n \"retained_tokens_sensitive_to_within_document_frequency\",\n \"data\":\n np.array([[42, 42], [1138, 1138], [1729, 1729], [1138, 1138],\n [725, 203]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 42, 1729, 725, 203]),\n \"counts\": np.array([4, 2, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 42]),\n \"idf_weights\": None,\n },\n })\n def test_combiner_computation(self, data, vocab_size,\n expected_accumulator_output,\n expected_extract_output):\n combiner = index_lookup._IndexLookupCombiner(vocab_size=vocab_size)\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_computation(combiner, data, expected_accumulator)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"tensorflow.compat.v2.SparseTensor",
"numpy.array",
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.ragged.constant",
"tensorflow.compat.v2.data.Dataset.from_tensor_slices",
"tensorflow.compat.v2.__internal__.tf2.enabled",
"tensorflow.compat.v2.data.Dataset.from_tensors",
"tensorflow.compat.v2.lookup.TextFileInitializer",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.test.main"
]
] |
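The index_lookup tests above all assert the same index contract: 0 is the mask token, indices 1..num_oov_indices form the OOV range, and vocabulary terms start after that. A minimal pure-Python sketch of that contract, for illustration only (the real layer is backed by TF lookup tables and hashes OOV tokens across the range when num_oov_indices > 1):

```python
# Minimal sketch of the index contract asserted by the tests above:
# index 0 = mask token, indices [1, num_oov_indices] = OOV range,
# vocabulary terms start at 1 + num_oov_indices. Illustration only;
# the real layer uses TF lookup tables.

def make_lookup(vocab, mask_token="", num_oov_indices=1):
    table = {mask_token: 0}
    for idx, term in enumerate(vocab):
        table[term] = idx + 1 + num_oov_indices
    # Unseen tokens fall into the OOV range; the real layer hashes them
    # across [1, num_oov_indices], this sketch collapses them to index 1.
    return lambda token: table.get(token, 1)

lookup = make_lookup(["earth", "wind", "and", "fire"])
rows = [["earth", "wind", "and", "fire"], ["fire", "and", "earth", "michigan"]]
print([[lookup(t) for t in row] for row in rows])
# -> [[2, 3, 4, 5], [5, 4, 2, 1]], matching expected_output in the tests
```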
Qub3k/subjective-exp-consistency-check
|
[
"ad159e9ed161e7f04016cc053d90b8e20f6963ed"
] |
[
"qnormal.py"
] |
[
"# Authors: Krzysztof Rusek <[email protected]>\n# Jakub Nawała <[email protected]>\n\nimport numpy as np\nimport probability_grid_estimation as pge\n\n\ndef prob(psi, sigma, cdf=False):\n \"\"\"\n\n :param psi: QNormal parameter, vector\n :param sigma: QNormal parameter, vector\n :param cdf: If true return pdf\n :return: probabilities\n \"\"\"\n grid = pge.get_each_answer_probability_for_qnormal([psi], [sigma])\n probs = grid.to_numpy(dtype=np.float64)[0]\n if cdf:\n probs = np.cumsum(probs, axis=-1)\n return probs\n\n\ndef sample(psi, sigma, experiments, n):\n \"\"\"\n\n :param psi: GSD parameter\n :param sigma: GSD parameter\n :param experiments: Number of testers\n :param n: number of samples\n :return: random sample from the QNormal distribution\n \"\"\"\n\n probs = prob(psi, sigma)\n s = np.random.multinomial(experiments, probs, size=(n))\n return s\n"
] |
[
[
"numpy.random.multinomial",
"numpy.cumsum"
]
] |
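The sampling step in qnormal.py above is a single call to np.random.multinomial over the per-answer probabilities. A self-contained sketch, assuming a stubbed probability vector in place of pge.get_each_answer_probability_for_qnormal (that helper is not shown in this row):

```python
import numpy as np

# Stand-in for pge.get_each_answer_probability_for_qnormal: a toy
# probability vector over 5 answer categories (assumption; the real helper
# derives these from the QNormal parameters psi and sigma).
probs = np.array([0.05, 0.15, 0.4, 0.3, 0.1])

experiments = 24  # number of testers per sample
n = 3             # number of samples

# Each row is one simulated experiment: counts over the 5 categories that
# sum to `experiments`, exactly the shape sample() in qnormal.py returns.
s = np.random.multinomial(experiments, probs, size=n)
print(s.shape)        # (3, 5)
print(s.sum(axis=1))  # [24 24 24]
```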
mukaiu/PaddleNLP
|
[
"0315365dbafa6e3b1c7147121ba85e05884125a5",
"0315365dbafa6e3b1c7147121ba85e05884125a5",
"0315365dbafa6e3b1c7147121ba85e05884125a5",
"0315365dbafa6e3b1c7147121ba85e05884125a5"
] |
[
"paddlenlp/utils/tools.py",
"examples/semantic_indexing/ann_util.py",
"model_zoo/bert/run_pretrain.py",
"examples/language_model/bigbird/run_glue.py"
] |
[
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport paddle\nfrom .log import logger\n\n\ndef static_params_to_dygraph(model, static_tensor_dict):\n \"\"\"Simple tool for convert static paramters to dygraph paramters dict.\n\n **NOTE** The model must both support static graph and dygraph mode.\n\n Args:\n model (nn.Layer): the model of a neural network.\n static_tensor_dict (string): path of which locate the saved paramters in static mode.\n Usualy load by `paddle.static.load_program_state`.\n\n Returns:\n [tensor dict]: a state dict the same as the dygraph mode.\n \"\"\"\n state_dict = model.state_dict()\n # static_tensor_dict = paddle.static.load_program_state(static_params_path)\n\n ret_dict = dict()\n for n, p in state_dict.items():\n if p.name not in static_tensor_dict:\n logger.info(\"%s paramter is missing from you state dict.\" % n)\n continue\n ret_dict[n] = static_tensor_dict[p.name]\n\n return ret_dict\n\n\ndef dygraph_params_to_static(model, dygraph_tensor_dict, topo=None):\n \"\"\"Simple tool for convert dygraph paramters to static paramters dict.\n\n **NOTE** The model must both support static graph and dygraph mode.\n\n Args:\n model (nn.Layer): the model of a neural network.\n dygraph_tensor_dict (string): path of which locate the saved paramters in static mode.\n\n Returns:\n [tensor dict]: a state dict the same as the dygraph mode.\n \"\"\"\n state_dict = model.state_dict()\n\n ret_dict = dict()\n for name, parm in state_dict.items():\n if name not in dygraph_tensor_dict:\n logger.info(\"%s paramter is missing from you state dict.\" % name)\n continue\n\n tensor = dygraph_tensor_dict[name]\n if parm.is_distributed:\n assert topo is not None\n for dim, v in enumerate(tensor.shape):\n if parm.shape[dim] != v:\n break\n\n splited = np.split(tensor, topo.mp_info.size,\n axis=dim)[topo.mp_info.rank]\n ret_dict[parm.name] = splited\n else:\n ret_dict[parm.name] = tensor\n\n return ret_dict\n\n\nclass TimeCostAverage(object):\n \"\"\"\n Simple tool for calcluating time average cost in the process of training and inferencing.\n \"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n \"\"\"\n Reset the recoder state, and reset the `cnt` to zero.\n \"\"\"\n self.cnt = 0\n self.total_time = 0\n\n def record(self, usetime):\n \"\"\"\n Recoding the time cost in current step and accumulating the `cnt`.\n \"\"\"\n self.cnt += 1\n self.total_time += usetime\n\n def get_average(self):\n \"\"\"\n Returning the average time cost after the start of training.\n \"\"\"\n if self.cnt == 0:\n return 0\n return self.total_time / self.cnt\n\n\ndef get_env_device():\n \"\"\"\n Return the device name of running enviroment.\n \"\"\"\n if paddle.is_compiled_with_cuda():\n return 'gpu'\n elif paddle.is_compiled_with_npu():\n return 'npu'\n elif paddle.is_compiled_with_rocm():\n return 'rocm'\n elif paddle.is_compiled_with_xpu():\n return 'xpu'\n return 'cpu'\n\n\ndef compare_version(version, 
pair_version):\n \"\"\"\n Args:\n version (str): The first version string needed to be compared.\n The format of version string should be as follow : \"xxx.yyy.zzz\".\n pair_version (str): The second version string needed to be compared.\n The format of version string should be as follow : \"xxx.yyy.zzz\".\n Returns:\n int: The result of comparasion. 1 means version > pair_version; 0 means\n version = pair_version; -1 means version < pair_version.\n \n Examples:\n >>> compare_version(\"2.2.1\", \"2.2.0\")\n >>> 1\n >>> compare_version(\"2.2.0\", \"2.2.0\")\n >>> 0\n >>> compare_version(\"2.2.0-rc0\", \"2.2.0\")\n >>> -1\n >>> compare_version(\"2.3.0-rc0\", \"2.2.0\")\n >>> 1\n \"\"\"\n version = version.strip()\n pair_version = pair_version.strip()\n if version == pair_version:\n return 0\n version_list = version.split(\".\")\n pair_version_list = pair_version.split(\".\")\n for version_code, pair_version_code in zip(version_list, pair_version_list):\n if not version_code.isnumeric():\n return -1\n if not pair_version_code.isnumeric():\n return 1\n if int(version_code) > int(pair_version_code):\n return 1\n elif int(version_code) < int(pair_version_code):\n return -1\n return 0\n\n\ndef get_bool_ids_greater_than(probs, limit=0.5, return_prob=False):\n \"\"\"\n Get idx of the last dimension in probability arrays, which is greater than a limitation.\n\n Args:\n probs (List[List[float]]): The input probability arrays.\n limit (float): The limitation for probability.\n return_prob (bool): Whether to return the probability\n Returns:\n List[List[int]]: The index of the last dimension meet the conditions.\n \"\"\"\n probs = np.array(probs)\n dim_len = len(probs.shape)\n if dim_len > 1:\n result = []\n for p in probs:\n result.append(get_bool_ids_greater_than(p, limit, return_prob))\n return result\n else:\n result = []\n for i, p in enumerate(probs):\n if p > limit:\n if return_prob:\n result.append((i, p))\n else:\n result.append(i)\n return result\n\n\ndef get_span(start_ids, end_ids, with_prob=False):\n \"\"\"\n Get span set from position start and end list.\n\n Args:\n start_ids (List[int]/List[tuple]): The start index list.\n end_ids (List[int]/List[tuple]): The end index list.\n with_prob (bool): If True, each element for start_ids and end_ids is a tuple aslike: (index, probability).\n Returns:\n set: The span set without overlapping, every id can only be used once .\n \"\"\"\n if with_prob:\n start_ids = sorted(start_ids, key=lambda x: x[0])\n end_ids = sorted(end_ids, key=lambda x: x[0])\n else:\n start_ids = sorted(start_ids)\n end_ids = sorted(end_ids)\n\n start_pointer = 0\n end_pointer = 0\n len_start = len(start_ids)\n len_end = len(end_ids)\n couple_dict = {}\n while start_pointer < len_start and end_pointer < len_end:\n if with_prob:\n start_id = start_ids[start_pointer][0]\n end_id = end_ids[end_pointer][0]\n else:\n start_id = start_ids[start_pointer]\n end_id = end_ids[end_pointer]\n\n if start_id == end_id:\n couple_dict[end_ids[end_pointer]] = start_ids[start_pointer]\n start_pointer += 1\n end_pointer += 1\n continue\n if start_id < end_id:\n couple_dict[end_ids[end_pointer]] = start_ids[start_pointer]\n start_pointer += 1\n continue\n if start_id > end_id:\n end_pointer += 1\n continue\n result = [(couple_dict[end], end) for end in couple_dict]\n result = set(result)\n return result\n",
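get_span in tools.py above pairs sorted start and end indices with two pointers so that each id is consumed at most once. A distilled standalone version of that pairing logic, minus the with_prob branch (pair_spans is a hypothetical name, not part of tools.py):

```python
def pair_spans(start_ids, end_ids):
    """Two-pointer pairing of sorted start/end ids, as in get_span: equal
    ids form a one-token span, the latest start at or before an end wins,
    and an end with no start to its left is skipped."""
    start_ids, end_ids = sorted(start_ids), sorted(end_ids)
    i = j = 0
    couples = {}
    while i < len(start_ids) and j < len(end_ids):
        if start_ids[i] <= end_ids[j]:
            couples[end_ids[j]] = start_ids[i]  # later starts overwrite earlier ones
            i += 1
            if start_ids[i - 1] == end_ids[j]:
                j += 1
        else:
            j += 1
    return {(start, end) for end, start in couples.items()}

print(pair_spans([1, 5], [3, 7]))  # {(1, 3), (5, 7)}
```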
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# coding=UTF-8\n\nimport numpy as np\nimport hnswlib\nfrom paddlenlp.utils.log import logger\n\n\ndef build_index(args, data_loader, model):\n\n index = hnswlib.Index(space='ip', dim=args.output_emb_size)\n\n # Initializing index\n # max_elements - the maximum number of elements (capacity). Will throw an exception if exceeded\n # during insertion of an element.\n # The capacity can be increased by saving/loading the index, see below.\n #\n # ef_construction - controls index search speed/build speed tradeoff\n #\n # M - is tightly connected with internal dimensionality of the data. Strongly affects memory consumption (~M)\n # Higher M leads to higher accuracy/run_time at fixed ef/efConstruction\n index.init_index(max_elements=args.hnsw_max_elements,\n ef_construction=args.hnsw_ef,\n M=args.hnsw_m)\n\n # Controlling the recall by setting ef:\n # higher ef leads to better accuracy, but slower search\n index.set_ef(args.hnsw_ef)\n\n # Set number of threads used during batch search/construction\n # By default using all available cores\n index.set_num_threads(16)\n\n logger.info(\"start build index..........\")\n\n all_embeddings = []\n\n for text_embeddings in model.get_semantic_embedding(data_loader):\n all_embeddings.append(text_embeddings.numpy())\n\n all_embeddings = np.concatenate(all_embeddings, axis=0)\n index.add_items(all_embeddings)\n\n logger.info(\"Total index number:{}\".format(index.get_current_count()))\n\n return index\n",
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport collections\nimport itertools\nimport logging\nimport os\nimport random\nimport time\nimport h5py\nimport yaml\nimport distutils.util\nfrom functools import partial\nfrom concurrent.futures import ThreadPoolExecutor\n\nimport numpy as np\n\nimport paddle\nimport paddle.distributed as dist\nfrom paddle.io import DataLoader, Dataset\n\nfrom paddlenlp.data import Stack, Tuple, Pad\nfrom paddlenlp.utils import profiler\nfrom paddlenlp.utils.tools import TimeCostAverage\nfrom paddlenlp.transformers import BertForPretraining, BertModel, BertPretrainingCriterion\nfrom paddlenlp.transformers import ErnieForPretraining, ErnieModel, ErniePretrainingCriterion\nfrom paddlenlp.transformers import BertTokenizer, ErnieTokenizer\nfrom paddlenlp.transformers import LinearDecayWithWarmup\n\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\nMODEL_CLASSES = {\n \"bert\":\n (BertModel, BertForPretraining, BertPretrainingCriterion, BertTokenizer),\n \"ernie\":\n (ErnieModel, ErnieForPretraining, ErniePretrainingCriterion, ErnieTokenizer)\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" +\n \", \".join(MODEL_CLASSES.keys()),\n )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \"\n + \", \".join(\n sum([\n list(classes[-1].pretrained_init_configuration.keys())\n for classes in MODEL_CLASSES.values()\n ], [])),\n )\n parser.add_argument(\n \"--input_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input directory where the data will be read from.\",\n )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\n \"The output directory where the model predictions and checkpoints will be written.\",\n )\n\n parser.add_argument(\n \"--max_predictions_per_seq\",\n default=80,\n type=int,\n help=\"The maximum total of masked tokens in input sequence\")\n\n parser.add_argument(\n \"--batch_size\",\n default=8,\n type=int,\n help=\"Batch size per GPU/CPU for training.\",\n )\n parser.add_argument(\"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\"--weight_decay\",\n default=0.0,\n type=float,\n help=\"Weight decay if we apply some.\")\n parser.add_argument(\"--adam_epsilon\",\n default=1e-8,\n type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\",\n default=1.0,\n type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\n \"--num_train_epochs\",\n default=3,\n type=int,\n help=\"Total number of training epochs to perform.\",\n )\n parser.add_argument(\n 
\"--max_steps\",\n default=-1,\n type=int,\n help=\n \"If > 0: set total number of training steps to perform. Override num_train_epochs.\",\n )\n parser.add_argument(\"--warmup_steps\",\n default=0,\n type=int,\n help=\"Linear warmup over warmup_steps.\")\n\n parser.add_argument(\"--logging_steps\",\n type=int,\n default=500,\n help=\"Log every X updates steps.\")\n parser.add_argument(\"--save_steps\",\n type=int,\n default=500,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\"--seed\",\n type=int,\n default=42,\n help=\"random seed for initialization\")\n parser.add_argument(\"--device\",\n type=str,\n default=\"gpu\",\n choices=[\"cpu\", \"gpu\", \"xpu\"],\n help=\"Device for selecting for the training.\")\n parser.add_argument(\"--use_amp\",\n type=distutils.util.strtobool,\n default=False,\n help=\"Enable mixed precision training.\")\n parser.add_argument(\"--scale_loss\",\n type=float,\n default=2**15,\n help=\"The value of scale_loss for fp16.\")\n parser.add_argument(\"--to_static\",\n type=distutils.util.strtobool,\n default=False,\n help=\"Enable training under @to_static.\")\n\n # For benchmark.\n parser.add_argument(\n '--profiler_options',\n type=str,\n default=None,\n help=\n 'The option of profiler, which should be in format \\\"key1=value1;key2=value2;key3=value3\\\".'\n )\n parser.add_argument(\n \"--fuse_transformer\",\n type=distutils.util.strtobool,\n default=False,\n help=\n \"Whether to use FusedTransformerEncoderLayer to replace a TransformerEncoderLayer or not.\"\n )\n args = parser.parse_args()\n return args\n\n\ndef set_seed(args):\n random.seed(args.seed + paddle.distributed.get_rank())\n np.random.seed(args.seed + paddle.distributed.get_rank())\n paddle.seed(args.seed + paddle.distributed.get_rank())\n\n\nclass WorkerInitObj(object):\n\n def __init__(self, seed):\n self.seed = seed\n\n def __call__(self, id):\n np.random.seed(seed=self.seed + id)\n random.seed(self.seed + id)\n\n\ndef create_pretraining_dataset(input_file, max_pred_length, shared_list, args,\n worker_init):\n train_data = PretrainingDataset(input_file=input_file,\n max_pred_length=max_pred_length)\n # files have been sharded, no need to dispatch again\n train_batch_sampler = paddle.io.BatchSampler(train_data,\n batch_size=args.batch_size,\n shuffle=True)\n\n # DataLoader cannot be pickled because of its place.\n # If it can be pickled, use global function instead of lambda and use\n # ProcessPoolExecutor instead of ThreadPoolExecutor to prefetch.\n def _collate_data(data, stack_fn=Stack()):\n num_fields = len(data[0])\n out = [None] * num_fields\n # input_ids, segment_ids, input_mask, masked_lm_positions,\n # masked_lm_labels, next_sentence_labels, mask_token_num\n for i in (0, 1, 2, 5):\n out[i] = stack_fn([x[i] for x in data])\n batch_size, seq_length = out[0].shape\n size = num_mask = sum(len(x[3]) for x in data)\n # Padding for divisibility by 8 for fp16 or int8 usage\n if size % 8 != 0:\n size += 8 - (size % 8)\n # masked_lm_positions\n # Organize as a 1D tensor for gather or use gather_nd\n out[3] = np.full(size, 0, dtype=np.int32)\n # masked_lm_labels\n out[4] = np.full([size, 1], -1, dtype=np.int64)\n mask_token_num = 0\n for i, x in enumerate(data):\n for j, pos in enumerate(x[3]):\n out[3][mask_token_num] = i * seq_length + pos\n out[4][mask_token_num] = x[4][j]\n mask_token_num += 1\n # mask_token_num\n out.append(np.asarray([mask_token_num], dtype=np.float32))\n return out\n\n train_data_loader = DataLoader(dataset=train_data,\n 
batch_sampler=train_batch_sampler,\n collate_fn=_collate_data,\n num_workers=0,\n worker_init_fn=worker_init,\n return_list=True)\n return train_data_loader, input_file\n\n\ndef create_input_specs():\n input_ids = paddle.static.InputSpec(name=\"input_ids\",\n shape=[-1, -1],\n dtype=\"int64\")\n segment_ids = paddle.static.InputSpec(name=\"segment_ids\",\n shape=[-1, -1],\n dtype=\"int64\")\n position_ids = None\n input_mask = paddle.static.InputSpec(name=\"input_mask\",\n shape=[-1, 1, 1, -1],\n dtype=\"float32\")\n masked_lm_positions = paddle.static.InputSpec(name=\"masked_lm_positions\",\n shape=[-1],\n dtype=\"int32\")\n return [\n input_ids, segment_ids, position_ids, input_mask, masked_lm_positions\n ]\n\n\nclass PretrainingDataset(Dataset):\n\n def __init__(self, input_file, max_pred_length):\n self.input_file = input_file\n self.max_pred_length = max_pred_length\n f = h5py.File(input_file, \"r\")\n keys = [\n 'input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions',\n 'masked_lm_ids', 'next_sentence_labels'\n ]\n self.inputs = [np.asarray(f[key][:]) for key in keys]\n f.close()\n\n def __len__(self):\n 'Denotes the total number of samples'\n return len(self.inputs[0])\n\n def __getitem__(self, index):\n\n [\n input_ids, input_mask, segment_ids, masked_lm_positions,\n masked_lm_ids, next_sentence_labels\n ] = [\n input[index].astype(np.int64)\n if indice < 5 else np.asarray(input[index].astype(np.int64))\n for indice, input in enumerate(self.inputs)\n ]\n # TODO: whether to use reversed mask by changing 1s and 0s to be\n # consistent with nv bert\n input_mask = (1 - np.reshape(input_mask.astype(np.float32),\n [1, 1, input_mask.shape[0]])) * -1e9\n\n index = self.max_pred_length\n # store number of masked tokens in index\n # outputs of torch.nonzero diff with that of numpy.nonzero by zip\n padded_mask_indices = (masked_lm_positions == 0).nonzero()[0]\n if len(padded_mask_indices) != 0:\n index = padded_mask_indices[0].item()\n mask_token_num = index\n else:\n index = self.max_pred_length\n mask_token_num = self.max_pred_length\n # masked_lm_labels = np.full(input_ids.shape, -1, dtype=np.int64)\n # masked_lm_labels[masked_lm_positions[:index]] = masked_lm_ids[:index]\n masked_lm_labels = masked_lm_ids[:index]\n masked_lm_positions = masked_lm_positions[:index]\n # softmax_with_cross_entropy enforce last dim size equal 1\n masked_lm_labels = np.expand_dims(masked_lm_labels, axis=-1)\n next_sentence_labels = np.expand_dims(next_sentence_labels, axis=-1)\n\n return [\n input_ids, segment_ids, input_mask, masked_lm_positions,\n masked_lm_labels, next_sentence_labels\n ]\n\n\ndef do_train(args):\n paddle.set_device(args.device)\n if paddle.distributed.get_world_size() > 1:\n paddle.distributed.init_parallel_env()\n\n set_seed(args)\n worker_init = WorkerInitObj(args.seed + paddle.distributed.get_rank())\n\n args.model_type = args.model_type.lower()\n base_class, model_class, criterion_class, tokenizer_class = MODEL_CLASSES[\n args.model_type]\n\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n\n pretrained_models_list = list(\n model_class.pretrained_init_configuration.keys())\n if args.model_name_or_path in pretrained_models_list:\n config = model_class.pretrained_init_configuration[\n args.model_name_or_path]\n config['fuse'] = args.fuse_transformer\n model = model_class(base_class(**config))\n else:\n model = model_class.from_pretrained(args.model_name_or_path)\n criterion = criterion_class(\n getattr(model, 
model_class.base_model_prefix).config[\"vocab_size\"])\n # decorate @to_static for benchmark, skip it by default.\n if args.to_static:\n specs = create_input_specs()\n model = paddle.jit.to_static(model, input_spec=specs)\n logger.info(\n \"Successfully to apply @to_static with specs: {}\".format(specs))\n\n if paddle.distributed.get_world_size() > 1:\n model = paddle.DataParallel(model)\n\n # If use default last_epoch, lr of the first iteration is 0.\n # Use `last_epoch = 0` to be consistent with nv bert.\n num_training_steps = args.max_steps if args.max_steps > 0 else len(\n train_data_loader) * args.num_train_epochs\n\n lr_scheduler = LinearDecayWithWarmup(args.learning_rate,\n num_training_steps,\n args.warmup_steps,\n last_epoch=0)\n\n # Generate parameter names needed to perform weight decay.\n # All bias and LayerNorm parameters are excluded.\n decay_params = [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ]\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n epsilon=args.adam_epsilon,\n parameters=model.parameters(),\n weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in decay_params)\n if args.use_amp:\n scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)\n\n pool = ThreadPoolExecutor(1)\n global_step = 0\n tic_train = time.time()\n for epoch in range(args.num_train_epochs):\n files = [\n os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)\n if os.path.isfile(os.path.join(args.input_dir, f)) and \"train\" in f\n ]\n files.sort()\n num_files = len(files)\n random.Random(args.seed + epoch).shuffle(files)\n f_start_id = 0\n\n shared_file_list = {}\n\n if paddle.distributed.get_world_size() > num_files:\n remainder = paddle.distributed.get_world_size() % num_files\n data_file = files[\n (f_start_id * paddle.distributed.get_world_size() +\n paddle.distributed.get_rank() + remainder * f_start_id) %\n num_files]\n else:\n data_file = files[(f_start_id * paddle.distributed.get_world_size()\n + paddle.distributed.get_rank()) % num_files]\n\n previous_file = data_file\n\n train_data_loader, _ = create_pretraining_dataset(\n data_file, args.max_predictions_per_seq, shared_file_list, args,\n worker_init)\n\n # TODO(guosheng): better way to process single file\n single_file = True if f_start_id + 1 == len(files) else False\n\n for f_id in range(f_start_id, len(files)):\n if not single_file and f_id == f_start_id:\n continue\n if paddle.distributed.get_world_size() > num_files:\n data_file = files[(f_id * paddle.distributed.get_world_size() +\n paddle.distributed.get_rank() +\n remainder * f_id) % num_files]\n else:\n data_file = files[(f_id * paddle.distributed.get_world_size() +\n paddle.distributed.get_rank()) % num_files]\n\n previous_file = data_file\n dataset_future = pool.submit(create_pretraining_dataset, data_file,\n args.max_predictions_per_seq,\n shared_file_list, args, worker_init)\n train_cost_avg = TimeCostAverage()\n reader_cost_avg = TimeCostAverage()\n total_samples = 0\n batch_start = time.time()\n for step, batch in enumerate(train_data_loader):\n train_reader_cost = time.time() - batch_start\n reader_cost_avg.record(train_reader_cost)\n global_step += 1\n (input_ids, segment_ids, input_mask, masked_lm_positions,\n masked_lm_labels, next_sentence_labels,\n masked_lm_scale) = batch\n with paddle.amp.auto_cast(args.use_amp,\n custom_white_list=[\n \"layer_norm\", \"softmax\", \"gelu\",\n \"fused_attention\",\n \"fused_feedforward\"\n ]):\n prediction_scores, 
seq_relationship_score = model(\n input_ids=input_ids,\n token_type_ids=segment_ids,\n attention_mask=input_mask,\n masked_positions=masked_lm_positions)\n loss = criterion(prediction_scores, seq_relationship_score,\n masked_lm_labels, next_sentence_labels,\n masked_lm_scale)\n if args.use_amp:\n scaler.scale(loss).backward()\n scaler.minimize(optimizer, loss)\n else:\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n optimizer.clear_grad()\n total_samples += args.batch_size\n train_run_cost = time.time() - batch_start\n train_cost_avg.record(train_run_cost)\n\n # Profile for model benchmark\n if args.profiler_options is not None:\n profiler.add_profiler_step(args.profiler_options)\n\n if global_step % args.logging_steps == 0:\n if paddle.distributed.get_rank() == 0:\n logger.info(\n \"global step: %d, epoch: %d, batch: %d, loss: %f, \"\n \"avg_reader_cost: %.5f sec, avg_batch_cost: %.5f sec, avg_samples: %.5f, ips: %.5f sequences/sec\"\n % (global_step, epoch, step, loss,\n reader_cost_avg.get_average(),\n train_cost_avg.get_average(), total_samples /\n args.logging_steps, total_samples /\n (args.logging_steps *\n train_cost_avg.get_average())))\n total_samples = 0\n train_cost_avg.reset()\n reader_cost_avg.reset()\n if global_step % args.save_steps == 0 or global_step >= args.max_steps:\n if paddle.distributed.get_rank() == 0:\n output_dir = os.path.join(args.output_dir,\n \"model_%d\" % global_step)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # need better way to get inner model of DataParallel\n model_to_save = model._layers if isinstance(\n model, paddle.DataParallel) else model\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n paddle.save(\n optimizer.state_dict(),\n os.path.join(output_dir, \"model_state.pdopt\"))\n if global_step >= args.max_steps: \n del train_data_loader\n return\n batch_start = time.time()\n\n del train_data_loader\n train_data_loader, data_file = dataset_future.result(timeout=None)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n print(args)\n do_train(args)\n",
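_collate_data in run_pretrain.py above flattens per-example masked positions into 1D gather indices (i * seq_length + pos) and pads the mask count up to a multiple of 8 for fp16/int8 kernels. A small numpy sketch of just that transformation on toy data:

```python
import numpy as np

seq_length = 8
# Per-example masked positions and their label ids (toy data).
masked_positions = [[1, 4], [2]]
masked_label_ids = [[11, 12], [21]]

size = sum(len(p) for p in masked_positions)
if size % 8 != 0:  # pad so fp16/int8 kernels see a multiple of 8
    size += 8 - (size % 8)

flat_positions = np.full(size, 0, dtype=np.int32)
flat_labels = np.full([size, 1], -1, dtype=np.int64)
k = 0
for i, (positions, labels) in enumerate(zip(masked_positions, masked_label_ids)):
    for pos, label in zip(positions, labels):
        flat_positions[k] = i * seq_length + pos  # index into the flattened batch
        flat_labels[k] = label
        k += 1

print(flat_positions)           # [ 1  4 10  0  0  0  0  0], first 3 are real positions
print(flat_labels[:k].ravel())  # [11 12 21]
```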
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport os\nimport sys\nimport random\nimport time\nimport math\nimport distutils.util\nfrom functools import partial\n\nimport numpy as np\nimport paddle\nfrom paddle.io import DataLoader\nfrom paddle.metric import Metric, Accuracy, Precision, Recall\n\nfrom paddlenlp.datasets import load_dataset\nfrom paddlenlp.data import Stack, Tuple, Pad, Dict\nfrom paddlenlp.data.sampler import SamplerHelper\nfrom paddlenlp.transformers import BigBirdModel, BigBirdForSequenceClassification, BigBirdTokenizer\nfrom paddlenlp.transformers import create_bigbird_rand_mask_idx_list\nfrom paddlenlp.transformers import LinearDecayWithWarmup\nfrom paddlenlp.metrics import AccuracyAndF1, Mcc, PearsonAndSpearman\nfrom paddlenlp.utils.log import logger\n\nimport args\n\nMETRIC_CLASSES = {\n \"cola\": Mcc,\n \"sst-2\": Accuracy,\n \"mrpc\": AccuracyAndF1,\n \"sts-b\": PearsonAndSpearman,\n \"qqp\": AccuracyAndF1,\n \"mnli\": Accuracy,\n \"qnli\": Accuracy,\n \"rte\": Accuracy,\n}\n\nMODEL_CLASSES = {\n \"bigbird\": (BigBirdForSequenceClassification, BigBirdTokenizer),\n}\n\n\ndef set_seed(args):\n # Use the same data seed(for data shuffle) for all procs to guarantee data\n # consistency after sharding.\n random.seed(args.seed)\n np.random.seed(args.seed)\n # Maybe different op seeds(for dropout) for different procs is better. 
By:\n # `paddle.seed(args.seed + paddle.distributed.get_rank())`\n paddle.seed(args.seed)\n\n\ndef convert_example(example,\n tokenizer,\n label_list,\n max_seq_length=512,\n is_test=False):\n \"\"\"convert a glue example into necessary features\"\"\"\n if not is_test:\n # `label_list == None` is for regression task\n label_dtype = \"int64\" if label_list else \"float32\"\n # Get the label\n label = example['labels']\n label = np.array([label], dtype=label_dtype)\n # Convert raw text to feature\n input_ids = [tokenizer.cls_id]\n token_type_ids = None\n\n if (int(is_test) + len(example)) == 2:\n input_ids.extend(\n tokenizer.convert_tokens_to_ids(\n tokenizer(example['sentence'])[:max_seq_length - 2]))\n input_ids.append(tokenizer.sep_id)\n input_len = len(input_ids)\n token_type_ids = input_len * [0]\n else:\n input_ids1 = tokenizer.convert_tokens_to_ids(\n tokenizer(example['sentence1']))\n input_ids2 = tokenizer.convert_tokens_to_ids(\n tokenizer(example['sentence2']))\n total_len = len(input_ids1) + len(\n input_ids2) + tokenizer.num_special_tokens_to_add(pair=True)\n if total_len > max_seq_length:\n input_ids1, input_ids2, _ = tokenizer.truncate_sequences(\n input_ids1, input_ids2, total_len - max_seq_length)\n input_ids.extend(input_ids1)\n input_ids.append(tokenizer.sep_id)\n input_len1 = len(input_ids)\n\n input_ids.extend(input_ids2)\n input_ids.append(tokenizer.sep_id)\n input_len2 = len(input_ids) - input_len1\n\n token_type_ids = input_len1 * [0] + input_len2 * [1]\n\n input_len = len(input_ids)\n if input_len < max_seq_length:\n input_ids.extend([tokenizer.pad_id] * (max_seq_length - input_len))\n token_type_ids.extend([tokenizer.pad_token_type_id] *\n (max_seq_length - input_len))\n\n if not is_test:\n return input_ids, token_type_ids, label\n else:\n return input_ids, token_type_ids\n\n\ndef collect_data(samples, dataset, config):\n stack_fn = Stack(dtype=\"int64\" if dataset.label_list else \"float32\")\n stack_fn1 = Stack()\n\n num_fields = len(samples[0])\n out = [None] * num_fields\n out[0] = stack_fn1([x[0] for x in samples]) # input_ids\n out[1] = stack_fn1([x[1] for x in samples]) # token_type_ids\n if num_fields >= 2:\n out[2] = stack_fn(x[2] for x in samples) # labels\n seq_len = len(out[0][0])\n # Construct the random attention mask for the random attention\n rand_mask_idx_list = create_bigbird_rand_mask_idx_list(\n config[\"num_layers\"], seq_len, seq_len, config[\"nhead\"],\n config[\"block_size\"], config[\"window_size\"],\n config[\"num_global_blocks\"], config[\"num_rand_blocks\"], config[\"seed\"])\n out.extend(rand_mask_idx_list)\n return out\n\n\[email protected]_grad()\ndef evaluate(model, loss_fct, metric, data_loader):\n model.eval()\n metric.reset()\n for batch in data_loader:\n input_ids, segment_ids, labels = batch[:3]\n rand_mask_idx_list = batch[3:]\n # run forward\n logits = model(input_ids,\n segment_ids,\n rand_mask_idx_list=rand_mask_idx_list)\n loss = loss_fct(logits, labels)\n correct = metric.compute(logits, labels)\n metric.update(correct)\n res = metric.accumulate()\n if isinstance(metric, AccuracyAndF1):\n logger.info(\n \"eval loss: %f, acc: %s, precision: %s, recall: %s, f1: %s, acc and f1: %s, \"\n % (\n loss.numpy(),\n res[0],\n res[1],\n res[2],\n res[3],\n res[4],\n ))\n elif isinstance(metric, Mcc):\n logger.info(\"eval loss: %f, mcc: %s, \" % (loss.numpy(), res[0]))\n elif isinstance(metric, PearsonAndSpearman):\n logger.info(\n \"eval loss: %f, pearson: %s, spearman: %s, pearson and spearman: %s, \"\n % (loss.numpy(), res[0], 
res[1], res[2]))\n else:\n logger.info(\"eval loss: %f, acc: %s, \" % (loss.numpy(), res))\n model.train()\n\n\ndef do_train(args):\n paddle.set_device(args.device)\n worker_num = paddle.distributed.get_world_size()\n if worker_num > 1:\n paddle.distributed.init_parallel_env()\n\n set_seed(args)\n\n args.task_name = args.task_name.lower()\n metric_class = METRIC_CLASSES[args.task_name]\n args.model_type = args.model_type.lower()\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n\n train_ds = load_dataset('glue', args.task_name, splits=\"train\")\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n\n num_classes = 1 if train_ds.label_list == None else len(train_ds.label_list)\n # In finetune task, bigbird performs better when setting dropout to zero.\n model = model_class.from_pretrained(args.model_name_or_path,\n num_classes=num_classes,\n attn_dropout=0.0,\n hidden_dropout_prob=0.0)\n if worker_num > 1:\n model = paddle.DataParallel(model)\n config = getattr(model, model_class.base_model_prefix).config\n\n trans_func = partial(convert_example,\n tokenizer=tokenizer,\n label_list=train_ds.label_list,\n max_seq_length=args.max_encoder_length)\n train_ds = train_ds.map(trans_func, lazy=True)\n train_batch_sampler = paddle.io.DistributedBatchSampler(\n train_ds, batch_size=args.batch_size, shuffle=True)\n batchify_fn = partial(collect_data, dataset=train_ds, config=config)\n\n train_data_loader = DataLoader(dataset=train_ds,\n batch_sampler=train_batch_sampler,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n\n if args.task_name == \"mnli\":\n dev_ds_matched, dev_ds_mismatched = load_dataset(\n 'glue', args.task_name, splits=[\"dev_matched\", \"dev_mismatched\"])\n\n dev_ds_matched = dev_ds_matched.map(trans_func, lazy=True)\n dev_ds_mismatched = dev_ds_mismatched.map(trans_func, lazy=True)\n dev_batch_sampler_matched = paddle.io.BatchSampler(\n dev_ds_matched, batch_size=args.batch_size, shuffle=False)\n dev_data_loader_matched = DataLoader(\n dataset=dev_ds_matched,\n batch_sampler=dev_batch_sampler_matched,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n dev_batch_sampler_mismatched = paddle.io.BatchSampler(\n dev_ds_mismatched, batch_size=args.batch_size, shuffle=False)\n dev_data_loader_mismatched = DataLoader(\n dataset=dev_ds_mismatched,\n batch_sampler=dev_batch_sampler_mismatched,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n else:\n dev_ds = load_dataset('glue', args.task_name, splits='dev')\n dev_ds = dev_ds.map(trans_func, lazy=True)\n dev_batch_sampler = paddle.io.BatchSampler(dev_ds,\n batch_size=args.batch_size,\n shuffle=False)\n dev_data_loader = DataLoader(dataset=dev_ds,\n batch_sampler=dev_batch_sampler,\n collate_fn=batchify_fn,\n num_workers=0,\n return_list=True)\n\n num_training_steps = args.max_steps if args.max_steps > 0 else (\n len(train_data_loader) * args.epochs)\n warmup = args.warmup_steps if args.warmup_steps > 0 else args.warmup_proportion\n\n lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,\n warmup)\n\n # Generate parameter names needed to perform weight decay.\n # All bias and LayerNorm parameters are excluded.\n decay_params = [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ]\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n beta1=0.9,\n beta2=0.999,\n epsilon=args.adam_epsilon,\n parameters=model.parameters(),\n weight_decay=args.weight_decay,\n 
apply_decay_param_fun=lambda x: x in decay_params)\n\n loss_fct = paddle.nn.loss.CrossEntropyLoss(\n ) if train_ds.label_list else paddle.nn.loss.MSELoss()\n\n metric = metric_class()\n global_step = 0\n tic_train = time.time()\n for epoch in range(args.epochs):\n for step, batch in enumerate(train_data_loader):\n global_step += 1\n input_ids, segment_ids, labels = batch[:3]\n rand_mask_idx_list = batch[3:]\n # run forward\n logits = model(input_ids,\n segment_ids,\n rand_mask_idx_list=rand_mask_idx_list)\n loss = loss_fct(logits, labels)\n # run backward and update params\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n optimizer.clear_grad()\n\n if global_step % args.logging_steps == 0:\n logger.info(\n \"global step %d/%d, epoch: %d, batch: %d, rank_id: %s, loss: %f, lr: %.10f, speed: %.4f step/s\"\n % (global_step, num_training_steps, epoch, step,\n paddle.distributed.get_rank(), loss, optimizer.get_lr(),\n args.logging_steps / (time.time() - tic_train)))\n tic_train = time.time()\n if global_step % args.save_steps == 0 or global_step == num_training_steps:\n tic_eval = time.time()\n if args.task_name == \"mnli\":\n evaluate(model, loss_fct, metric, dev_data_loader_matched)\n evaluate(model, loss_fct, metric,\n dev_data_loader_mismatched)\n logger.info(\"eval done total : %s s\" %\n (time.time() - tic_eval))\n else:\n evaluate(model, loss_fct, metric, dev_data_loader)\n logger.info(\"eval done total : %s s\" %\n (time.time() - tic_eval))\n if paddle.distributed.get_rank() == 0:\n output_dir = os.path.join(\n args.output_dir, \"%s_ft_model_%d.pdparams\" %\n (args.task_name, global_step))\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # Need better way to get inner model of DataParallel\n model_to_save = model._layers if isinstance(\n model, paddle.DataParallel) else model\n model_to_save.save_pretrained(output_dir)\n tokenizer.save_pretrained(output_dir)\n\n\ndef print_arguments(args):\n \"\"\"print arguments\"\"\"\n print('----------- Configuration Arguments -----------')\n for arg, value in sorted(vars(args).items()):\n print('%s: %s' % (arg, value))\n print('------------------------------------------------')\n\n\nif __name__ == \"__main__\":\n args = args.parse_args()\n print_arguments(args)\n assert args.device in [\n \"cpu\", \"gpu\", \"xpu\"\n ], \"Invalid device! Available device should be cpu, gpu, or xpu.\"\n do_train(args)\n"
] |
[
[
"numpy.array",
"numpy.split"
],
[
"numpy.concatenate"
],
[
"numpy.random.seed",
"numpy.full",
"numpy.asarray",
"numpy.expand_dims"
],
[
"numpy.random.seed",
"numpy.array"
]
] |
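The `convert_example` helper in the entry above relies on `tokenizer.truncate_sequences` to fit a sentence pair plus special tokens into `max_seq_length`. A minimal standalone sketch of the usual longest-first strategy is below; `truncate_pair` is a hypothetical stand-in for illustration, not the PaddleNLP API.

```python
def truncate_pair(ids1, ids2, num_tokens_to_remove):
    """Longest-first truncation: repeatedly drop a token from the longer list."""
    ids1, ids2 = list(ids1), list(ids2)
    for _ in range(max(0, num_tokens_to_remove)):
        if len(ids1) >= len(ids2):
            ids1.pop()
        else:
            ids2.pop()
    return ids1, ids2

# fit two sequences plus 3 special tokens ([CLS], [SEP], [SEP]) into 16 positions
a, b = truncate_pair(list(range(10)), list(range(9)), (10 + 9 + 3) - 16)
assert len(a) + len(b) + 3 == 16
```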
yanb514/I24-trajectory-generation
|
[
"3b1e25f94f42f1e761a13ab57c48d362b1eb7bc0"
] |
[
"homography.py"
] |
[
"# Attention interviewers!!! - this code is indicative of how I like to write. Not better, not worse.\n# Judge me based off of this\n# Thanks, Derek Gloudemans 2021\n\nimport torch\nimport numpy as np\nimport cv2\nimport sys, os\nimport csv\n\ndef line_to_point(line,point):\n \"\"\"\n Given a line defined by two points, finds the distance from that line to the third point\n line - (x0,y0,x1,y1) as floats\n point - (x,y) as floats\n Returns\n -------\n distance - float >= 0\n \"\"\"\n \n numerator = np.abs((line[2]-line[0])*(line[1]-point[1]) - (line[3]-line[1])*(line[0]-point[0]))\n denominator = np.sqrt((line[2]-line[0])**2 +(line[3]-line[1])**2)\n \n return numerator / (denominator + 1e-08)\n\ndef find_vanishing_point(lines):\n \"\"\"\n Finds best (L2 norm) vanishing point given a list of lines\n\n Parameters\n ----------\n lines : [(x0,y0,x1,y1), ...]\n\n Returns\n -------\n vp - (x,y)\n \"\"\"\n \n # mx+b form\n #y0 = ax + c\n #y1 = bx + d\n \n line0 = lines[0]\n line1 = lines[1]\n a = (line0[3] - line0[1])/line0[2] - line0[0]\n b = (line1[3] - line1[1])/line1[2] - line1[0]\n c = line0[1] - a*line0[0]\n d = line1[1] - c*line1[0]\n \n # intersection\n px = (d-c)/(a-b)\n py = a*(d-c)/(a-b) + c\n best_dist = np.inf\n \n # using intersection as starting point, grid out a grid of 11 x 11 points with spacing g\n g = 1e+16\n n_pts = 31\n \n while g > 1:\n #print(\"Gridding at g = {}\".format(g))\n\n # create grid centered around px,py with spacing g\n \n x_pts = np.arange(px-g*(n_pts//2),px+g*(n_pts//2),g)\n y_pts = np.arange(py-g*(n_pts//2),py+g*(n_pts//2),g)\n \n for x in x_pts:\n for y in y_pts:\n # for each point in grid, compute average distance to vanishing point\n dist = 0\n for line in lines:\n dist += line_to_point(line,(x,y))**2\n \n # keep best point in grid\n if dist < best_dist:\n px = x \n py = y\n best_dist = dist\n #print(\"Best vp so far: ({},{}), with average distance {}\".format(px,py,np.sqrt(dist/len(lines))))\n \n # regrid\n g = g / 10.0\n \n return [px,py]\n\nclass Homography():\n \"\"\"\n Homographer provides utiliites for converting between image,space, and state coordinates\n One homographer object corresponds to a single space/state formulation but\n can have multiple camera/image correspondences\n \"\"\"\n\n def __init__(self,f1 = None,f2 = None):\n \"\"\"\n Initializes Homgrapher object. 
\n \n f1 - arbitrary function that converts a [d,m,3] matrix of points in space \n to a [d,m,s] matrix in state formulation\n f2 - arbitrary function that converts [d,m,s] matrix into [d,m,3] matrix in space\n \n where d is the number of objects\n m is the number of points per object\n s is the state size\n\n returns - nothing\n\n \"\"\"\n \n if f1 is not None:\n self.f1 = f1\n self.f2 = f2\n \n else:\n self.f1 = self.i24_space_to_state\n self.f2 = self.i24_state_to_space\n \n # each correspondence is: name: {H,H_inv,P,corr_pts,space_pts,vps} \n # where H and H inv are 3x34 planar homography matrices and P is a 3x4 projection matrix\n self.correspondence = {}\n \n self.class_heights = {\n \"sedan\":4,\n \"midsize\":5,\n \"van\":6,\n \"pickup\":5,\n \"semi\":12,\n \"truck (other)\":12,\n \"truck\": 12,\n \"motorcycle\":4,\n \"trailer\":3,\n \"other\":5\n }\n \n \n self.class_dims = {\n \"sedan\":[16,6,4],\n \"midsize\":[18,6.5,5],\n \"van\":[20,6,6.5],\n \"pickup\":[20,6,5],\n \"semi\":[55,9,12],\n \"truck (other)\":[25,9,12],\n \"truck\": [25,9,12],\n \"motorcycle\":[7,3,4],\n \"trailer\":[16,7,3],\n \"other\":[18,6.5,5]\n }\n \n self.class_dict = { \"sedan\":0,\n \"midsize\":1,\n \"van\":2,\n \"pickup\":3,\n \"semi\":4,\n \"truck (other)\":5,\n \"truck\": 5,\n \"motorcycle\":6,\n \"trailer\":7,\n 0:\"sedan\",\n 1:\"midsize\",\n 2:\"van\",\n 3:\"pickup\",\n 4:\"semi\",\n 5:\"truck (other)\",\n 6:\"motorcycle\",\n 7:\"trailer\"\n }\n \n self.default_correspondence = None\n \n def add_i24_camera(self,point_path,vp_path,camera_name):\n # load points\n corr_pts= []\n space_pts = []\n with open(point_path,\"r\") as f:\n lines = f.readlines()\n \n for line in lines[1:-4]:\n line = line.rstrip(\"\\n\").split(\",\")\n corr_pts.append ([float(line[0]),float(line[1])])\n space_pts.append([int(line[2]),int(line[3])])\n \n # load vps\n lines1 = []\n lines2 = []\n lines3 = []\n with open(vp_path,\"r\") as f:\n read = csv.reader(f)\n for item in read:\n if item[4] == '0':\n lines1.append(np.array(item).astype(float))\n elif item[4] == '1':\n lines2.append(np.array(item).astype(float))\n elif item[4] == '2':\n lines3.append(np.array(item).astype(float))\n \n # get all axis labels for a particular axis orientation\n vp1 = find_vanishing_point(lines1)\n vp2 = find_vanishing_point(lines2)\n vp3 = find_vanishing_point(lines3)\n vps = [vp1,vp2,vp3]\n \n self.add_correspondence(corr_pts,space_pts,vps,name = camera_name)\n \n \n def i24_space_to_state(self,points):\n \"\"\"\n points - [d,8,3] array of x,y,z points for fbr,fbl,bbr,bbl,ftr,ftl,fbr,fbl\n \n returns - [d,6] array of points in state formulation\n \"\"\"\n d = points.shape[0]\n new_pts = torch.zeros([d,6])\n \n # rear center bottom of vehicle is (x,y)\n \n # x is computed as average of two bottom rear points\n new_pts[:,0] = (points[:,2,0] + points[:,3,0]) / 2.0\n \n # y is computed as average 4 bottom point y values\n new_pts[:,1] = (points[:,0,1] + points[:,1,1] +points[:,2,1] + points[:,3,1]) / 4.0\n \n # l is computed as avg length between bottom front and bottom rear\n new_pts[:,2] = torch.abs ( ((points[:,0,0] + points[:,1,0]) - (points[:,2,0] + points[:,3,0]))/2.0 )\n \n # w is computed as avg length between botom left and bottom right\n new_pts[:,3] = torch.abs( ((points[:,0,1] + points[:,2,1]) - (points[:,1,1] + points[:,3,1]))/2.0)\n\n # h is computed as avg length between all top and all bottom points\n new_pts[:,4] = torch.mean(torch.abs( (points[:,0:4,2] - points[:,4:8,2])),dim = 1)\n \n # direction is +1 if vehicle is traveling along 
direction of increasing x, otherwise -1\n new_pts[:,5] = torch.sign( ((points[:,0,0] + points[:,1,0]) - (points[:,2,0] + points[:,3,0]))/2.0 ) \n \n return new_pts\n \n def i24_state_to_space(self,points):\n d = points.shape[0]\n new_pts = torch.zeros([d,8,3])\n \n # assign x values\n new_pts[:,[0,1,4,5],0] = (points[:,0] + points[:,5]*points[:,2]).unsqueeze(1).repeat(1,4)\n new_pts[:,[2,3,6,7],0] = (points[:,0]).unsqueeze(1).repeat(1,4)\n \n # assign y values\n new_pts[:,[0,2,4,6],1] = (points[:,1] - points[:,5]*points[:,3]/2.0).unsqueeze(1).repeat(1,4)\n new_pts[:,[1,3,5,7],1] = (points[:,1] + points[:,5]*points[:,3]/2.0).unsqueeze(1).repeat(1,4)\n \n # assign z values\n new_pts[:,4:8,2] = -(points[:,4]).unsqueeze(1).repeat(1,4) \n \n return new_pts\n \n \n def space_to_state(self,points):\n \"\"\"\n points - [d,m,3] matrix of points in 3-space\n \"\"\"\n return self.f1(points)\n \n def state_to_space(self,points):\n \"\"\"\n points - [d,m,s] matrix of points in state formulation\n \"\"\"\n return self.f2(points)\n \n\n def add_correspondence(self,corr_pts,space_pts,vps,name = None):\n \"\"\"\n corr_pts - \n space_pts - \n vps -\n name - str, preferably camera name e.g. p1c4\n \"\"\"\n \n if name is None:\n name = self.default_correspondence\n \n corr_pts = np.stack(corr_pts)\n space_pts = np.stack(space_pts)\n cor = {}\n cor[\"vps\"] = vps\n cor[\"corr_pts\"] = corr_pts\n cor[\"space_pts\"] = space_pts\n \n cor[\"H\"],_ = cv2.findHomography(corr_pts,space_pts)\n cor[\"H_inv\"],_ = cv2.findHomography(space_pts,corr_pts)\n \n \n # P is a [3,4] matrix \n # column 0 - vanishing point for space x-axis (axis 0) in image coordinates (im_x,im_y,im_scale_factor)\n # column 1 - vanishing point for space y-axis (axis 1) in image coordinates (im_x,im_y,im_scale_factor)\n # column 2 - vanishing point for space z-axis (axis 2) in image coordinates (im_x,im_y,im_scale_factor)\n # column 3 - space origin in image coordinates (im_x,im_y,scale_factor)\n # columns 0,1 and 3 are identical to the columns of H, \n # We simply insert the z-axis column (im_x,im_y,1) as the new column 2\n \n P = np.zeros([3,4])\n P[:,0] = cor[\"H_inv\"][:,0]\n P[:,1] = cor[\"H_inv\"][:,1]\n P[:,3] = cor[\"H_inv\"][:,2]\n P[:,2] = np.array([vps[2][0],vps[2][1],1]) * 0.01\n cor[\"P\"] = P\n \n self.correspondence[name] = cor\n \n if self.default_correspondence is None:\n self.default_correspondence = name\n \n \n \n def remove_correspondence(self,name): \n try:\n del self.correspondences[name]\n print(\"Deleted correspondence for {}\".format(name))\n except KeyError:\n print(\"Tried to delete correspondence {}, but this does not exist\".format(name))\n \n \n # TODO - finish implementation!\n def im_to_space(self,points, name = None,heights = None):\n \"\"\"\n Converts points by means of ____________\n \n points - [d,m,2] array of points in image\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n \n d = points.shape[0]\n \n # convert points into size [dm,3]\n points = points.reshape(-1,2).double()\n points = torch.cat((points,torch.ones([points.shape[0],1]).double()),1) # add 3rd row\n \n if heights is not None:\n H = torch.from_numpy(self.correspondence[name][\"H\"]).transpose(0,1)\n new_pts = torch.matmul(points,H)\n \n # divide each point 0th and 1st column by the 2nd column\n new_pts[:,0] = new_pts[:,0] / new_pts[:,2]\n new_pts[:,1] = new_pts[:,1] / new_pts[:,2]\n \n # drop scale factor column\n new_pts = new_pts[:,:2] \n \n # reshape to [d,m,2]\n new_pts = new_pts.reshape(d,-1,2)\n \n # add third column for 
height\n new_pts = torch.cat((new_pts,torch.zeros([d,new_pts.shape[1],1]).double()),2)\n \n new_pts[:,[4,5,6,7],2] = heights.unsqueeze(1).repeat(1,4).double()\n \n else:\n print(\"No heights were input\")\n return\n \n return new_pts\n \n \n def space_to_im(self,points,name = None):\n \"\"\"\n Projects 3D space points into image/correspondence using P:\n new_pts = P x points T ---> [dm,3] T = [3,4] x [4,dm]\n performed by flattening batch dimension d and object point dimension m together\n \n points - [d,m,3] array of points in 3-space\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n d = points.shape[0]\n \n # convert points into size [dm,4]\n points = points.reshape(-1,3)\n points = torch.cat((points.double(),torch.ones([points.shape[0],1]).double()),1) # add 4th row\n \n # [dm,3]\n points = torch.transpose(points,0,1).double()\n \n # project into [dm,3]\n P = torch.from_numpy(self.correspondence[name][\"P\"]).double()\n new_pts= torch.matmul(P,points).transpose(0,1)\n \n # divide each point 0th and 1st column by the 2nd column\n new_pts[:,0] = new_pts[:,0] / new_pts[:,2]\n new_pts[:,1] = new_pts[:,1] / new_pts[:,2]\n \n # drop scale factor column\n new_pts = new_pts[:,:2] \n \n # reshape to [d,m,2]\n new_pts = new_pts.reshape(d,-1,2)\n return new_pts\n \n \n def state_to_im(self,points,name = None):\n \"\"\"\n Calls state_to_space, then space_to_im\n \n points - [d,m,s] matrix of points in state formulation\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n return self.space_to_im(self.state_to_space(points),name = name)\n \n \n def im_to_state(self,points,name = None, heights = None):\n \"\"\"\n Calls im_to_space, then space_to_state\n \n points - [d,m,2] array of points in image\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n return self.space_to_state(self.im_to_space(points,heights = heights,name = name))\n \n def guess_heights(self,classes):\n \"\"\"\n classes - [d] vector of string class names\n \n returns - [d] vector of float object height guesses\n \"\"\"\n \n heights = torch.zeros(len(classes))\n \n for i in range(len(classes)):\n try:\n heights[i] = self.class_heights[classes[i]]\n except KeyError:\n heights[i] = self.class_heights[\"other\"]\n \n return heights\n \n def height_from_template(self,template_boxes,template_space_heights,boxes):\n \"\"\"\n Predicts space height of boxes in image space. Given a space height and \n the corresponding image box (and thus image height), the relationship \n between heights in different coordinate systems should be roughly estimable. 
\n This strategy is used to guess the heights of the second set of boxes in\n image space according to : \n template_im_heights:template_space_heights = new_im_heights:new_box heights\n \n template_boxes - [d,m,2,] array of points corresponding to d object boxes \n (typical usage would be to use boxes from previous frame\n or apriori box predictions for current frame))\n template_space_heights - [d] array of corresponding object heights in space\n boxes - [d,m,2] array of points in image\n \n returns\n \n height - [d] array of object heights in space\n \"\"\"\n \n # get rough heights of objects in image\n template_top = torch.mean(template_boxes[:,4:8,:],dim = 1)\n template_bottom = torch.mean(template_boxes[:,0:4,:],dim = 1)\n template_im_height = torch.sum(torch.sqrt(torch.pow((template_top - template_bottom),2)),dim = 1)\n template_ratio = template_im_height / template_space_heights\n \n box_top = torch.mean(boxes[:,4:8,:],dim = 1)\n box_bottom = torch.mean(boxes[:,0:4,:],dim = 1)\n box_height = torch.sum(torch.sqrt(torch.pow((box_top - box_bottom),2)),dim = 1)\n\n\n height = box_height / template_ratio\n return height\n \n \n def test_transformation(self,points,classes = None,name = None, im = None,heights = None, verbose = True):\n \"\"\"\n Transform image -> space -> state -> space -> image and \n outputs the average reprojection error in pixels for top and bottom box coords\n \n points - [d,8,2] array of pixel coordinates corresponding to object corners\n fbr,fbl,bbr,bbl,ftr,ftl,fbr,fbl\n name - str camera/correspondence name\n im- if a cv2-style image is given, will plot original and reprojected boxes \n heights - [d] array of object heights, otherwise heights will be guessed\n based on class\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n \n if heights is None:\n if classes is None:\n print(\"Must either specify heights or classes for boxes\")\n return\n else:\n guess_heights = self.guess_heights(classes)\n \n \n else:\n guess_heights = heights\n \n state_pts = self.im_to_state(points,heights = guess_heights,name = name)\n im_pts_repro = self.state_to_im(state_pts,name = name)\n \n # calc error\n error = torch.abs(points - im_pts_repro) \n bottom_error = torch.sqrt(torch.pow(error[:,:4,0],2) + torch.pow(error[:,:4,1],2)).mean()\n top_error = torch.sqrt(torch.pow(error[:,4:8,0],2) + torch.pow(error[:,4:8,1],2)).mean()\n \n if verbose:\n print(\"Average distance between reprojected points and original points:\")\n print(\"-----------------------------\")\n print(\"Top: {} pixels\".format(top_error))\n print(\"Bottom: {} pixels\".format(bottom_error))\n \n # if image, plot\n if im is not None:\n im = self.plot_boxes(im,points,color = (0,255,0))\n im = self.plot_boxes(im,im_pts_repro,color = (0,0,255))\n \n cv2.imshow(\"frame\",im)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n \n return top_error + bottom_error\n \n \n def scale_Z(self,boxes,heights,name = None, granularity = 1e-06, max_scale = 10):\n \"\"\"\n When a new correspondence is added, the 3rd column of P is off by a scale factor\n relative to the other columns. This function scales P optimally\n to minimize the reprojection errror of the given boxes with the given heights\n \n boxes - [d,8,2] array of image points corresponding to object bounding boxes\n d indexes objects\n heights - [d] array of object heights (in space coordinates e.g. 
feet)\n name - str - correspondence \n granularity - float - controls the minimum step size for grid search \n max_scale - float - roughly, a reasonable upper estimate for the space-unit change\n corresponding to one pixel in the Z direction\n \n returns - None (but alters P in self.correspondence)\n \"\"\"\n if name is None:\n name = self.default_correspondence\n \n P_orig = self.correspondence[name][\"P\"].copy()\n \n upper_bound = max_scale\n lower_bound = granularity\n \n # create a grid of 10 evenly spaced entries between upper and lower bound\n C_grid = np.linspace(lower_bound,upper_bound,num = 10)\n step_size = C_grid[1] - C_grid[0]\n iteration = 1\n \n while step_size > granularity:\n \n best_error = np.inf\n best_C = None\n # for each value of P, get average reprojection error\n for C in C_grid:\n \n # scale P\n P = P_orig.copy()\n P[:,2] *= C\n self.correspondence[name][\"P\"] = P\n \n # test error\n error = self.test_transformation(boxes,name = name, heights = heights,verbose = False)\n \n # if this is the best so far, store it\n if error < best_error:\n best_error = error\n best_C = C\n \n \n # define new upper, lower with width 2*step_size centered on best value\n #print(\"On loop {}: best C so far: {} avg error {}\".format(iteration,best_C,best_error))\n lower_bound = best_C - step_size\n upper_bound = best_C + step_size\n C_grid = np.linspace(lower_bound,upper_bound,num = 10)\n step_size = C_grid[1] - C_grid[0]\n\n #print(\"New C_grid: {}\".format(C_grid.round(4)))\n iteration += 1\n \n \n\n def plot_boxes(self,im,boxes,color = (255,255,255),labels = None,thickness = 1):\n \"\"\"\n As one might expect, plots 3D boxes on input image\n \n im - cv2 matrix-style image\n boxes - [d,8,2] array of image points where d indexes objects\n color - 3-tuple specifying box color to plot\n \"\"\"\n \n DRAW = [[0,1,1,0,1,0,0,0], #bfl\n [0,0,0,1,0,1,0,0], #bfr\n [0,0,0,1,0,0,1,1], #bbl\n [0,0,0,0,0,0,1,1], #bbr\n [0,0,0,0,0,1,1,0], #tfl\n [0,0,0,0,0,0,0,1], #tfr\n [0,0,0,0,0,0,0,1], #tbl\n [0,0,0,0,0,0,0,0]] #tbr\n \n DRAW_BASE = [[0,1,1,1], #bfl\n [0,0,1,1], #bfr\n [0,0,0,1], #bbl\n [0,0,0,0]] #bbr\n \n for idx, bbox_3d in enumerate(boxes):\n \n for a in range(len(bbox_3d)):\n ab = bbox_3d[a]\n for b in range(a,len(bbox_3d)):\n bb = bbox_3d[b]\n if DRAW[a][b] == 1:\n try:\n im = cv2.line(im,(int(ab[0]),int(ab[1])),(int(bb[0]),int(bb[1])),color,thickness)\n except:\n pass\n \n if labels is not None:\n label = labels[idx]\n left = bbox_3d[0,0]\n top = bbox_3d[0,1]\n im = cv2.putText(im,\"{}\".format(label),(int(left),int(top - 10)),cv2.FONT_HERSHEY_PLAIN,1,(0,0,0),3)\n im = cv2.putText(im,\"{}\".format(label),(int(left),int(top - 10)),cv2.FONT_HERSHEY_PLAIN,1,(255,255,255),1)\n \n return im\n \n\ndef load_i24_csv(file):\n \"\"\"\n Simple no-frills function to load data as currently formatted on the i24 project\n labels - first row of string headers for data columns\n data - dict of lists, one key per frame, one entry per frame object\n \"\"\"\n short_name = file.split(\"/\")[-1]\n HEADERS = True\n \n # parse first file\n rows = []\n with open(file,\"r\") as f:\n read = csv.reader(f)\n \n for row in read:\n rows.append(row)\n \n data = {}\n HEADERS = True\n for row_idx in range(len(rows)):\n row = rows[row_idx]\n \n # pass header lines through as-is\n if HEADERS:\n headers = row\n if len(row) > 0 and row[0] == \"Frame #\":\n HEADERS = False\n \n \n else:\n \n if len(row) == 0:\n continue\n \n frame_idx = int(row[0])\n if frame_idx not in data.keys():\n data[frame_idx] = [row]\n else:\n 
data[frame_idx].append(row)\n \n \n return headers,data\n\n# basic test code\nif __name__ == \"__main__\":\n \n camera_name = \"p2c3\"\n \n vp_path = \"/home/worklab/Documents/derek/i24-dataset-gen/DATA/vp/{}_axes.csv\".format(camera_name)\n point_path = \"/home/worklab/Documents/derek/i24-dataset-gen/DATA/tform/{}_im_lmcs_transform_points.csv\".format(camera_name)\n \n \n # get some data\n data_file = \"/home/worklab/Data/dataset_alpha/manual_correction/rectified_{}_0_track_outputs_3D.csv\".format(camera_name)\n labels,data = load_i24_csv(data_file)\n frame_data = data[0]\n # convert labels from first frame into tensor form\n boxes = []\n classes = []\n for item in frame_data:\n if len(item[11]) > 0:\n boxes.append(np.array(item[11:27]).astype(float))\n classes.append(item[3])\n boxes = torch.from_numpy(np.stack(boxes))\n boxes = torch.stack((boxes[:,::2],boxes[:,1::2]),dim = -1)\n \n # get first frame from sequence\n sequence = \"/home/worklab/Data/cv/video/ground_truth_video_06162021/segments/{}_0.mp4\".format(camera_name)\n cap = cv2.VideoCapture(sequence)\n _,frame = cap.read()\n \n \n # test homography\n hg = Homography()\n hg.add_i24_camera(point_path,vp_path,camera_name)\n \n # fit P and evaluate\n heights = hg.guess_heights(classes)\n hg.scale_Z(boxes,heights,name = camera_name)\n hg.test_transformation(boxes,classes,camera_name,frame)\n \n \n"
] |
[
[
"torch.zeros",
"numpy.array",
"torch.stack",
"numpy.zeros",
"torch.sign",
"torch.from_numpy",
"torch.abs",
"torch.ones",
"numpy.stack",
"numpy.arange",
"numpy.sqrt",
"numpy.abs",
"torch.transpose",
"numpy.linspace",
"torch.matmul",
"torch.mean",
"torch.pow"
]
] |
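The coarse-to-fine grid search in `find_vanishing_point` above is seeded with the intersection of the first two lines, but the seed computation has two slips: the slope expression drops parentheses (`/line0[2] - line0[0]` rather than `/(line0[2] - line0[0])`), and `d` is computed with `c` where line 1's slope `b` is needed. The search still converges because the initial grid is extremely wide. A corrected slope-intercept intersection, as a sketch assuming non-vertical, non-parallel lines:

```python
import numpy as np

def intersect(l0, l1):
    """Intersection of two lines, each given by endpoints (x0, y0, x1, y1)."""
    a = (l0[3] - l0[1]) / (l0[2] - l0[0])  # slope of line 0
    b = (l1[3] - l1[1]) / (l1[2] - l1[0])  # slope of line 1
    c = l0[1] - a * l0[0]                  # intercept of line 0
    d = l1[1] - b * l1[0]                  # intercept of line 1
    x = (d - c) / (a - b)                  # solve a*x + c = b*x + d
    return x, a * x + c

# y = x and y = -x + 2 meet at (1, 1)
assert np.allclose(intersect((0, 0, 1, 1), (0, 2, 1, 1)), (1.0, 1.0))
```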
pengfeidip/SSD_pytorch
|
[
"f17dcfa76e359c288420df1690e9ce4365353f0a"
] |
[
"ssd.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom layers import *\nfrom data import voc, coco\nimport os\n\n\nclass SSD(nn.Module):\n \"\"\"Single Shot Multibox Architecture\n The network is composed of a base VGG network followed by the\n added multibox conv layers. Each multibox layer branches into\n 1) conv2d for class conf scores\n 2) conv2d for localization predictions\n 3) associated priorbox layer to produce default bounding\n boxes specific to the layer's feature map size.\n See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n\n Args:\n phase: (string) Can be \"test\" or \"train\"\n size: (int) input image size\n base: (VGG) VGG16 layers for input, size of either 300 or 500\n extras: (list) extra layers that feed to multibox loc and conf layers\n head: (list) \"multibox head\" consists of loc and conf conv layers\n num_class: (int )number of classinclude the background\n top_k: (int) when phase is test,select the top k default boxes to parse, default=20\n \"\"\"\n\n def __init__(self, phase, size, base, extras, head, num_classes, top_k=400, keep_top_k=200):\n super(SSD, self).__init__()\n self.phase = phase\n self.num_classes = num_classes\n self.cfg = (coco, voc)[num_classes == 21] # 城会玩,这种方式需要学习一下 --- pengfei ---\n self.priorbox = PriorBox(self.cfg)\n self.priors = self.priorbox.forward()\n self.size = size\n\n # SSD network\n self.vgg = nn.ModuleList(base)\n # Layer learns to scale the l2 normalized features from conv4_3\n self.L2Norm = L2Norm(512, 20)\n self.extras = nn.ModuleList(extras)\n\n self.loc = nn.ModuleList(head[0]) # --- pengfei --- conv layers for getting location\n self.conf = nn.ModuleList(head[1])\n\n\n if phase == 'test':\n self.top_k = top_k\n self.keep_top_k = keep_top_k\n self.softmax = nn.Softmax(dim=-1)\n self.detect = Detect(num_classes, bkg_label=0, top_k=self.top_k,\n keep_top_k=self.keep_top_k, conf_thresh=.01, nms_thresh=.45)\n\n def forward(self, x):\n \"\"\"Applies network layers and ops on input image(s) x.\n\n Args:\n x: input image or batch of images. Shape: [batch,3,300,300].\n\n Return:\n Depending on phase:\n test:\n Variable(tensor) of output class label predictions,\n confidence score, and corresponding location predictions for\n each object detected. 
Shape: [batch,topk,7]\n\n train:\n list of concat outputs from:\n 1: confidence layers, Shape: [batch*num_priors,num_classes]\n 2: localization layers, Shape: [batch,num_priors*4]\n 3: priorbox layers, Shape: [2,num_priors*4]\n \"\"\"\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n # --- pengfei ---这些都是传统的vgg16部分(不带BN)\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # apply vgg up to fc7:\n # --- pengfei --- 因为 还有 pool5 + conv(512, 1024) + conv(1024, 1024)\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n # 释了我之前为啥\"添加层\"不带激活函数的疑惑;至此,获得了全部所需要的 feature_map\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,\n self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output\n\n def load_weights(self, base_file):\n other, ext = os.path.splitext(base_file)\n if ext == '.pkl' or '.pth':\n print('Loading weights into state dict...')\n self.load_state_dict(torch.load(base_file,\n map_location=lambda storage, loc: storage))\n print('Finished!')\n else:\n print('Sorry only .pth and .pkl files supported.')\n\n\n# This function is derived from torchvision VGG make_layers()\n# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py\ndef vgg(cfg, i, batch_norm=False):\n layers = []\n in_channels = i\n for v in cfg:\n if v == 'M':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n elif v == 'C':\n layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]\n else:\n conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)\n conv7 = nn.Conv2d(1024, 1024, kernel_size=1)\n layers += [pool5, conv6,\n nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]\n return layers\n\n\n# [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n# 这个实现有点意思,哈哈,\n#todo 但是为什么只有卷积没有 activation function --- pengfei --- 因为在代码 87行才用了relu\ndef add_extras(cfg, i, batch_norm=False):\n # Extra layers added to VGG for feature scaling\n layers = []\n in_channels = i\n flag = False\n for k, v in enumerate(cfg):\n if in_channels != 'S':\n if v == 'S':\n layers += [nn.Conv2d(in_channels, cfg[k + 1],\n kernel_size=(1, 3)[flag], stride=2, padding=1)]\n else:\n layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]\n flag = not flag\n in_channels = v\n return layers\n\n\ndef multibox(vgg, extra_layers, cfg, num_classes):\n loc_layers = []\n conf_layers = []\n vgg_source = [21, -2] # vgg[21] means conv4_3 layers, vgg[-2] means 改造的fc7 before relu activation\n for k, v in enumerate(vgg_source):\n loc_layers += 
[nn.Conv2d(vgg[v].out_channels,\n cfg[k] * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(vgg[v].out_channels,\n cfg[k] * num_classes, kernel_size=3, padding=1)]\n for k, v in enumerate(extra_layers[1::2], 2):\n loc_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * 4, kernel_size=3, padding=1)]\n conf_layers += [nn.Conv2d(v.out_channels, cfg[k]\n * num_classes, kernel_size=3, padding=1)]\n return vgg, extra_layers, (loc_layers, conf_layers)\n\nbase = {\n '300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',\n 512, 512, 512],\n '512': [],\n}\nextras = {\n '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],\n '512': [],\n}\nmbox = {\n '300': [4, 6, 6, 6, 4, 4], # number of boxes per feature map location\n '512': [],\n}\n\n\ndef build_ssd(phase, size=300, num_classes=21, top_k=400):\n \"\"\"\n :param phase: (str) \"train\" or \" test\"\n :param size: (int) im size of NN inputs, default{300}\n :param num_classes: (int) number of classes, include background default 21 for VOC\n :param top_k: (int) only when phase is \"test\", top_k is meaningful, it means how many\n boxes in intra-class NMS will be considered\n :return: (SSD) a SSD object\n \"\"\"\n if phase != \"test\" and phase != \"train\":\n print(\"ERROR: Phase: \" + phase + \" not recognized\")\n return\n if size != 300:\n print(\"ERROR: You specified size \" + repr(size) + \". However, \" +\n \"currently only SSD300 (size=300) is supported!\")\n return\n base_, extras_, head_ = multibox(vgg(base[str(size)], 3),\n add_extras(extras[str(size)], 1024),\n mbox[str(size)], num_classes)\n return SSD(phase, size, base_, extras_, head_, num_classes, top_k)\n"
] |
[
[
"torch.nn.Softmax",
"torch.nn.ModuleList",
"torch.nn.MaxPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.load"
]
] |
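Two idioms in the SSD file above are worth unpacking: `(coco, voc)[num_classes == 21]` selects a config by indexing a tuple with a bool (False is 0, True is 1), and `add_extras` alternates 1x1 and 3x3 kernels the same way via `(1, 3)[flag]`. (Note also that `load_weights` tests `ext == '.pkl' or '.pth'`, which is always truthy; the intended check is `ext in ('.pkl', '.pth')`.) A tiny sketch of the bool-indexing pattern:

```python
# bool subclasses int, so it can index a tuple: False -> element 0, True -> element 1
cfg = ('coco-config', 'voc-config')[21 == 21]
assert cfg == 'voc-config'

flag, kernels = False, []
for _ in range(6):
    kernels.append((1, 3)[flag])  # alternate 1x1 and 3x3 conv kernels
    flag = not flag
assert kernels == [1, 3, 1, 3, 1, 3]
```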
sum-coderepo/HadoopApp
|
[
"0e8d48c5d541b5935c9054fb1335d829d67d7b59",
"0e8d48c5d541b5935c9054fb1335d829d67d7b59",
"0e8d48c5d541b5935c9054fb1335d829d67d7b59"
] |
[
"NeuralNetwork/ClassificationNeuralNetwok.py",
"Classification/PSET14-03.py",
"Test/Test1.py"
] |
[
"import numpy as np\nfrom sklearn import datasets, linear_model\nimport matplotlib.pyplot as plt\n\n\nclass Config:\n nn_input_dim = 2 # input layer dimensionality\n nn_output_dim = 2 # output layer dimensionality\n # Gradient descent parameters (I picked these by hand)\n epsilon = 0.01 # learning rate for gradient descent\n reg_lambda = 0.01 # regularization strength\n\n\ndef generate_data():\n np.random.seed(0)\n X, y = datasets.make_moons(200, noise=0.20)\n return X, y\n\n\ndef visualize(X, y, model):\n # plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)\n # plt.show()\n plot_decision_boundary(lambda x:predict(model,x), X, y)\n plt.title(\"Logistic Regression\")\n plt.show()\n\n\ndef plot_decision_boundary(pred_func, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n plt.show()\n\n\n# Helper function to evaluate the total loss on the dataset\ndef calculate_loss(model, X, y):\n num_examples = len(X) # training set size\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n # Forward propagation to calculate our predictions\n z1 = X.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n # Calculating the loss\n corect_logprobs = -np.log(probs[range(num_examples), y])\n data_loss = np.sum(corect_logprobs)\n # Add regulatization term to loss (optional)\n data_loss += Config.reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))\n return 1. / num_examples * data_loss\n\n\ndef predict(model, x):\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n # Forward propagation\n z1 = x.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n return np.argmax(probs, axis=1)\n\n\n# This function learns parameters for the neural network and returns the model.\n# - nn_hdim: Number of nodes in the hidden layer\n# - num_passes: Number of passes through the training data for gradient descent\n# - print_loss: If True, print the loss every 1000 iterations\ndef build_model(X, y, nn_hdim, num_passes=20000, print_loss=False):\n # Initialize the parameters to random values. We need to learn these.\n num_examples = len(X)\n np.random.seed(0)\n W1 = np.random.randn(Config.nn_input_dim, nn_hdim) / np.sqrt(Config.nn_input_dim)\n b1 = np.zeros((1, nn_hdim))\n W2 = np.random.randn(nn_hdim, Config.nn_output_dim) / np.sqrt(nn_hdim)\n b2 = np.zeros((1, Config.nn_output_dim))\n\n # This is what we return at the end\n model = {}\n\n # Gradient descent. 
For each batch...\n for i in range(0, num_passes):\n\n # Forward propagation\n z1 = X.dot(W1) + b1\n a1 = np.tanh(z1)\n z2 = a1.dot(W2) + b2\n exp_scores = np.exp(z2)\n probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)\n\n # Backpropagation\n delta3 = probs\n delta3[range(num_examples), y] -= 1\n dW2 = (a1.T).dot(delta3)\n db2 = np.sum(delta3, axis=0, keepdims=True)\n delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))\n dW1 = np.dot(X.T, delta2)\n db1 = np.sum(delta2, axis=0)\n\n # Add regularization terms (b1 and b2 don't have regularization terms)\n dW2 += Config.reg_lambda * W2\n dW1 += Config.reg_lambda * W1\n\n # Gradient descent parameter update\n W1 += -Config.epsilon * dW1\n b1 += -Config.epsilon * db1\n W2 += -Config.epsilon * dW2\n b2 += -Config.epsilon * db2\n\n # Assign new parameters to the model\n model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}\n\n # Optionally print the loss.\n # This is expensive because it uses the whole dataset, so we don't want to do it too often.\n if print_loss and i % 1000 == 0:\n print(\"Loss after iteration %i: %f\" % (i, calculate_loss(model, X, y)))\n\n return model\n\n\ndef classify(X, y):\n # clf = linear_model.LogisticRegressionCV()\n # clf.fit(X, y)\n # return clf\n\n pass\n\n\ndef main():\n X, y = generate_data()\n model = build_model(X, y, 3, print_loss=True)\n visualize(X, y, model)\n\n\nif __name__ == \"__main__\":\n main()",
"from mnist import MNIST\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nimport datetime as dt\nimport sys\n#Import datasets, classifiers and performance metrics\nfrom sklearn import datasets, svm, metrics\nfrom scipy.io import loadmat\nfrom sklearn.datasets import fetch_mldata\n\n# import custom module\nfrom Classification.mnist_helpers import *\n\n\n# it creates mldata folder in your root project folder\n#mnist = fetch_mldata('MNIST original', data_home='C:\\\\Users\\\\suagrawa\\\\scikit_learn_data')\nfrom sklearn.datasets import load_digits\nmnist = load_digits()\n\n\n#data field is 70k x 784 array, each row represents pixels from 28x28=784 image\nimages = mnist.data\ntargets = mnist.target\n\n\n# Let's have a look at the random 16 images,\n# We have to reshape each data row, from flat array of 784 int to 28x28 2D array\n\n#pick random indexes from 0 to size of our dataset\n#show_some_digits(images,targets)\nX_data = images/255.0\nY = targets\n\n#split data to train and test\n#from sklearn.cross_validation import train_test_split\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X_data, Y, test_size=0.15, random_state=42)\n\n\n################ Classifier with good params ###########\n# Create a classifier: a support vector classifier\n\nparam_C = 10\nparam_gamma = 0.05\nclassifier = svm.SVC(C=param_C,gamma=param_gamma)\n\n#We learn the digits on train part\nstart_time = dt.datetime.now()\nprint('Start learning at {}'.format(str(start_time)))\nclassifier.fit(X_train, y_train)\nend_time = dt.datetime.now()\nprint('Stop learning {}'.format(str(end_time)))\nelapsed_time= end_time - start_time\nprint('Elapsed learning {}'.format(str(elapsed_time)))\n\n\n########################################################\n# Now predict the value of the test\nexpected = y_test\npredicted = classifier.predict(X_test)\n\nshow_some_digits(X_test,predicted,title_text=\"Predicted {}\")\n\nprint(\"Classification report for classifier %s:\\n%s\\n\"\n % (classifier, metrics.classification_report(expected, predicted)))\n\ncm = metrics.confusion_matrix(expected, predicted)\nprint(\"Confusion matrix:\\n%s\" % cm)\n\nplot_confusion_matrix(cm)\n\nprint(\"Accuracy={}\".format(metrics.accuracy_score(expected, predicted)))\n",
"import numpy as np\nfrom numpy import linalg as LA\nimport matplotlib.pyplot as plt\n# stock prices (3x per day)\n# [morning, midday, evening]\nAPPLE = np.array(\n [[1,5],[3,-2],[-1,-4],[-2,1]])\n\n# midday variance\nprint(APPLE.mean(axis=0))\ncov = np.cov(APPLE,rowvar=0)\nprint(cov)\n\nw, v = LA.eig(cov)\nprint(w)\nprint(v)\norigin = [0, 0]\n\neig_vec1 = v[:,0]\neig_vec2 = v[:,1]\n\nprint(eig_vec1)\nprint(eig_vec2)\n\n\n# This line below plots the 2d points\n#plt.scatter(np_array[:,0], np_array[:,1])\n\nplt.quiver(*origin, *eig_vec1, color=['r'], scale=21)\nplt.quiver(*origin, *eig_vec2, color=['b'], scale=21)\nplt.show()\n"
] |
[
[
"numpy.square",
"numpy.dot",
"matplotlib.pyplot.contourf",
"numpy.zeros",
"numpy.random.seed",
"numpy.sum",
"matplotlib.pyplot.title",
"numpy.random.randn",
"numpy.exp",
"numpy.tanh",
"numpy.argmax",
"matplotlib.pyplot.scatter",
"numpy.arange",
"numpy.sqrt",
"numpy.power",
"matplotlib.pyplot.show",
"sklearn.datasets.make_moons"
],
[
"sklearn.metrics.confusion_matrix",
"sklearn.datasets.load_digits",
"sklearn.metrics.accuracy_score",
"sklearn.svm.SVC",
"sklearn.metrics.classification_report",
"sklearn.model_selection.train_test_split"
],
[
"numpy.array",
"numpy.cov",
"numpy.linalg.eig",
"matplotlib.pyplot.show",
"matplotlib.pyplot.quiver"
]
] |
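`build_model` and `predict` in the first file above exponentiate logits directly (`np.exp(z2)`), which overflows once logits grow large. A minimal sketch of the standard max-subtraction stabilization, which leaves the softmax output unchanged:

```python
import numpy as np

def softmax(z):
    """Row-wise softmax; subtracting the row max avoids overflow in exp."""
    z = z - z.max(axis=1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

z = np.array([[1000.0, 1001.0]])          # naive np.exp(z) would overflow to inf
assert np.allclose(softmax(z).sum(axis=1), 1.0)
assert np.allclose(softmax(z), softmax(z - 500.0))  # shift-invariance
```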
sunshineInmoon/ssd.pytorch
|
[
"a1cb37ea3e5fe64cdcf1c3d0004006baf1d046a1"
] |
[
"train.py"
] |
[
"from data import *\nfrom utils.augmentations import SSDAugmentation\nfrom layers.modules import MultiBoxLoss\nfrom ssd import build_ssd\nimport os\nimport sys\nimport time\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\nimport torch.utils.data as data\nimport numpy as np\nimport argparse\n\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nparser = argparse.ArgumentParser(\n description='Single Shot MultiBox Detector Training With Pytorch')\ntrain_set = parser.add_mutually_exclusive_group()\nparser.add_argument('--dataset', default='VOC', choices=['VOC', 'COCO'],\n type=str, help='VOC or COCO')\nparser.add_argument('--dataset_root', default=VOC_ROOT,\n help='Dataset root directory path')\nparser.add_argument('--basenet', default='vgg16_reducedfc.pth',\n help='Pretrained base model')\nparser.add_argument('--batch_size', default=32, type=int,\n help='Batch size for training')\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from')\nparser.add_argument('--start_iter', default=0, type=int,\n help='Resume training at this iter')\nparser.add_argument('--num_workers', default=4, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--cuda', default=True, type=str2bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n help='Momentum value for optim')\nparser.add_argument('--weight_decay', default=5e-4, type=float,\n help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float,\n help='Gamma update for SGD')\nparser.add_argument('--visdom', default=False, type=str2bool,\n help='Use visdom for loss visualization')\nparser.add_argument('--save_folder', default='weights/',\n help='Directory for saving checkpoint models')\nargs = parser.parse_args()\n\n\nif torch.cuda.is_available():\n if args.cuda:\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n if not args.cuda:\n print(\"WARNING: It looks like you have a CUDA device, but aren't \" +\n \"using CUDA.\\nRun with --cuda for optimal training speed.\")\n torch.set_default_tensor_type('torch.FloatTensor')\nelse:\n torch.set_default_tensor_type('torch.FloatTensor')\n\nif not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n\ndef train():\n if args.dataset == 'COCO':\n if args.dataset_root == VOC_ROOT:\n if not os.path.exists(COCO_ROOT):\n parser.error('Must specify dataset_root if specifying dataset')\n print(\"WARNING: Using default COCO dataset_root because \" +\n \"--dataset_root was not specified.\")\n args.dataset_root = COCO_ROOT\n cfg = coco\n dataset = COCODetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS))\n elif args.dataset == 'VOC':\n if args.dataset_root == COCO_ROOT:\n parser.error('Must specify dataset if specifying dataset_root')\n cfg = voc\n dataset = VOCDetection(root=args.dataset_root,\n transform=SSDAugmentation(cfg['min_dim'],\n MEANS)) #数据的读取和预处理\n\n if args.visdom:\n import visdom\n viz = visdom.Visdom()\n\n ssd_net = build_ssd('train', cfg['min_dim'], cfg['num_classes'])\n net = ssd_net\n\n if args.cuda:\n net = torch.nn.DataParallel(ssd_net)\n cudnn.benchmark = True\n\n if args.resume:\n print('Resuming training, loading {}...'.format(args.resume))\n 
ssd_net.load_weights(args.resume)\n else:\n vgg_weights = torch.load(args.save_folder + args.basenet)\n print('Loading base network...')\n ssd_net.vgg.load_state_dict(vgg_weights)\n\n if args.cuda:\n net = net.cuda()\n\n if not args.resume:\n print('Initializing weights...')\n # initialize newly added layers' weights with xavier method\n ssd_net.extras.apply(weights_init) #对网络中每一层应用weights_init函数,这里是对卷积的weight初始化\n ssd_net.loc.apply(weights_init)\n ssd_net.conf.apply(weights_init)\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay)\n criterion = MultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n False, args.cuda)\n\n net.train()\n # loss counters\n loc_loss = 0\n conf_loss = 0\n epoch = 0\n print('Loading the dataset...')\n\n epoch_size = len(dataset) // args.batch_size\n print('Training SSD on:', dataset.name)\n print('Using the specified args:')\n print(args)\n\n step_index = 0\n\n if args.visdom:\n vis_title = 'SSD.PyTorch on ' + dataset.name\n vis_legend = ['Loc Loss', 'Conf Loss', 'Total Loss']\n iter_plot = create_vis_plot('Iteration', 'Loss', vis_title, vis_legend)\n epoch_plot = create_vis_plot('Epoch', 'Loss', vis_title, vis_legend)\n\n data_loader = data.DataLoader(dataset, args.batch_size,\n num_workers=args.num_workers,\n shuffle=True, collate_fn=detection_collate,\n pin_memory=True)\n # create batch iterator\n batch_iterator = iter(data_loader)\n for iteration in range(args.start_iter, cfg['max_iter']):\n if args.visdom and iteration != 0 and (iteration % epoch_size == 0):\n update_vis_plot(epoch, loc_loss, conf_loss, epoch_plot, None,\n 'append', epoch_size)\n # reset epoch loss counters\n loc_loss = 0\n conf_loss = 0\n epoch += 1\n\n if iteration in cfg['lr_steps']:\n step_index += 1\n adjust_learning_rate(optimizer, args.gamma, step_index)\n\n # load train data\n images, targets = next(batch_iterator)\n\n if args.cuda:\n images = Variable(images.cuda())\n targets = [Variable(ann.cuda(), volatile=True) for ann in targets]\n else:\n images = Variable(images)\n targets = [Variable(ann, volatile=True) for ann in targets]\n # forward\n t0 = time.time()\n out = net(images)\n # backprop\n optimizer.zero_grad()\n loss_l, loss_c = criterion(out, targets)\n loss = loss_l + loss_c\n loss.backward()\n optimizer.step()\n t1 = time.time()\n loc_loss += loss_l.data[0]\n conf_loss += loss_c.data[0]\n\n if iteration % 10 == 0:\n print('timer: %.4f sec.' 
% (t1 - t0))\n print('iter ' + repr(iteration) + ' || Loss: %.4f ||' % (loss.data[0]), end=' ')\n\n if args.visdom:\n update_vis_plot(iteration, loss_l.data[0], loss_c.data[0],\n iter_plot, epoch_plot, 'append')\n\n if iteration != 0 and iteration % 5000 == 0:\n print('Saving state, iter:', iteration)\n torch.save(ssd_net.state_dict(), 'weights/ssd300_COCO_' +\n repr(iteration) + '.pth')\n torch.save(ssd_net.state_dict(),\n args.save_folder + '' + args.dataset + '.pth')\n\n\ndef adjust_learning_rate(optimizer, gamma, step):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 at every\n specified step\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n lr = args.lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef xavier(param):\n init.xavier_uniform(param)\n\n\ndef weights_init(m): #初始化卷积参数\n if isinstance(m, nn.Conv2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n\n\ndef create_vis_plot(_xlabel, _ylabel, _title, _legend):\n return viz.line(\n X=torch.zeros((1,)).cpu(),\n Y=torch.zeros((1, 3)).cpu(),\n opts=dict(\n xlabel=_xlabel,\n ylabel=_ylabel,\n title=_title,\n legend=_legend\n )\n )\n\n\ndef update_vis_plot(iteration, loc, conf, window1, window2, update_type,\n epoch_size=1):\n viz.line(\n X=torch.ones((1, 3)).cpu() * iteration,\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu() / epoch_size,\n win=window1,\n update=update_type\n )\n # initialize epoch plot on first iteration\n if iteration == 0:\n viz.line(\n X=torch.zeros((1, 3)).cpu(),\n Y=torch.Tensor([loc, conf, loc + conf]).unsqueeze(0).cpu(),\n win=window2,\n update=True\n )\n\n\nif __name__ == '__main__':\n train()\n"
] |
[
[
"torch.zeros",
"torch.nn.init.xavier_uniform",
"torch.autograd.Variable",
"torch.set_default_tensor_type",
"torch.ones",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"torch.Tensor",
"torch.nn.DataParallel"
]
] |
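In the train.py above, `weights_init` is applied with `Module.apply`, which visits every submodule, so the `isinstance` filter is what restricts Xavier init to conv layers. Note that `init.xavier_uniform` has long been deprecated in favor of the in-place `xavier_uniform_`, as have `Variable` and the `volatile=True` flag used in the training loop (both pre-0.4 idioms). A small sketch of the modern spelling:

```python
import torch.nn as nn

def weights_init(m):
    # Module.apply traverses all submodules; only Conv2d layers are touched here
    if isinstance(m, nn.Conv2d):
        nn.init.xavier_uniform_(m.weight)
        if m.bias is not None:
            m.bias.data.zero_()

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.Conv2d(8, 16, 3))
net.apply(weights_init)
```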
clairebub/interpretability
|
[
"8c71bbc976ce9382705a2395ad651da009ab4785"
] |
[
"metrics/segmentation.py"
] |
[
"# modified from https://github.com/learningtitans/isbi2017-part1/blob/master/metrics.py\n\n\nimport numpy as np\nfrom sklearn.metrics import jaccard_similarity_score\n\nsmooth_default = 1.\n\n\ndef dice_coef(y_true, y_pred, smooth=smooth_default):\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n\n tp = np.sum(y_true_f * y_pred_f)\n tn = np.sum(y_true_f == y_pred_f) - tp\n\n return (2. * tp + smooth) / (len(y_true_f) - tn + tp + smooth)\n\n\ndef jacc_idx(y_true, y_pred, smooth=smooth_default):\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n\n tp = np.sum(y_true_f * y_pred_f)\n tn = np.sum(y_true_f == y_pred_f) - tp\n\n return (tp + smooth) / (len(y_true_f) - tn + smooth)\n\n\ndef accuracy(y_true, y_pred, smooth=smooth_default):\n y_true_f = y_true.flatten()\n y_pred_f = y_pred.flatten()\n\n intersection = np.sum(y_true_f == y_pred_f)\n\n return (intersection + smooth) / (len(y_true_f) + smooth)\n\n\ndef jacc_loss(y_true, y_pred):\n return -jacc_idx(y_true, y_pred)\n\n\ndef dice_loss(y_true, y_pred):\n return -dice_coef(y_true, y_pred)\n\n\ndef dice_jacc_single(mask_true, mask_pred, smooth=smooth_default):\n bool_true = mask_true.reshape(-1).astype(np.bool)\n bool_pred = mask_pred.reshape(-1).astype(np.bool)\n if bool_true.shape != bool_pred.shape:\n raise ValueError(\"Masks of different sizes.\")\n\n bool_sum = bool_true.sum() + bool_pred.sum()\n if bool_sum == 0:\n print\n \"Empty mask\"\n return 0, 0\n intersec = np.logical_and(bool_true, bool_pred).sum()\n dice = 2. * intersec / bool_sum\n jacc = jaccard_similarity_score(bool_true.reshape((1, -1)), bool_pred.reshape((1, -1)), normalize=True, sample_weight=None)\n return dice, jacc\n\n\ndef dice_jacc_mean(mask_true, mask_pred, smooth=smooth_default):\n dice = 0\n jacc = 0\n for i in range(mask_true.shape[0]):\n current_dice, current_jacc = dice_jacc_single(mask_true=mask_true[i], mask_pred=mask_pred[i], smooth=smooth)\n dice = dice + current_dice\n jacc = jacc + current_jacc\n return dice / mask_true.shape[0], jacc / mask_true.shape[0]\n"
] |
[
[
"numpy.sum",
"numpy.logical_and"
]
] |
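A worked toy case for the metrics above (ignoring the smoothing term): with one overlapping pixel out of two true positives and one predicted positive, Dice is 2/3 and Jaccard is 1/2, consistent with the identity Dice = 2J / (1 + J):

```python
import numpy as np

y_true = np.array([0, 1, 1, 0])
y_pred = np.array([0, 1, 0, 0])

tp = np.logical_and(y_true, y_pred).sum()        # 1 overlapping positive
dice = 2 * tp / (y_true.sum() + y_pred.sum())    # 2*1 / (2+1) = 2/3
jacc = tp / np.logical_or(y_true, y_pred).sum()  # 1 / 2
assert np.isclose(dice, 2 / 3) and np.isclose(jacc, 0.5)
assert np.isclose(dice, 2 * jacc / (1 + jacc))
```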
mcvine/mcvine
|
[
"42232534b0c6af729628009bed165cd7d833789d"
] |
[
"packages/mccomponents/tests/mccomponents/sample/geometry/intersection_TestCase.py"
] |
[
"#!/usr/bin/env python\n#\n\nstandalone = True\n\nimport os\nos.environ['MCVINE_MPI_BINDING'] = 'NONE'\n\nimport mcni, shutil, numpy as np\nfrom mccomponents.sample import samplecomponent\n\n\nimport unittest\nclass TestCase(unittest.TestCase):\n\n\n def test1a(self):\n \"intersection: two blocks\"\n self._test('sampleassembly-variants/sampleassembly.xml.intersection_of_two_blocks', (0., 0., 0.), (.1, .15, .2))\n return\n\n def _test(self, xml, center, size):\n from utils import createSampleAssembly\n saxml = createSampleAssembly('.', './sampleassembly', xml)\n sample = samplecomponent( 'test', saxml)\n check(sample, center, size)\n import shutil\n shutil.rmtree(os.path.dirname(saxml))\n return\n \n pass # end of TestCase\n\n\ndef check(sample, center, size):\n x0,y0,z0 = center\n x,y,z = size\n \n neutron = mcni.neutron(r=(x0,y0,z0-1), v=(0,0,1), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[2], z0+z/2.)\n\n neutron = mcni.neutron(r=(x0,y0,z0+1), v=(0,0,-1), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[2], z0-z/2.)\n\n neutron = mcni.neutron(r=(x0,y0-1,z0), v=(0,1,0), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[1], y0+y/2.)\n\n neutron = mcni.neutron(r=(x0,y0+1,z0), v=(0,-1,0), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[1], y0-y/2.)\n\n neutron = mcni.neutron(r=(x0-1,y0,z0), v=(1,0,0), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[0], x0+x/2.)\n\n neutron = mcni.neutron(r=(x0+1,y0,z0), v=(-1,0,0), prob=1.)\n sample.scatter(neutron)\n assert np.isclose(neutron.state.position[0], x0-x/2.)\n return\n\ndef main():\n unittest.main()\n return\n \n \nif __name__ == \"__main__\":\n main()\n \n# version\n__id__ = \"$Id$\"\n\n# End of file \n"
] |
[
[
"numpy.isclose"
]
] |
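The test above checks neutron exit positions with `np.isclose`, which is the right tool for floating-point geometry rather than `==`. Its default test is |a - b| <= atol + rtol * |b| with rtol=1e-05 and atol=1e-08, so comparisons against values near zero are dominated by atol:

```python
import numpy as np

assert np.isclose(0.1 + 0.2, 0.3)            # exact == would fail here
assert np.isclose(0.0, 5e-9)                 # passes only because of atol
assert not np.isclose(1e-9, 2e-9, atol=0.0)  # relative tolerance alone fails near zero
```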
hnt4499/fairseq
|
[
"4b519e9876737db32047167e77bf5f8781edef99",
"4b519e9876737db32047167e77bf5f8781edef99",
"4b519e9876737db32047167e77bf5f8781edef99"
] |
[
"fairseq/tasks/sentence_ranking.py",
"fairseq/data/audio/speech_to_text_dataset.py",
"fairseq/data/audio/raw_audio_dataset.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom loguru import logger\nimport os\n\nimport numpy as np\nfrom fairseq import utils, utils_loguru\nfrom fairseq.data import (\n ConcatSentencesDataset,\n Dictionary,\n IdDataset,\n NestedDictionaryDataset,\n NumelDataset,\n NumSamplesDataset,\n PrependTokenDataset,\n RawLabelDataset,\n RightPadDataset,\n SortDataset,\n TruncateDataset,\n data_utils,\n)\nfrom fairseq.data.shorten_dataset import maybe_shorten_dataset\nfrom fairseq.tasks import LegacyFairseqTask, register_task\n\n\nlogger = logger.patch(utils_loguru.loguru_name_patcher)\n\n\n@register_task(\"sentence_ranking\")\nclass SentenceRankingTask(LegacyFairseqTask):\n \"\"\"\n Ranking task on multiple sentences.\n\n Args:\n dictionary (Dictionary): the dictionary for the input of the task\n \"\"\"\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n parser.add_argument(\"data\", metavar=\"FILE\", help=\"file prefix for data\")\n parser.add_argument(\n \"--num-classes\", type=int, help=\"number of sentences to be ranked\"\n )\n parser.add_argument(\n \"--init-token\",\n type=int,\n help=\"add token at the beginning of each batch item\",\n )\n parser.add_argument(\n \"--separator-token\", type=int, help=\"add separator token between inputs\"\n )\n parser.add_argument(\"--no-shuffle\", action=\"store_true\")\n parser.add_argument(\n \"--shorten-method\",\n default=\"none\",\n choices=[\"none\", \"truncate\", \"random_crop\"],\n help=\"if not none, shorten sequences that exceed --tokens-per-sample\",\n )\n parser.add_argument(\n \"--shorten-data-split-list\",\n default=\"\",\n help=\"comma-separated list of dataset splits to apply shortening to, \"\n 'e.g., \"train,valid\" (default: all dataset splits)',\n )\n parser.add_argument(\n \"--max-option-length\", type=int, help=\"max length for each option\"\n )\n\n def __init__(self, args, dictionary):\n super().__init__(args)\n self.dictionary = dictionary\n\n @classmethod\n def load_dictionary(cls, args, filename, source=True):\n \"\"\"Load the dictionary from the filename\n\n Args:\n filename (str): the filename\n \"\"\"\n dictionary = Dictionary.load(filename)\n dictionary.add_symbol(\"<mask>\")\n return dictionary\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n assert (\n args.criterion == \"sentence_ranking\"\n ), \"Must set --criterion=sentence_ranking\"\n\n # load data dictionary\n data_dict = cls.load_dictionary(\n args,\n os.path.join(args.data, \"input0\", \"dict.txt\"),\n source=True,\n )\n logger.info(\"[input] dictionary: {} types\".format(len(data_dict)))\n return SentenceRankingTask(args, data_dict)\n\n def load_dataset(self, split, combine=False, **kwargs):\n \"\"\"Load a given dataset split (e.g., train, valid, test).\"\"\"\n\n def get_path(type, split):\n return os.path.join(self.args.data, type, split)\n\n def make_dataset(type, dictionary):\n split_path = get_path(type, split)\n\n dataset = data_utils.load_indexed_dataset(\n split_path,\n self.source_dictionary,\n self.args.dataset_impl,\n combine=combine,\n )\n return dataset\n\n input0 = make_dataset(\"input0\", self.source_dictionary)\n input_options = [\n make_dataset(\"input{idx}\".format(idx=idx + 1), self.source_dictionary)\n for idx in range(self.args.num_classes)\n ]\n\n if self.args.separator_token is not None:\n input0 = PrependTokenDataset(input0, 
self.args.separator_token)\n\n src_tokens = []\n for input_option in input_options:\n if self.args.init_token is not None:\n input_option = PrependTokenDataset(input_option, self.args.init_token)\n if self.args.max_option_length is not None:\n input_option = TruncateDataset(\n input_option, self.args.max_option_length\n )\n src_token = ConcatSentencesDataset(input_option, input0)\n src_token = maybe_shorten_dataset(\n src_token,\n split,\n self.args.shorten_data_split_list,\n self.args.shorten_method,\n self.args.max_positions,\n self.args.seed,\n )\n src_tokens.append(src_token)\n\n with data_utils.numpy_seed(self.args.seed):\n shuffle = np.random.permutation(len(src_tokens[0]))\n\n dataset = {\n \"id\": IdDataset(),\n \"nsentences\": NumSamplesDataset(),\n \"ntokens\": NumelDataset(src_tokens[0], reduce=True),\n }\n\n for src_token_idx in range(len(src_tokens)):\n dataset.update(\n {\n \"net_input{idx}\".format(idx=src_token_idx + 1): {\n \"src_tokens\": RightPadDataset(\n src_tokens[src_token_idx],\n pad_idx=self.source_dictionary.pad(),\n ),\n \"src_lengths\": NumelDataset(\n src_tokens[src_token_idx], reduce=False\n ),\n }\n }\n )\n\n label_path = \"{}.label\".format(get_path(\"label\", split))\n if os.path.exists(label_path):\n with open(label_path) as h:\n dataset.update(\n target=RawLabelDataset([int(x.strip()) for x in h.readlines()])\n )\n\n nested_dataset = NestedDictionaryDataset(\n dataset,\n sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],\n )\n\n if self.args.no_shuffle:\n dataset = nested_dataset\n else:\n dataset = SortDataset(\n nested_dataset,\n # shuffle\n sort_order=[shuffle],\n )\n\n logger.info(\"Loaded {0} with #samples: {1}\".format(split, len(dataset)))\n\n self.datasets[split] = dataset\n return self.datasets[split]\n\n def build_model(self, args):\n from fairseq import models\n\n model = models.build_model(args, self)\n\n model.register_classification_head(\n getattr(args, \"ranking_head_name\", \"sentence_classification_head\"),\n num_classes=1,\n )\n\n return model\n\n def max_positions(self):\n return self.args.max_positions\n\n @property\n def source_dictionary(self):\n return self.dictionary\n\n @property\n def target_dictionary(self):\n return self.dictionary\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport csv\nimport io\nfrom loguru import logger\nimport os.path as op\nimport re\nfrom typing import Dict, List, Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom fairseq.data import (\n ConcatDataset,\n Dictionary,\n FairseqDataset,\n ResamplingDataset,\n data_utils as fairseq_data_utils,\n)\nfrom fairseq.data.audio.audio_utils import get_fbank, get_waveform\nfrom fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform\nfrom fairseq import utils_loguru\n\n\nlogger = logger.patch(utils_loguru.loguru_name_patcher)\n\n\nclass S2TDataConfig(object):\n \"\"\"Wrapper class for data config YAML\"\"\"\n\n def __init__(self, yaml_path):\n try:\n import yaml\n except ImportError:\n print(\"Please install PyYAML to load YAML files for \" \"S2T data config\")\n self.config = {}\n if op.isfile(yaml_path):\n try:\n with open(yaml_path) as f:\n self.config = yaml.load(f, Loader=yaml.FullLoader)\n except Exception as e:\n logger.info(f\"Failed to load config from {yaml_path}: {e}\")\n else:\n logger.info(f\"Cannot find {yaml_path}\")\n\n @property\n def vocab_filename(self):\n \"\"\"fairseq vocabulary file under data root\"\"\"\n return self.config.get(\"vocab_filename\", \"dict.txt\")\n\n @property\n def shuffle(self) -> bool:\n \"\"\"Shuffle dataset samples before batching\"\"\"\n return self.config.get(\"shuffle\", False)\n\n @property\n def pre_tokenizer(self) -> Dict:\n \"\"\"Pre-tokenizer to apply before subword tokenization. Returning\n a dictionary with `tokenizer` providing the tokenizer name and\n the other items providing the tokenizer-specific arguments.\n Tokenizers are defined in `fairseq.data.encoders.*`\"\"\"\n return self.config.get(\"pre_tokenizer\", {\"tokenizer\": None})\n\n @property\n def bpe_tokenizer(self) -> Dict:\n \"\"\"Subword tokenizer to apply after pre-tokenization. Returning\n a dictionary with `bpe` providing the tokenizer name and\n the other items providing the tokenizer-specific arguments.\n Tokenizers are defined in `fairseq.data.encoders.*`\"\"\"\n return self.config.get(\"bpe_tokenizer\", {\"bpe\": None})\n\n @property\n def prepend_tgt_lang_tag(self) -> bool:\n \"\"\"Prepend target lang ID token as the target BOS (e.g. for to-many\n multilingual setting). During inference, this requires `--prefix-size 1`\n to force BOS to be lang ID token.\"\"\"\n return self.config.get(\"prepend_tgt_lang_tag\", False)\n\n @property\n def input_feat_per_channel(self):\n \"\"\"The dimension of input features (per audio channel)\"\"\"\n return self.config.get(\"input_feat_per_channel\", 80)\n\n @property\n def input_channels(self):\n \"\"\"The number of channels in the input audio\"\"\"\n return self.config.get(\"input_channels\", 1)\n\n @property\n def sampling_alpha(self):\n \"\"\"Hyper-parameter alpha = 1/T for temperature-based resampling.\n (alpha = 1 for no resampling)\"\"\"\n return self.config.get(\"sampling_alpha\", 1.0)\n\n @property\n def use_audio_input(self):\n \"\"\"Needed by the dataset loader to see if the model requires\n raw audio as inputs.\"\"\"\n return self.config.get(\"use_audio_input\", False)\n\n @property\n def audio_root(self):\n \"\"\"Audio paths in the manifest TSV can be relative and this provides\n the root path. 
Set this to empty string when using absolute paths.\"\"\"\n return self.config.get(\"audio_root\", \"\")\n\n def get_feature_transforms(self, split, is_train):\n \"\"\"Split-specific feature transforms. Allowing train set wildcard `_train`,\n evaluation set wildcard `_eval` and general wildcard `*` for matching.\"\"\"\n from copy import deepcopy\n\n cfg = deepcopy(self.config)\n _cur = cfg.get(\"transforms\", {})\n cur = _cur.get(split)\n cur = _cur.get(\"_train\") if cur is None and is_train else cur\n cur = _cur.get(\"_eval\") if cur is None and not is_train else cur\n cur = _cur.get(\"*\") if cur is None else cur\n cfg[\"transforms\"] = cur\n return cfg\n\n\ndef is_npy_data(data: bytes) -> bool:\n return data[0] == 147 and data[1] == 78\n\n\ndef is_flac_or_wav_data(data: bytes) -> bool:\n is_flac = data[0] == 102 and data[1] == 76\n is_wav = data[0] == 82 and data[1] == 73\n return is_flac or is_wav\n\n\ndef read_from_uncompressed_zip(file_path, offset, file_size) -> bytes:\n with open(file_path, \"rb\") as f:\n f.seek(offset)\n data = f.read(file_size)\n return data\n\n\ndef get_features_from_npy_or_audio(path):\n ext = op.splitext(op.basename(path))[1]\n if ext not in {\".npy\", \".flac\", \".wav\"}:\n raise ValueError(f'Unsupported file format for \"{path}\"')\n return np.load(path) if ext == \".npy\" else get_fbank(path)\n\n\ndef get_features_or_waveform_from_uncompressed_zip(\n path, byte_offset, byte_size, need_waveform=False\n):\n assert path.endswith(\".zip\")\n data = read_from_uncompressed_zip(path, byte_offset, byte_size)\n f = io.BytesIO(data)\n if is_npy_data(data):\n features_or_waveform = np.load(f)\n elif is_flac_or_wav_data(data):\n features_or_waveform = get_waveform(f)[0] if need_waveform else get_fbank(f)\n else:\n raise ValueError(f'Unknown file format for \"{path}\"')\n return features_or_waveform\n\n\ndef get_features_or_waveform(path: str, need_waveform=False):\n \"\"\"Get speech features from .npy file or waveform from .wav/.flac file.\n The file may be inside an uncompressed ZIP file and is accessed via byte\n offset and length.\n\n Args:\n path (str): File path in the format of \"<.npy/.wav/.flac path>\" or\n \"<zip path>:<byte offset>:<byte length>\".\n need_waveform (bool): return waveform instead of features.\n\n Returns:\n features_or_waveform (numpy.ndarray): speech features or waveform.\n \"\"\"\n _path, *extra = path.split(\":\")\n if not op.exists(_path):\n raise FileNotFoundError(f\"File not found: {_path}\")\n\n if len(extra) == 0:\n if need_waveform:\n return get_waveform(_path)\n return get_features_from_npy_or_audio(_path)\n elif len(extra) == 2:\n extra = [int(i) for i in extra]\n features_or_waveform = get_features_or_waveform_from_uncompressed_zip(\n _path, extra[0], extra[1], need_waveform=need_waveform\n )\n else:\n raise ValueError(f\"Invalid path: {path}\")\n\n return features_or_waveform\n\n\ndef _collate_frames(\n frames: List[torch.Tensor], is_audio_input: bool = False\n) -> torch.Tensor:\n \"\"\"\n Convert a list of 2D frames into a padded 3D tensor\n Args:\n frames (list): list of 2D frames of size L[i]*f_dim. 
Where L[i] is\n length of i-th frame and f_dim is static dimension of features\n Returns:\n 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]\n \"\"\"\n max_len = max(frame.size(0) for frame in frames)\n if is_audio_input:\n out = frames[0].new_zeros((len(frames), max_len))\n else:\n out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))\n for i, v in enumerate(frames):\n out[i, : v.size(0)] = v\n return out\n\n\nclass SpeechToTextDataset(FairseqDataset):\n LANG_TAG_TEMPLATE = \"<lang:{}>\"\n\n def __init__(\n self,\n split: str,\n is_train_split: bool,\n data_cfg: S2TDataConfig,\n audio_paths: List[str],\n n_frames: List[int],\n src_texts: Optional[List[str]] = None,\n tgt_texts: Optional[List[str]] = None,\n speakers: Optional[List[str]] = None,\n src_langs: Optional[List[str]] = None,\n tgt_langs: Optional[List[str]] = None,\n ids: Optional[List[str]] = None,\n tgt_dict: Optional[Dictionary] = None,\n pre_tokenizer=None,\n bpe_tokenizer=None,\n ):\n self.split, self.is_train_split = split, is_train_split\n self.data_cfg = data_cfg\n self.audio_paths, self.n_frames = audio_paths, n_frames\n self.n_samples = len(audio_paths)\n assert len(n_frames) == self.n_samples > 0\n assert src_texts is None or len(src_texts) == self.n_samples\n assert tgt_texts is None or len(tgt_texts) == self.n_samples\n assert speakers is None or len(speakers) == self.n_samples\n assert src_langs is None or len(src_langs) == self.n_samples\n assert tgt_langs is None or len(tgt_langs) == self.n_samples\n assert ids is None or len(ids) == self.n_samples\n assert (tgt_dict is None and tgt_texts is None) or (\n tgt_dict is not None and tgt_texts is not None\n )\n self.src_texts, self.tgt_texts = src_texts, tgt_texts\n self.src_langs, self.tgt_langs = src_langs, tgt_langs\n self.tgt_dict = tgt_dict\n self.check_tgt_lang_tag()\n self.ids = ids\n self.shuffle = data_cfg.shuffle if is_train_split else False\n\n self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict(\n self.data_cfg.get_feature_transforms(split, is_train_split)\n )\n\n self.pre_tokenizer = pre_tokenizer\n self.bpe_tokenizer = bpe_tokenizer\n\n logger.info(self.__repr__())\n\n def __repr__(self):\n return (\n self.__class__.__name__\n + f'(split=\"{self.split}\", n_samples={self.n_samples}, '\n f\"prepend_tgt_lang_tag={self.data_cfg.prepend_tgt_lang_tag}, \"\n f\"shuffle={self.shuffle}, transforms={self.feature_transforms})\"\n )\n\n @classmethod\n def is_lang_tag(cls, token):\n pattern = cls.LANG_TAG_TEMPLATE.replace(\"{}\", \"(.*)\")\n return re.match(pattern, token)\n\n def check_tgt_lang_tag(self):\n if self.data_cfg.prepend_tgt_lang_tag:\n assert self.tgt_langs is not None and self.tgt_dict is not None\n tgt_lang_tags = [\n self.LANG_TAG_TEMPLATE.format(t) for t in set(self.tgt_langs)\n ]\n assert all(t in self.tgt_dict for t in tgt_lang_tags)\n\n def tokenize_text(self, text: str):\n if self.pre_tokenizer is not None:\n text = self.pre_tokenizer.encode(text)\n if self.bpe_tokenizer is not None:\n text = self.bpe_tokenizer.encode(text)\n return text\n\n def __getitem__(\n self, index: int\n ) -> Tuple[int, torch.Tensor, Optional[torch.Tensor]]:\n source = get_features_or_waveform(\n self.audio_paths[index], need_waveform=self.data_cfg.use_audio_input\n )\n if self.feature_transforms is not None:\n assert not self.data_cfg.use_audio_input\n source = self.feature_transforms(source)\n source = torch.from_numpy(source).float()\n\n target = None\n if self.tgt_texts is not None:\n tokenized = 
self.tokenize_text(self.tgt_texts[index])\n target = self.tgt_dict.encode_line(\n tokenized, add_if_not_exist=False, append_eos=True\n ).long()\n if self.data_cfg.prepend_tgt_lang_tag:\n lang_tag = self.LANG_TAG_TEMPLATE.format(self.tgt_langs[index])\n lang_tag_idx = self.tgt_dict.index(lang_tag)\n target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)\n return index, source, target\n\n def __len__(self):\n return self.n_samples\n\n def collater(self, samples: List[Tuple[int, torch.Tensor, torch.Tensor]]) -> Dict:\n if len(samples) == 0:\n return {}\n indices = torch.tensor([i for i, _, _ in samples], dtype=torch.long)\n frames = _collate_frames(\n [s for _, s, _ in samples], self.data_cfg.use_audio_input\n )\n # sort samples by descending number of frames\n n_frames = torch.tensor([s.size(0) for _, s, _ in samples], dtype=torch.long)\n n_frames, order = n_frames.sort(descending=True)\n indices = indices.index_select(0, order)\n frames = frames.index_select(0, order)\n\n target, target_lengths = None, None\n prev_output_tokens = None\n ntokens = None\n if self.tgt_texts is not None:\n target = fairseq_data_utils.collate_tokens(\n [t for _, _, t in samples],\n self.tgt_dict.pad(),\n self.tgt_dict.eos(),\n left_pad=False,\n move_eos_to_beginning=False,\n )\n target = target.index_select(0, order)\n target_lengths = torch.tensor(\n [t.size(0) for _, _, t in samples], dtype=torch.long\n ).index_select(0, order)\n prev_output_tokens = fairseq_data_utils.collate_tokens(\n [t for _, _, t in samples],\n self.tgt_dict.pad(),\n self.tgt_dict.eos(),\n left_pad=False,\n move_eos_to_beginning=True,\n )\n prev_output_tokens = prev_output_tokens.index_select(0, order)\n ntokens = sum(t.size(0) for _, _, t in samples)\n\n out = {\n \"id\": indices,\n \"net_input\": {\n \"src_tokens\": frames,\n \"src_lengths\": n_frames,\n \"prev_output_tokens\": prev_output_tokens,\n },\n \"target\": target,\n \"target_lengths\": target_lengths,\n \"ntokens\": ntokens,\n \"nsentences\": len(samples),\n }\n return out\n\n def num_tokens(self, index):\n return self.n_frames[index]\n\n def size(self, index):\n t_len = 0\n if self.tgt_texts is not None:\n tokenized = self.tokenize_text(self.tgt_texts[index])\n t_len = len(tokenized.split(\" \"))\n return self.n_frames[index], t_len\n\n @property\n def sizes(self):\n return np.array(self.n_frames)\n\n @property\n def can_reuse_epoch_itr_across_epochs(self):\n return True\n\n def ordered_indices(self):\n if self.shuffle:\n order = [np.random.permutation(len(self))]\n else:\n order = [np.arange(len(self))]\n # first by descending order of # of frames then by original/random order\n order.append([-n for n in self.n_frames])\n return np.lexsort(order)\n\n def prefetch(self, indices):\n raise False\n\n\nclass SpeechToTextDatasetCreator(object):\n # mandatory columns\n KEY_ID, KEY_AUDIO, KEY_N_FRAMES = \"id\", \"audio\", \"n_frames\"\n KEY_TGT_TEXT = \"tgt_text\"\n # optional columns\n KEY_SPEAKER, KEY_SRC_TEXT = \"speaker\", \"src_text\"\n KEY_SRC_LANG, KEY_TGT_LANG = \"src_lang\", \"tgt_lang\"\n # default values\n DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = \"\"\n\n @classmethod\n def _from_list(\n cls,\n split_name: str,\n is_train_split,\n samples: List[List[Dict]],\n data_cfg: S2TDataConfig,\n tgt_dict,\n pre_tokenizer,\n bpe_tokenizer,\n ) -> SpeechToTextDataset:\n audio_paths, n_frames, src_texts, tgt_texts, ids = [], [], [], [], []\n speakers, src_langs, tgt_langs = [], [], []\n for s in samples:\n ids.extend([ss[cls.KEY_ID] for ss in s])\n 
audio_paths.extend(\n [op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s]\n )\n n_frames.extend([int(ss[cls.KEY_N_FRAMES]) for ss in s])\n tgt_texts.extend([ss[cls.KEY_TGT_TEXT] for ss in s])\n src_texts.extend(\n [ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s]\n )\n speakers.extend([ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s])\n src_langs.extend([ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s])\n tgt_langs.extend([ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s])\n return SpeechToTextDataset(\n split_name,\n is_train_split,\n data_cfg,\n audio_paths,\n n_frames,\n src_texts,\n tgt_texts,\n speakers,\n src_langs,\n tgt_langs,\n ids,\n tgt_dict,\n pre_tokenizer,\n bpe_tokenizer,\n )\n\n @classmethod\n def _get_size_ratios(cls, ids: List[str], sizes: List[int], alpha: float = 1.0):\n \"\"\"Size ratios for temperature-based sampling\n (https://arxiv.org/abs/1907.05019)\"\"\"\n _sizes = np.array(sizes)\n prob = _sizes / _sizes.sum()\n smoothed_prob = prob ** alpha\n smoothed_prob = smoothed_prob / smoothed_prob.sum()\n size_ratio = (smoothed_prob * _sizes.sum()) / _sizes\n\n o_str = str({_i: f\"{prob[i]:.3f}\" for i, _i in enumerate(ids)})\n logger.info(f\"original sampling probability: {o_str}\")\n p_str = str({_i: f\"{smoothed_prob[i]:.3f}\" for i, _i in enumerate(ids)})\n logger.info(f\"balanced sampling probability: {p_str}\")\n sr_str = str({_id: f\"{size_ratio[i]:.3f}\" for i, _id in enumerate(ids)})\n logger.info(f\"balanced sampling size ratio: {sr_str}\")\n return size_ratio.tolist()\n\n @classmethod\n def from_tsv(\n cls,\n root: str,\n data_cfg: S2TDataConfig,\n splits: str,\n tgt_dict,\n pre_tokenizer,\n bpe_tokenizer,\n is_train_split: bool,\n epoch: int,\n seed: int,\n ) -> SpeechToTextDataset:\n samples = []\n _splits = splits.split(\",\")\n for split in _splits:\n tsv_path = op.join(root, f\"{split}.tsv\")\n if not op.isfile(tsv_path):\n raise FileNotFoundError(f\"Dataset not found: {tsv_path}\")\n with open(tsv_path) as f:\n reader = csv.DictReader(\n f,\n delimiter=\"\\t\",\n quotechar=None,\n doublequote=False,\n lineterminator=\"\\n\",\n quoting=csv.QUOTE_NONE,\n )\n samples.append([dict(e) for e in reader])\n assert len(samples) > 0\n\n datasets = [\n cls._from_list(\n name,\n is_train_split,\n [s],\n data_cfg,\n tgt_dict,\n pre_tokenizer,\n bpe_tokenizer,\n )\n for name, s in zip(_splits, samples)\n ]\n\n if is_train_split and len(_splits) > 1 and data_cfg.sampling_alpha != 1.0:\n # temperature-based sampling\n size_ratios = cls._get_size_ratios(\n _splits, [len(s) for s in samples], alpha=data_cfg.sampling_alpha\n )\n datasets = [\n ResamplingDataset(\n d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)\n )\n for d, r in zip(datasets, size_ratios)\n ]\n return ConcatDataset(datasets)\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nfrom loguru import logger\nimport os\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom .. import FairseqDataset\nfrom fairseq import utils_loguru\n\n\nlogger = logger.patch(utils_loguru.loguru_name_patcher)\n\n\nclass RawAudioDataset(FairseqDataset):\n def __init__(\n self,\n sample_rate,\n max_sample_size=None,\n min_sample_size=None,\n shuffle=True,\n min_length=0,\n pad=False,\n normalize=False,\n ):\n super().__init__()\n\n self.sample_rate = sample_rate\n self.sizes = []\n self.max_sample_size = (\n max_sample_size if max_sample_size is not None else sys.maxsize\n )\n self.min_sample_size = min_sample_size\n self.min_length = min_length\n self.pad = pad\n self.shuffle = shuffle\n self.normalize = normalize\n\n def __getitem__(self, index):\n raise NotImplementedError()\n\n def __len__(self):\n return len(self.sizes)\n\n def postprocess(self, feats, curr_sample_rate):\n if feats.dim() == 2:\n feats = feats.mean(-1)\n\n if curr_sample_rate != self.sample_rate:\n raise Exception(f\"sample rate: {curr_sample_rate}, need {self.sample_rate}\")\n\n assert feats.dim() == 1, feats.dim()\n\n if self.normalize:\n with torch.no_grad():\n feats = F.layer_norm(feats, feats.shape)\n return feats\n\n def crop_to_max_size(self, wav, target_size):\n size = len(wav)\n diff = size - target_size\n if diff <= 0:\n return wav\n\n start = np.random.randint(0, diff + 1)\n end = size - diff + start\n return wav[start:end]\n\n def collater(self, samples):\n samples = [s for s in samples if s[\"source\"] is not None]\n if len(samples) == 0:\n return {}\n\n sources = [s[\"source\"] for s in samples]\n sizes = [len(s) for s in sources]\n\n if self.pad:\n target_size = min(max(sizes), self.max_sample_size)\n else:\n target_size = min(min(sizes), self.max_sample_size)\n\n collated_sources = sources[0].new_zeros(len(sources), target_size)\n padding_mask = (\n torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None\n )\n for i, (source, size) in enumerate(zip(sources, sizes)):\n diff = size - target_size\n if diff == 0:\n collated_sources[i] = source\n elif diff < 0:\n assert self.pad\n collated_sources[i] = torch.cat(\n [source, source.new_full((-diff,), 0.0)]\n )\n padding_mask[i, diff:] = True\n else:\n collated_sources[i] = self.crop_to_max_size(source, target_size)\n\n input = {\"source\": collated_sources}\n if self.pad:\n input[\"padding_mask\"] = padding_mask\n return {\"id\": torch.LongTensor([s[\"id\"] for s in samples]), \"net_input\": input}\n\n def num_tokens(self, index):\n return self.size(index)\n\n def size(self, index):\n \"\"\"Return an example's size as a float or tuple. This value is used when\n filtering a dataset with ``--max-positions``.\"\"\"\n if self.pad:\n return self.sizes[index]\n return min(self.sizes[index], self.max_sample_size)\n\n def ordered_indices(self):\n \"\"\"Return an ordered list of indices. 
Batches will be constructed based\n on this order.\"\"\"\n\n if self.shuffle:\n order = [np.random.permutation(len(self))]\n else:\n order = [np.arange(len(self))]\n\n order.append(self.sizes)\n return np.lexsort(order)[::-1]\n\n\nclass FileAudioDataset(RawAudioDataset):\n def __init__(\n self,\n manifest_path,\n sample_rate,\n max_sample_size=None,\n min_sample_size=None,\n shuffle=True,\n min_length=0,\n pad=False,\n normalize=False,\n ):\n super().__init__(\n sample_rate=sample_rate,\n max_sample_size=max_sample_size,\n min_sample_size=min_sample_size,\n shuffle=shuffle,\n min_length=min_length,\n pad=pad,\n normalize=normalize,\n )\n\n self.fnames = []\n\n skipped = 0\n with open(manifest_path, \"r\") as f:\n self.root_dir = f.readline().strip()\n for line in f:\n items = line.strip().split(\"\\t\")\n assert len(items) == 2, line\n sz = int(items[1])\n if min_length is not None and sz < min_length:\n skipped += 1\n continue\n self.fnames.append(items[0])\n self.sizes.append(sz)\n logger.info(f\"loaded {len(self.fnames)}, skipped {skipped} samples\")\n\n def __getitem__(self, index):\n import soundfile as sf\n\n fname = os.path.join(self.root_dir, self.fnames[index])\n wav, curr_sample_rate = sf.read(fname)\n feats = torch.from_numpy(wav).float()\n feats = self.postprocess(feats, curr_sample_rate)\n return {\"id\": index, \"source\": feats}\n"
] |
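The `_get_size_ratios` helper in the speech-to-text loader above implements the temperature-based sampling of https://arxiv.org/abs/1907.05019: per-split probabilities are raised to the power alpha, renormalized, and converted back into per-split up/down-sampling ratios. A minimal standalone sketch of that arithmetic, assuming made-up split sizes (the numbers are illustrative, not from any manifest):

import numpy as np

def size_ratios(sizes, alpha=0.5):
    # Same math as SpeechToTextDatasetCreator._get_size_ratios above,
    # detached from logging and the Dataset classes.
    sizes = np.asarray(sizes, dtype=float)
    prob = sizes / sizes.sum()            # original sampling probability
    smoothed = prob ** alpha              # temperature smoothing (alpha = 1/T)
    smoothed = smoothed / smoothed.sum()  # renormalize to a distribution
    return (smoothed * sizes.sum()) / sizes

print(size_ratios([100000, 5000, 1000]))  # small splits get ratios > 1 (upsampled)

With alpha = 1.0 every ratio comes out exactly 1, which is why `from_tsv` above only builds `ResamplingDataset` wrappers when `data_cfg.sampling_alpha != 1.0`.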
[
[
"numpy.maximum.reduce"
],
[
"numpy.array",
"numpy.lexsort",
"numpy.load",
"torch.from_numpy",
"torch.tensor",
"torch.LongTensor"
],
[
"torch.nn.functional.layer_norm",
"numpy.lexsort",
"torch.no_grad",
"torch.from_numpy",
"torch.LongTensor",
"numpy.random.randint",
"torch.BoolTensor"
]
] |
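`get_features_or_waveform` in the same speech-to-text file accepts either a plain audio/feature path or a "<zip path>:<byte offset>:<byte length>" triple for entries stored inside an uncompressed ZIP. A small sketch of just the path-splitting branch, assuming a hypothetical helper name (`parse_audio_path` is not a fairseq function) and touching no files:

def parse_audio_path(path: str):
    # Mirrors the `_path, *extra = path.split(":")` branching above.
    _path, *extra = path.split(":")
    if len(extra) == 0:
        return _path, None, None                    # plain .npy/.wav/.flac file
    if len(extra) == 2:
        return _path, int(extra[0]), int(extra[1])  # slice of an uncompressed zip
    raise ValueError(f"Invalid path: {path}")

print(parse_audio_path("feats/train.zip:1024:4096"))  # ('feats/train.zip', 1024, 4096)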
YiyiLiao/deep_marching_cubes
|
[
"6fce0b26d110a6c839b6d46ea2ab67b5bdb470b2"
] |
[
"marching_cube/model/cffi/functions/occupancy_to_topology.py"
] |
[
"# functions/add.py\nimport torch\nfrom torch.autograd import Function\nfrom _ext import forward_utils \nif torch.cuda.is_available():\n from _ext import forward_utils_cuda \n\n\nclass OccupancyToTopology(Function):\n \"\"\" Convert the occupancy probability to topology probability\n see ../src/occupancy_to_topology.c \n ../src/occupancy_connectivity_cuda.c\n ../src/occupancy_to_topology_kernel.cu \n for more details\n \"\"\"\n def forward(self, occupancy):\n W = occupancy.size()[0]-1\n H = occupancy.size()[1]-1\n D = occupancy.size()[2]-1\n\n T = 256\n if not occupancy.is_cuda:\n topology = torch.zeros(W*H*D, T).type(torch.FloatTensor)\n forward_utils.occupancy_to_topology_forward(occupancy, topology)\n else:\n topology = torch.zeros(W*H*D, T).type(torch.FloatTensor).cuda()\n forward_utils_cuda.occupancy_to_topology_cuda_forward(occupancy, topology)\n\n self.occupancy = occupancy\n self.topology = topology \n\n return topology \n\n def backward(self, grad_output):\n if not grad_output.is_cuda:\n grad_occupancy = torch.zeros(self.occupancy.size()).type(torch.FloatTensor)\n forward_utils.occupancy_to_topology_backward(grad_output, self.occupancy, self.topology, grad_occupancy)\n else:\n grad_occupancy = torch.zeros(self.occupancy.size()).type(torch.FloatTensor).cuda()\n forward_utils_cuda.occupancy_to_topology_cuda_backward(grad_output, self.occupancy, self.topology, grad_occupancy)\n # we only need gradient on feat_points\n return grad_occupancy \n"
] |
[
[
"torch.zeros",
"torch.cuda.is_available"
]
] |
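The OccupancyToTopology function above uses the legacy torch.autograd.Function style, stashing `occupancy` and `topology` on `self` so that `backward` can reuse them. A minimal sketch of the same save-for-backward pattern in the modern static-method API, with a trivial elementwise square standing in for the C/CUDA topology kernels (which are not reproduced here):

import torch

class Square(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)      # plays the role of `self.occupancy = occupancy`
        return x * x

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        return grad_output * 2 * x    # chain rule: d(x^2)/dx = 2x

x = torch.randn(3, requires_grad=True)
Square.apply(x).sum().backward()
print(torch.allclose(x.grad, 2 * x.detach()))  # True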
adsar/tensorflow
|
[
"b4b2575ec4bf7e6da2686505f61b5f16cb9273ab"
] |
[
"tensorflow/python/ops/nn_ops.py"
] |
[
"# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Wrappers for primitive Neural Net (NN) Operations.\"\"\"\n\n# pylint: disable=invalid-name\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.client import graph_util\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import common_shapes\nfrom tensorflow.python.ops import gen_nn_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import random_ops\n# pylint: disable=wildcard-import\nfrom tensorflow.python.ops.gen_nn_ops import *\n# pylint: enable=wildcard-import\n\n\n# Aliases for some automatically-generated names.\nlocal_response_normalization = gen_nn_ops.lrn\n\n\ndef conv2d_transpose(value, filter, output_shape, strides, padding=\"SAME\",\n name=None):\n \"\"\"The transpose of `conv2d`.\n\n This operation is sometimes called \"deconvolution\" after (Deconvolutional\n Networks)[http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf], but is\n actually the transpose (gradient) of `conv2d` rather than an actual\n deconvolution.\n\n Args:\n value: A 4-D `Tensor` of type `float` and shape\n `[batch, height, width, in_channels]`.\n filter: A 4-D `Tensor` with the same type as `value` and shape\n `[height, width, output_channels, in_channels]`. `filter`'s\n `in_channels` dimension must match that of `value`.\n output_shape: A 1-D `Tensor` representing the output shape of the\n deconvolution op.\n strides: A list of ints. The stride of the sliding window for each\n dimension of the input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. 
The padding algorithm.\n name: Optional name for the returned tensor.\n\n Returns:\n A `Tensor` with the same type as `value`.\n\n Raises:\n ValueError: If input/output depth does not match `filter`'s shape, or if\n padding is other than `'VALID'` or `'SAME'`.\n \"\"\"\n with ops.op_scope([value, filter, output_shape], name,\n \"conv2d_transpose\") as name:\n value = ops.convert_to_tensor(value, name=\"value\")\n filter = ops.convert_to_tensor(filter, name=\"filter\")\n if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):\n raise ValueError(\n \"input channels does not match filter's input channels, \"\n \"{} != {}\".format(value.get_shape()[3], filter.get_shape()[3]))\n\n output_shape_ = ops.convert_to_tensor(output_shape, name=\"output_shape\")\n if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):\n raise ValueError(\"output_shape must have shape (4,), got {}\"\n .format(output_shape_.get_shape()))\n\n if isinstance(output_shape, (list, np.ndarray)):\n # output_shape's shape should be == [4] if reached this point.\n if not filter.get_shape()[2].is_compatible_with(output_shape[3]):\n raise ValueError(\n \"output_shape does not match filter's output channels, \"\n \"{} != {}\".format(output_shape[3], filter.get_shape()[2]))\n\n if padding != \"VALID\" and padding != \"SAME\":\n raise ValueError(\"padding must be either VALID or SAME:\"\n \" {}\".format(padding))\n\n return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,\n filter=filter,\n out_backprop=value,\n strides=strides,\n padding=padding,\n name=name)\n\n\n# pylint: disable=protected-access\ndef bias_add(value, bias, data_format=None, name=None):\n \"\"\"Adds `bias` to `value`.\n\n This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.\n Broadcasting is supported, so `value` may have any number of dimensions.\n Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the\n case where both types are quantized.\n\n Args:\n value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,\n `int16`, `int8`, or `complex64`.\n bias: A 1-D `Tensor` with size matching the last dimension of `value`.\n Must be the same type as `value` unless `value` is a quantized type,\n in which case a different quantized type may be used.\n data_format: A string. 
'NHWC' and 'NCHW\" are supported.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `value`.\n \"\"\"\n with ops.op_scope([value, bias], name, \"BiasAdd\") as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n bias = ops.convert_to_tensor(bias, dtype=value.dtype, name=\"bias\")\n return gen_nn_ops._bias_add(value, bias, data_format=data_format, name=name)\n\nops.RegisterShape(\"BiasAdd\")(common_shapes.bias_add_shape)\n\n\nops.RegisterShape(\"BiasAddGrad\")(common_shapes.bias_add_grad_shape)\n\n\n# pylint: disable=protected-access\ndef bias_add_v1(value, bias, name=None):\n \"\"\"Adds `bias` to `value`.\n\n This is a deprecated version of bias_add and will soon to be removed.\n\n This is (mostly) a special case of `tf.add` where `bias` is restricted to 1-D.\n Broadcasting is supported, so `value` may have any number of dimensions.\n Unlike `tf.add`, the type of `bias` is allowed to differ from `value` in the\n case where both types are quantized.\n\n Args:\n value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,\n `int16`, `int8`, or `complex64`.\n bias: A 1-D `Tensor` with size matching the last dimension of `value`.\n Must be the same type as `value` unless `value` is a quantized type,\n in which case a different quantized type may be used.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `value`.\n \"\"\"\n with ops.op_scope([value, bias], name, \"BiasAddV1\") as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n bias = ops.convert_to_tensor(bias, dtype=value.dtype, name=\"bias\")\n return gen_nn_ops._bias_add_v1(value, bias, name=name)\n\n\nops.RegisterShape(\"BiasAddV1\")(common_shapes.bias_add_shape)\n\n\nops.RegisterShape(\"BiasAddGradV1\")(common_shapes.bias_add_grad_shape)\n\n\n\ndef relu6(features, name=None):\n \"\"\"Computes Rectified Linear 6: `min(max(features, 0), 6)`.\n\n Args:\n features: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,\n `int16`, or `int8`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` with the same type as `features`.\n \"\"\"\n with ops.op_scope([features], name, \"Relu6\") as name:\n features = ops.convert_to_tensor(features, name=\"features\")\n return gen_nn_ops._relu6(features, name=name)\n\n\ndef softmax_cross_entropy_with_logits(logits, labels, name=None):\n \"\"\"Computes softmax cross entropy between `logits` and `labels`.\n\n Measures the probability error in discrete classification tasks in which the\n classes are mutually exclusive (each entry is in exactly one class). For\n example, each CIFAR-10 image is labeled with one and only one label: an image\n can be a dog or a truck, but not both.\n\n **NOTE:** While the classes are mutually exclusive, their probabilities\n need not be. All that is required is that each row of `labels` is\n a valid probability distribution. If using exclusive `labels`\n (wherein one and only one class is true at a time), see\n `sparse_softmax_cross_entropy_with_logits`.\n\n **WARNING:** This op expects unscaled logits, since it performs a `softmax`\n on `logits` internally for efficiency. 
Do not call this op with the\n output of `softmax`, as it will produce incorrect results.\n\n `logits` and `labels` must have the same shape `[batch_size, num_classes]`\n and the same dtype (either `float32` or `float64`).\n\n Args:\n logits: Unscaled log probabilities.\n labels: Each row `labels[i]` must be a valid probability distribution.\n name: A name for the operation (optional).\n\n Returns:\n A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the\n softmax cross entropy loss.\n \"\"\"\n # The second output tensor contains the gradients. We use it in\n # _CrossEntropyGrad() in nn_grad but not here.\n cost, unused_backprop = gen_nn_ops._softmax_cross_entropy_with_logits(\n logits, labels, name=name)\n return cost\n\n\ndef sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):\n \"\"\"Computes sparse softmax cross entropy between `logits` and `labels`.\n\n Measures the probability error in discrete classification tasks in which the\n classes are mutually exclusive (each entry is in exactly one class). For\n example, each CIFAR-10 image is labeled with one and only one label: an image\n can be a dog or a truck, but not both.\n\n **NOTE:** For this operation, the probability of a given label is considered\n exclusive. That is, soft classes are not allowed, and the `labels` vector\n must provide a single specific index for the true class for each row of\n `logits` (each minibatch entry). For soft softmax classification with\n a probability distribution for each entry, see\n `softmax_cross_entropy_with_logits`.\n\n **WARNING:** This op expects unscaled logits, since it performs a `softmax`\n on `logits` internally for efficiency. Do not call this op with the\n output of `softmax`, as it will produce incorrect results.\n\n `logits` and must have the shape `[batch_size, num_classes]`\n and the dtype (either `float32` or `float64`).\n\n `labels` must have the shape `[batch_size]` and the dtype `int64`.\n\n Args:\n logits: Unscaled log probabilities.\n labels: Each entry `labels[i]` must be an index in `[0, num_classes)`.\n name: A name for the operation (optional).\n\n Returns:\n A 1-D `Tensor` of length `batch_size` of the same type as `logits` with the\n softmax cross entropy loss.\n \"\"\"\n # The second output tensor contains the gradients. 
We use it in\n # _CrossEntropyGrad() in nn_grad but not here.\n cost, unused_backprop = gen_nn_ops._sparse_softmax_cross_entropy_with_logits(\n logits, labels, name=name)\n return cost\n\n\[email protected](\"SparseSoftmaxCrossEntropyWithLogits\")\ndef _SparseSoftmaxCrossEntropyWithLogitsShape(op):\n \"\"\"Shape function for SparseSoftmaxCrossEntropyWithLogits op.\"\"\"\n logits_shape = op.inputs[0].get_shape()\n input_shape = logits_shape.with_rank(2)\n batch_size = input_shape[0]\n # labels_shape\n op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))\n return [tensor_shape.vector(batch_size.value), input_shape]\n\n\[email protected](\"SoftmaxCrossEntropyWithLogits\")\ndef _SoftmaxCrossEntropyWithLogitsShape(op):\n \"\"\"Shape function for SoftmaxCrossEntropyWithLogits op.\"\"\"\n logits_shape = op.inputs[0].get_shape()\n labels_shape = op.inputs[1].get_shape()\n input_shape = logits_shape.merge_with(labels_shape).with_rank(2)\n batch_size = input_shape[0]\n return [tensor_shape.vector(batch_size.value), input_shape]\n\n\ndef avg_pool(value, ksize, strides, padding, data_format=\"NHWC\", name=None):\n \"\"\"Performs the average pooling on the input.\n\n Each entry in `output` is the mean of the corresponding size `ksize`\n window in `value`.\n\n Args:\n value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and type\n `float32`, `float64`, `qint8`, `quint8`, or `qint32`.\n ksize: A list of ints that has length >= 4.\n The size of the window for each dimension of the input tensor.\n strides: A list of ints that has length >= 4.\n The stride of the sliding window for each dimension of the\n input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n data_format: A string. 'NHWC' and 'NCHW\" are supported.\n name: Optional name for the operation.\n\n Returns:\n A `Tensor` with the same type as `value`. The average pooled output tensor.\n \"\"\"\n with ops.op_scope([value], name, \"AvgPool\") as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n return gen_nn_ops._avg_pool(value, ksize=ksize, strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n\ndef max_pool(value, ksize, strides, padding, data_format=\"NHWC\", name=None):\n \"\"\"Performs the max pooling on the input.\n\n Args:\n value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and\n type `tf.float32`.\n ksize: A list of ints that has length >= 4. The size of the window for\n each dimension of the input tensor.\n strides: A list of ints that has length >= 4. The stride of the sliding\n window for each dimension of the input tensor.\n padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.\n data_format: A string. 'NHWC' and 'NCHW\" are supported.\n name: Optional name for the operation.\n\n Returns:\n A `Tensor` with type `tf.float32`. 
The max pooled output tensor.\n \"\"\"\n with ops.op_scope([value], name, \"MaxPool\") as name:\n value = ops.convert_to_tensor(value, name=\"input\")\n return gen_nn_ops._max_pool(value, ksize=ksize, strides=strides,\n padding=padding,\n data_format=data_format,\n name=name)\n\n\nops.RegisterShape(\"Relu\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Relu6\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Elu\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Softplus\")(common_shapes.unchanged_shape)\nops.RegisterShape(\"Softsign\")(common_shapes.unchanged_shape)\n\n\[email protected](\"ReluGrad\")\[email protected](\"Relu6Grad\")\[email protected](\"EluGrad\")\[email protected](\"SoftplusGrad\")\[email protected](\"SoftsignGrad\")\ndef _BinaryElementwiseShape(op):\n \"\"\"Returns same shape as both inputs to op.\n\n Args:\n op: Input operation.\n\n Returns:\n Shape of both inputs to `op`.\n \"\"\"\n return [op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())]\n\n\nops.RegisterShape(\"L2Loss\")(common_shapes.scalar_shape)\n\n\nops.RegisterShape(\"LRN\")(common_shapes.unchanged_shape_with_rank(4))\n\n\[email protected](\"LRNGrad\")\ndef _LRNGradShape(op):\n \"\"\"Shape function for LRNGrad op.\"\"\"\n in_grads_shape = op.inputs[0].get_shape().with_rank(4)\n in_image_shape = op.inputs[1].get_shape().with_rank(4)\n out_image_shape = op.inputs[2].get_shape().with_rank(4)\n return [in_grads_shape.merge_with(in_image_shape).merge_with(out_image_shape)]\n\n\nops.RegisterShape(\"Softmax\")(\n common_shapes.unchanged_shape_with_rank(2))\n\n\[email protected](\"InTopK\")\ndef _InTopKShape(op):\n \"\"\"Shape function for InTopK op.\"\"\"\n predictions_shape = op.inputs[0].get_shape().with_rank(2)\n targets_shape = op.inputs[1].get_shape().with_rank(1)\n batch_size = predictions_shape[0].merge_with(targets_shape[0])\n return [tensor_shape.vector(batch_size.value)]\n\n\[email protected](\"TopK\")\[email protected](\"TopKV2\")\ndef _TopKShape(op):\n \"\"\"Shape function for TopK and TopKV2 ops.\"\"\"\n input_shape = op.inputs[0].get_shape().with_rank_at_least(1)\n if len(op.inputs) >= 2:\n k = tensor_util.constant_value(op.inputs[1])\n else:\n k = op.get_attr(\"k\")\n last = input_shape[-1].value\n if last is not None and k is not None and last < k:\n raise ValueError(\"input.shape %s must have last dimension >= k = %d\" %\n (input_shape, k))\n output_shape = input_shape[:-1].concatenate([k])\n return [output_shape, output_shape]\n\n\[email protected](\"BatchNormWithGlobalNormalization\")\ndef _BatchNormShape(op):\n \"\"\"Shape function for BatchNormWithGlobalNormalization op.\"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n mean_shape = op.inputs[1].get_shape().with_rank(1)\n var_shape = op.inputs[2].get_shape().with_rank(1)\n beta_shape = op.inputs[3].get_shape().with_rank(1)\n gamma_shape = op.inputs[4].get_shape().with_rank(1)\n mean_shape[0].merge_with(input_shape[3])\n var_shape[0].merge_with(input_shape[3])\n beta_shape[0].merge_with(input_shape[3])\n gamma_shape[0].merge_with(input_shape[3])\n return [input_shape]\n\n\[email protected](\"BatchNormWithGlobalNormalizationGrad\")\ndef _BatchNormGradShape(op):\n \"\"\"Shape function for BatchNormWithGlobalNormalizationGrad op.\"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n mean_shape = op.inputs[1].get_shape().with_rank(1)\n var_shape = op.inputs[2].get_shape().with_rank(1)\n beta_shape = op.inputs[3].get_shape().with_rank(1)\n out_backprop_shape = op.inputs[4].get_shape().with_rank(4)\n 
input_shape = input_shape.merge_with(out_backprop_shape)\n vector_dim = input_shape[3]\n vector_dim = vector_dim.merge_with(mean_shape[0])\n vector_dim = vector_dim.merge_with(var_shape[0])\n vector_dim = vector_dim.merge_with(beta_shape[0])\n return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)\n\n\nops.RegisterShape(\"Conv2D\")(common_shapes.conv2d_shape)\nops.RegisterShape(\"DepthwiseConv2dNative\")(\n common_shapes.depthwise_conv2d_native_shape)\nops.RegisterShape(\"AvgPool\")(common_shapes.avg_pool_shape)\nops.RegisterShape(\"MaxPool\")(common_shapes.max_pool_shape)\n\n\[email protected](\"MaxPoolWithArgmax\")\ndef _MaxPoolWithArgMaxShape(op):\n \"\"\"Shape function for MaxPoolWithArgmax op.\"\"\"\n return common_shapes.max_pool_shape(op) * 2\n\n\[email protected](\"AvgPoolGrad\")\ndef _AvgPoolGradShape(op):\n \"\"\"Shape function for the AvgPoolGrad op.\"\"\"\n orig_input_shape = tensor_util.constant_value(op.inputs[0])\n if orig_input_shape is not None:\n return [tensor_shape.TensorShape(orig_input_shape.tolist())]\n else:\n # NOTE(mrry): We could in principle work out the shape from the\n # gradients and the attrs, but if we do not know orig_input_shape\n # statically, then we are unlikely to know the shape of the\n # gradients either.\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"Conv2DBackpropFilter\")\ndef _Conv2DBackpropFilterShape(op):\n \"\"\"Shape function for the Conv2DBackpropFilter op.\"\"\"\n filter_shape = tensor_util.constant_value(op.inputs[1])\n if filter_shape is not None:\n return [tensor_shape.TensorShape(filter_shape.tolist())]\n else:\n # NOTE(mrry): We could in principle work out the shape from the\n # gradients and the attrs, but if we do not know filter_shape\n # statically, then we are unlikely to know the shape of the\n # gradients either.\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"Conv2DBackpropInput\")\ndef _Conv2DBackpropInputShape(op):\n \"\"\"Shape function for the Conv2DBackpropInput op.\"\"\"\n input_shape = tensor_util.constant_value(op.inputs[0])\n if input_shape is not None:\n return [tensor_shape.TensorShape(input_shape.tolist())]\n else:\n # NOTE(mrry): We could in principle work out the shape from the\n # gradients and the attrs, but if we do not know input_shape\n # statically, then we are unlikely to know the shape of the\n # gradients either.\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"DepthwiseConv2dNativeBackpropFilter\")\ndef _DepthwiseConv2dNativeBackpropFilterShape(op):\n \"\"\"Shape function for the DepthwiseConv2dNativeBackpropFilter op.\"\"\"\n filter_shape = tensor_util.constant_value(op.inputs[1])\n if filter_shape is not None:\n return [tensor_shape.TensorShape(filter_shape.tolist())]\n else:\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"DepthwiseConv2dNativeBackpropInput\")\ndef _DepthwiseConv2dNativeBackpropInputShape(op):\n \"\"\"Shape function for the DepthwiseConv2dNativeBackpropInput op.\"\"\"\n input_shape = tensor_util.constant_value(op.inputs[0])\n if input_shape is not None:\n return [tensor_shape.TensorShape(input_shape.tolist())]\n else:\n return [tensor_shape.unknown_shape(ndims=4)]\n\n\[email protected](\"MaxPoolGrad\")\[email protected](\"MaxPoolGradWithArgmax\")\ndef _MaxPoolGradShape(op):\n \"\"\"Shape function for the MaxPoolGrad op.\"\"\"\n orig_input_shape = op.inputs[0].get_shape().with_rank(4)\n return [orig_input_shape]\n\n\[email protected](\"Conv2D\", \"flops\")\ndef _calc_conv_flops(graph, 
node):\n \"\"\"Calculates the compute resources needed for Conv2D.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n filter_shape = graph_util.tensor_shape_from_node_def_name(graph,\n node.input[1])\n filter_shape.assert_is_fully_defined()\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n filter_height = int(filter_shape[0])\n filter_width = int(filter_shape[1])\n filter_in_depth = int(filter_shape[2])\n output_count = np.prod(output_shape.as_list())\n return ops.OpStats(\"flops\", (output_count * filter_in_depth * filter_height *\n filter_width * 2))\n\n\[email protected](\"Conv2D\", \"weight_parameters\")\ndef _calc_conv_weight_params(graph, node):\n \"\"\"Calculates the on-disk size of the weights for Conv2D.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n filter_shape = graph_util.tensor_shape_from_node_def_name(graph,\n node.input[1])\n filter_shape.assert_is_fully_defined()\n output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)\n output_shape.assert_is_fully_defined()\n filter_height = int(filter_shape[0])\n filter_width = int(filter_shape[1])\n filter_in_depth = int(filter_shape[2])\n filter_out_depth = int(filter_shape[3])\n return ops.OpStats(\"weight_parameters\", (filter_height * filter_width *\n filter_in_depth * filter_out_depth))\n\n\[email protected](\"BiasAdd\", \"flops\")\ndef _calc_bias_add_flops(graph, node):\n \"\"\"Calculates the computing needed for BiasAdd.\"\"\"\n input_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])\n input_shape.assert_is_fully_defined()\n input_count = np.prod(input_shape.as_list())\n return ops.OpStats(\"flops\", input_count)\n\n\[email protected](\"BiasAdd\", \"weight_parameters\")\ndef _calc_bias_add_weight_params(graph, node):\n \"\"\"Calculates the on-disk weight parameters for BiasAdd.\"\"\"\n bias_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[1])\n bias_shape.assert_is_fully_defined()\n bias_count = np.prod(bias_shape.as_list())\n return ops.OpStats(\"weight_parameters\", bias_count)\n\n\ndef xw_plus_b(x, weights, biases, name=None): # pylint: disable=invalid-name\n \"\"\"Computes matmul(x, weights) + biases.\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optional). If not specified\n \"xw_plus_b\" is used.\n\n Returns:\n A 2-D Tensor computing matmul(x, weights) + biases.\n Dimensions typically: batch, out_units.\n \"\"\"\n with ops.op_scope([x, weights, biases], name, \"xw_plus_b\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n biases = ops.convert_to_tensor(biases, name=\"biases\")\n mm = math_ops.matmul(x, weights)\n return bias_add(mm, biases, name=name)\n\n\ndef xw_plus_b_v1(x, weights, biases, name=None): # pylint: disable=invalid-name\n \"\"\"Computes matmul(x, weights) + biases.\n\n This is a deprecated version of that will soon be removed.\n\n Args:\n x: a 2D tensor. Dimensions typically: batch, in_units\n weights: a 2D tensor. Dimensions typically: in_units, out_units\n biases: a 1D tensor. Dimensions: out_units\n name: A name for the operation (optional). 
If not specified\n \"xw_plus_b_v1\" is used.\n\n Returns:\n A 2-D Tensor computing matmul(x, weights) + biases.\n Dimensions typically: batch, out_units.\n \"\"\"\n with ops.op_scope([x, weights, biases], name, \"xw_plus_b_v1\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n weights = ops.convert_to_tensor(weights, name=\"weights\")\n biases = ops.convert_to_tensor(biases, name=\"biases\")\n mm = math_ops.matmul(x, weights)\n return bias_add_v1(mm, biases, name=name)\n\n\n# pylint: disable=invalid-name\ndef dropout(x, keep_prob, noise_shape=None, seed=None, name=None):\n \"\"\"Computes dropout.\n\n With probability `keep_prob`, outputs the input element scaled up by\n `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected\n sum is unchanged.\n\n By default, each element is kept or dropped independently. If `noise_shape`\n is specified, it must be\n [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)\n to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`\n will make independent decisions. For example, if `shape(x) = [k, l, m, n]`\n and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be\n kept independently and each row and column will be kept or not kept together.\n\n Args:\n x: A tensor.\n keep_prob: A scalar `Tensor` with the same type as x. The probability\n that each element is kept.\n noise_shape: A 1-D `Tensor` of type `int32`, representing the\n shape for randomly generated keep/drop flags.\n seed: A Python integer. Used to create random seeds. See\n [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)\n for behavior.\n name: A name for this operation (optional).\n\n Returns:\n A Tensor of the same shape of `x`.\n\n Raises:\n ValueError: If `keep_prob` is not in `(0, 1]`.\n \"\"\"\n with ops.op_scope([x], name, \"dropout\") as name:\n x = ops.convert_to_tensor(x, name=\"x\")\n if isinstance(keep_prob, float) and not 0 < keep_prob <= 1:\n raise ValueError(\"keep_prob must be a scalar tensor or a float in the \"\n \"range (0, 1], got %g\" % keep_prob)\n keep_prob = ops.convert_to_tensor(\n keep_prob, dtype=x.dtype, name=\"keep_prob\")\n keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())\n\n noise_shape = noise_shape or array_ops.shape(x)\n # uniform [keep_prob, 1.0 + keep_prob)\n random_tensor = keep_prob\n random_tensor += random_ops.random_uniform(\n noise_shape, seed=seed, dtype=x.dtype)\n # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)\n binary_tensor = math_ops.floor(random_tensor)\n ret = x * math_ops.inv(keep_prob) * binary_tensor\n ret.set_shape(x.get_shape())\n return ret\n\n\ndef top_k(input, k=1, sorted=True, name=None):\n \"\"\"Finds values and indices of the `k` largest entries for the last dimension.\n\n If the input is a vector (rank-1), finds the `k` largest entries in the vector\n and outputs their values and indices as vectors. Thus `values[j]` is the\n `j`-th largest entry in `input`, and its index is `indices[j]`.\n\n For matrices (resp. higher rank input), computes the top `k` entries in each\n row (resp. vector along the last dimension). Thus,\n\n values.shape = indices.shape = input.shape[:-1] + [k]\n\n If two elements are equal, the lower-index element appears first.\n\n Args:\n input: 1-D or higher `Tensor` with last dimension at least `k`.\n k: 0-D `int32` `Tensor`. 
Number of top elements to look for along the last\n dimension (along each row for matrices).\n sorted: If true the resulting `k` elements will be sorted by the values in\n descending order.\n name: Optional name for the operation.\n\n Returns:\n values: The `k` largest elements along each last dimensional slice.\n indices: The indices of `values` within the last dimension of `input`.\n \"\"\"\n return gen_nn_ops._top_kv2(input, k=k, sorted=sorted, name=name)\n\n\n# pylint: enable=invalid-name\n"
] |
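The `dropout` wrapper near the end of nn_ops.py above builds its keep/drop mask with a floor trick: uniform noise in [keep_prob, 1 + keep_prob) floors to exactly 0 or 1, landing on 1 with probability keep_prob, and surviving elements are scaled by 1/keep_prob so the expected sum is unchanged. The same arithmetic in a few lines of plain numpy (a sketch of the trick only, not of the TensorFlow op):

import numpy as np

rng = np.random.default_rng(0)
x = np.ones(1000)
keep_prob = 0.8
random_tensor = keep_prob + rng.uniform(size=x.shape)  # uniform in [keep_prob, 1 + keep_prob)
binary_mask = np.floor(random_tensor)                  # 1 with probability keep_prob, else 0
y = x * binary_mask / keep_prob
print(binary_mask.mean(), y.mean())  # ~0.8 and ~1.0: per-element expectation is preserved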
[
[
"tensorflow.python.ops.math_ops.matmul",
"tensorflow.python.ops.gen_nn_ops._bias_add",
"tensorflow.python.framework.tensor_shape.vector",
"tensorflow.python.ops.gen_nn_ops._bias_add_v1",
"tensorflow.python.ops.math_ops.inv",
"tensorflow.python.ops.gen_nn_ops._top_kv2",
"tensorflow.python.client.graph_util.tensor_shape_from_node_def_name",
"tensorflow.python.ops.gen_nn_ops._relu6",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.ops.RegisterShape",
"tensorflow.python.framework.ops.OpStats",
"tensorflow.python.ops.common_shapes.max_pool_shape",
"tensorflow.python.ops.math_ops.floor",
"tensorflow.python.ops.gen_nn_ops._max_pool",
"tensorflow.python.ops.gen_nn_ops.conv2d_backprop_input",
"tensorflow.python.ops.gen_nn_ops._sparse_softmax_cross_entropy_with_logits",
"tensorflow.python.ops.random_ops.random_uniform",
"tensorflow.python.ops.gen_nn_ops._avg_pool",
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.framework.ops.op_scope",
"tensorflow.python.ops.gen_nn_ops._softmax_cross_entropy_with_logits",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.framework.ops.RegisterStatistics",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.common_shapes.unchanged_shape_with_rank"
]
] |
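The docstrings above draw the line between `softmax_cross_entropy_with_logits` (each row of `labels` may be any valid probability distribution) and the sparse variant (one integer class index per row). A tiny numpy check, using only the formulas those docstrings describe, that the two losses coincide when the soft labels are one-hot:

import numpy as np

def log_softmax(z):
    z = z - z.max(axis=-1, keepdims=True)     # stabilize before exponentiating
    return z - np.log(np.exp(z).sum(axis=-1, keepdims=True))

logits = np.array([[2.0, 0.5, -1.0]])
soft_labels = np.array([[1.0, 0.0, 0.0]])     # one-hot, hence a valid distribution
sparse_label = 0                              # index of the single true class

soft_loss = -(soft_labels * log_softmax(logits)).sum(axis=-1)[0]
sparse_loss = -log_softmax(logits)[0, sparse_label]
print(np.isclose(soft_loss, sparse_loss))     # True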
JayKimBravekjh/deepchem
|
[
"842dd48ee065bee1034754540569f946cbb579eb"
] |
[
"deepchem/molnet/load_function/pdbbind_datasets.py"
] |
[
"\"\"\"\nPDBBind dataset loader.\n\"\"\"\nimport logging\nimport multiprocessing\nimport os\nimport re\nimport time\n\nimport deepchem\nimport numpy as np\nimport pandas as pd\nimport tarfile\nfrom deepchem.feat import RdkitGridFeaturizer\nfrom deepchem.feat import ComplexNeighborListFragmentAtomicCoordinates\nfrom deepchem.feat.graph_features import AtomicConvFeaturizer\n\nlogger = logging.getLogger(__name__)\nDEFAULT_DATA_DIR = deepchem.utils.data_utils.get_data_dir()\n\n\ndef featurize_pdbbind(data_dir=None, feat=\"grid\", subset=\"core\"):\n \"\"\"Featurizes pdbbind according to provided featurization\"\"\"\n tasks = [\"-logKd/Ki\"]\n data_dir = deepchem.utils.data_utils.get_data_dir()\n pdbbind_dir = os.path.join(data_dir, \"pdbbind\")\n dataset_dir = os.path.join(pdbbind_dir, \"%s_%s\" % (subset, feat))\n\n if not os.path.exists(dataset_dir):\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/core_grid.tar.gz\"\n )\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/full_grid.tar.gz\"\n )\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/refined_grid.tar.gz\"\n )\n if not os.path.exists(pdbbind_dir):\n os.system('mkdir ' + pdbbind_dir)\n deepchem.utils.data_utils.untargz_file(\n os.path.join(data_dir, 'core_grid.tar.gz'), pdbbind_dir)\n deepchem.utils.data_utils.untargz_file(\n os.path.join(data_dir, 'full_grid.tar.gz'), pdbbind_dir)\n deepchem.utils.data_utils.untargz_file(\n os.path.join(data_dir, 'refined_grid.tar.gz'), pdbbind_dir)\n\n return deepchem.data.DiskDataset(dataset_dir), tasks\n\n\ndef load_pdbbind_grid(split=\"random\",\n featurizer=\"grid\",\n subset=\"core\",\n reload=True):\n \"\"\"Load PDBBind datasets. 
Does not do train/test split\"\"\"\n if featurizer == 'grid':\n dataset, tasks = featurize_pdbbind(feat=featurizer, subset=subset)\n\n splitters = {\n 'index': deepchem.splits.IndexSplitter(),\n 'random': deepchem.splits.RandomSplitter(),\n 'time': deepchem.splits.TimeSplitterPDBbind(dataset.ids)\n }\n splitter = splitters[split]\n train, valid, test = splitter.train_valid_test_split(dataset)\n\n transformers = []\n for transformer in transformers:\n train = transformer.transform(train)\n for transformer in transformers:\n valid = transformer.transform(valid)\n for transformer in transformers:\n test = transformer.transform(test)\n\n all_dataset = (train, valid, test)\n return tasks, all_dataset, transformers\n\n else:\n data_dir = deepchem.utils.data_utils.get_data_dir()\n if reload:\n save_dir = os.path.join(\n data_dir, \"pdbbind_\" + subset + \"/\" + featurizer + \"/\" + str(split))\n\n dataset_file = os.path.join(data_dir, subset + \"_smiles_labels.csv\")\n\n if not os.path.exists(dataset_file):\n deepchem.utils.data_utils.download_url(\n \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/\" + subset +\n \"_smiles_labels.csv\")\n\n tasks = [\"-logKd/Ki\"]\n if reload:\n loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(\n save_dir)\n if loaded:\n return tasks, all_dataset, transformers\n\n if featurizer == 'ECFP':\n featurizer = deepchem.feat.CircularFingerprint(size=1024)\n elif featurizer == 'GraphConv':\n featurizer = deepchem.feat.ConvMolFeaturizer()\n elif featurizer == 'Weave':\n featurizer = deepchem.feat.WeaveFeaturizer()\n elif featurizer == 'Raw':\n featurizer = deepchem.feat.RawFeaturizer()\n\n loader = deepchem.data.CSVLoader(\n tasks=tasks, smiles_field=\"smiles\", featurizer=featurizer)\n dataset = loader.featurize(dataset_file, shard_size=8192)\n df = pd.read_csv(dataset_file)\n\n if split == None:\n transformers = [\n deepchem.trans.NormalizationTransformer(\n transform_y=True, dataset=dataset)\n ]\n\n logger.info(\"Split is None, about to transform data.\")\n for transformer in transformers:\n dataset = transformer.transform(dataset)\n return tasks, (dataset, None, None), transformers\n\n splitters = {\n 'index': deepchem.splits.IndexSplitter(),\n 'random': deepchem.splits.RandomSplitter(),\n 'scaffold': deepchem.splits.ScaffoldSplitter(),\n }\n splitter = splitters[split]\n logger.info(\"About to split dataset with {} splitter.\".format(split))\n train, valid, test = splitter.train_valid_test_split(dataset)\n\n transformers = [\n deepchem.trans.NormalizationTransformer(\n transform_y=True, dataset=train)\n ]\n\n logger.info(\"About to transform dataset.\")\n for transformer in transformers:\n train = transformer.transform(train)\n valid = transformer.transform(valid)\n test = transformer.transform(test)\n\n if reload:\n deepchem.utils.data_utils.save_dataset_to_disk(save_dir, train, valid,\n test, transformers)\n\n return tasks, (train, valid, test), transformers\n\n\ndef load_pdbbind(reload=True,\n data_dir=None,\n subset=\"core\",\n load_binding_pocket=False,\n featurizer=\"grid\",\n split=\"random\",\n split_seed=None,\n save_dir=None,\n save_timestamp=False):\n \"\"\"Load raw PDBBind dataset by featurization and split.\n\n Parameters\n ----------\n reload: Bool, optional\n Reload saved featurized and splitted dataset or not.\n data_dir: Str, optional\n Specifies the directory storing the raw dataset.\n load_binding_pocket: Bool, optional\n Load binding pocket or full protein.\n subset: Str\n Specifies which subset of 
PDBBind, only \"core\" or \"refined\" for now.\n  featurizer: Str\n    Either \"grid\" or \"atomic\" for grid and atomic featurizations.\n  split: Str\n    Either \"random\" or \"index\".\n  split_seed: Int, optional\n    Specifies the random seed for splitter.\n  save_dir: Str, optional\n    Specifies the directory to store the featurized and split dataset when\n    reload is False. If reload is True, it will load saved dataset inside save_dir.\n  save_timestamp: Bool, optional\n    Save featurized and split dataset with timestamp or not. Set it as True\n    when running similar or same jobs simultaneously on multiple compute nodes.\n  \"\"\"\n\n  pdbbind_tasks = [\"-logKd/Ki\"]\n\n  if data_dir is None:\n    data_dir = DEFAULT_DATA_DIR\n  data_folder = os.path.join(data_dir, \"pdbbind\", \"v2015\")\n\n  if save_dir is None:\n    save_dir = os.path.join(DEFAULT_DATA_DIR, \"from-pdbbind\")\n  if load_binding_pocket:\n    save_folder = os.path.join(\n        save_dir, \"protein_pocket-%s-%s-%s\" % (subset, featurizer, split))\n  else:\n    save_folder = os.path.join(\n        save_dir, \"full_protein-%s-%s-%s\" % (subset, featurizer, split))\n\n  if save_timestamp:\n    save_folder = \"%s-%s-%s\" % (save_folder,\n                                time.strftime(\"%Y%m%d\", time.localtime()),\n                                re.search(r\"\\.(.*)\", str(time.time())).group(1))\n\n  if reload:\n    if not os.path.exists(save_folder):\n      print(\n          \"Dataset does not exist at {}. Reconstructing...\".format(save_folder))\n    else:\n      print(\n          \"\\nLoading featurized and split dataset from:\\n%s\\n\" % save_folder)\n    loaded, all_dataset, transformers = deepchem.utils.data_utils.load_dataset_from_disk(\n        save_folder)\n    if loaded:\n      return pdbbind_tasks, all_dataset, transformers\n\n  dataset_file = os.path.join(data_dir, \"pdbbind_v2015.tar.gz\")\n  if not os.path.exists(dataset_file):\n    logger.warning(\"About to download PDBBind full dataset. 
Large file, 2GB\")\n    deepchem.utils.data_utils.download_url(\n        \"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/pdbbind_v2015.tar.gz\",\n        dest_dir=data_dir)\n  if os.path.exists(data_folder):\n    logger.info(\"PDBBind full dataset already exists.\")\n  else:\n    print(\"Untarring full dataset...\")\n    deepchem.utils.data_utils.untargz_file(\n        dataset_file, dest_dir=os.path.join(data_dir, \"pdbbind\"))\n\n  print(\"\\nRaw dataset:\\n%s\" % data_folder)\n  print(\"\\nFeaturized and split dataset:\\n%s\" % save_folder)\n\n  if subset == \"core\":\n    index_labels_file = os.path.join(data_folder, \"INDEX_core_data.2013\")\n  elif subset == \"refined\":\n    index_labels_file = os.path.join(data_folder, \"INDEX_refined_data.2015\")\n  else:\n    raise ValueError(\"Other subsets not supported\")\n\n  # Extract locations of data\n  with open(index_labels_file, \"r\") as g:\n    pdbs = [line[:4] for line in g.readlines() if line[0] != \"#\"]\n  if load_binding_pocket:\n    protein_files = [\n        os.path.join(data_folder, pdb, \"%s_pocket.pdb\" % pdb) for pdb in pdbs\n    ]\n  else:\n    protein_files = [\n        os.path.join(data_folder, pdb, \"%s_protein.pdb\" % pdb) for pdb in pdbs\n    ]\n  ligand_files = [\n      os.path.join(data_folder, pdb, \"%s_ligand.sdf\" % pdb) for pdb in pdbs\n  ]\n\n  # Extract labels\n  with open(index_labels_file, \"r\") as g:\n    labels = np.array([\n        # Lines have format\n        # PDB code, resolution, release year, -logKd/Ki, Kd/Ki, reference, ligand name\n        # The base-10 logarithm, -log Kd/Ki\n        float(line.split()[3]) for line in g.readlines() if line[0] != \"#\"\n    ])\n\n  # Featurize Data\n  if featurizer == \"grid\":\n    featurizer = RdkitGridFeaturizer(\n        voxel_width=2.0,\n        feature_types=[\n            'ecfp', 'splif', 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi',\n            'charge'\n        ],\n        flatten=True)\n  elif featurizer == \"atomic\" or featurizer == \"atomic_conv\":\n    # Pulled from PDB files. 
For larger datasets with more PDBs, would use\n    # max num atoms instead of exact.\n    frag1_num_atoms = 70  # for ligand atoms\n    if load_binding_pocket:\n      frag2_num_atoms = 1000\n      complex_num_atoms = 1070\n    else:\n      frag2_num_atoms = 24000  # for protein atoms\n      complex_num_atoms = 24070  # in total\n    max_num_neighbors = 4\n    # Cutoff in angstroms\n    neighbor_cutoff = 4\n    if featurizer == \"atomic\":\n      featurizer = ComplexNeighborListFragmentAtomicCoordinates(\n          frag1_num_atoms=frag1_num_atoms,\n          frag2_num_atoms=frag2_num_atoms,\n          complex_num_atoms=complex_num_atoms,\n          max_num_neighbors=max_num_neighbors,\n          neighbor_cutoff=neighbor_cutoff)\n    elif featurizer == \"atomic_conv\":\n      featurizer = AtomicConvFeaturizer(\n          labels=labels,\n          frag1_num_atoms=frag1_num_atoms,\n          frag2_num_atoms=frag2_num_atoms,\n          complex_num_atoms=complex_num_atoms,\n          neighbor_cutoff=neighbor_cutoff,\n          max_num_neighbors=max_num_neighbors,\n          batch_size=64)\n  else:\n    raise ValueError(\"Featurizer not supported\")\n\n  print(\"\\nFeaturizing Complexes for \\\"%s\\\" ...\\n\" % data_folder)\n  feat_t1 = time.time()\n  features, failures = featurizer.featurize(ligand_files, protein_files)\n  feat_t2 = time.time()\n  print(\"\\nFeaturization finished, took %0.3f s.\" % (feat_t2 - feat_t1))\n\n  # Delete labels and ids for failing elements\n  labels = np.delete(labels, failures)\n  labels = labels.reshape((len(labels), 1))\n  ids = np.delete(pdbs, failures)\n\n  print(\"\\nConstruct dataset excluding failing featurization elements...\")\n  dataset = deepchem.data.DiskDataset.from_numpy(features, y=labels, ids=ids)\n\n  # No transformations of data\n  transformers = []\n\n  # Split dataset\n  print(\"\\nSplit dataset...\\n\")\n  if split is None:\n    return pdbbind_tasks, (dataset, None, None), transformers\n\n  # TODO(rbharath): This should be modified to contain a cluster split so\n  # structures of the same protein aren't in both train/test\n  splitters = {\n      'index': deepchem.splits.IndexSplitter(),\n      'random': deepchem.splits.RandomSplitter(),\n  }\n  splitter = splitters[split]\n  train, valid, test = splitter.train_valid_test_split(dataset, seed=split_seed)\n\n  all_dataset = (train, valid, test)\n  print(\"\\nSaving dataset to \\\"%s\\\" ...\" % save_folder)\n  deepchem.utils.data_utils.save_dataset_to_disk(save_folder, train, valid,\n                                                 test, transformers)\n  return pdbbind_tasks, all_dataset, transformers\n\n\ndef load_pdbbind_from_dir(data_folder,\n                          index_files,\n                          featurizer=\"grid\",\n                          split=\"random\",\n                          ex_ids=[],\n                          save_dir=None):\n  \"\"\"Load and featurize raw PDBBind dataset from a local directory with the option to avoid certain IDs.\n\n  Parameters\n  ----------\n  data_folder: String\n    Specifies the directory holding the raw dataset.\n  index_files: List\n    List of data and labels index file paths relative to data_folder.\n  featurizer: Str\n    Either \"grid\" or \"atomic\" for grid and atomic featurizations.\n  split: Str\n    Either \"random\" or \"index\"\n  ex_ids: List\n    List of PDB IDs to avoid loading if present\n  save_dir: String\n    Path to store featurized datasets\n  \"\"\"\n  pdbbind_tasks = [\"-logKd/Ki\"]\n\n  index_file = os.path.join(data_folder, index_files[0])\n  labels_file = os.path.join(data_folder, index_files[1])\n\n  # Extract locations of data\n  pdbs = []\n\n  with open(index_file, \"r\") as g:\n    lines = g.readlines()\n    for line in lines:\n      line = line.split(\" \")\n      pdb = line[0]\n      if len(pdb) == 4:\n        pdbs.append(pdb)\n  protein_files = [\n      os.path.join(data_folder, pdb, 
\"%s_protein.pdb\" % pdb)\n      for pdb in pdbs\n      if pdb not in ex_ids\n  ]\n  ligand_files = [\n      os.path.join(data_folder, pdb, \"%s_ligand.sdf\" % pdb)\n      for pdb in pdbs\n      if pdb not in ex_ids\n  ]\n  # Extract labels\n  labels_tmp = {}\n  with open(labels_file, \"r\") as f:\n    lines = f.readlines()\n    for line in lines:\n      # Skip comment lines\n      if line[0] == \"#\":\n        continue\n      # Lines have format\n      # PDB code, resolution, release year, -logKd/Ki, Kd/Ki, reference, ligand name\n      line = line.split()\n      # The base-10 logarithm, -log Kd/Ki\n      log_label = float(line[3])\n      labels_tmp[line[0]] = log_label\n\n  # Keep labels aligned with the protein/ligand file lists built above.\n  labels = np.array(\n      [labels_tmp[pdb] for pdb in pdbs if pdb not in ex_ids])\n  # Featurize Data\n  if featurizer == \"grid\":\n    featurizer = RdkitGridFeaturizer(\n        voxel_width=2.0,\n        feature_types=[\n            'ecfp', 'splif', 'hbond', 'salt_bridge', 'pi_stack', 'cation_pi',\n            'charge'\n        ],\n        flatten=True)\n  elif featurizer == \"atomic\":\n    # Pulled from PDB files. For larger datasets with more PDBs, would use\n    # max num atoms instead of exact.\n    frag1_num_atoms = 70  # for ligand atoms\n    frag2_num_atoms = 24000  # for protein atoms\n    complex_num_atoms = 24070  # in total\n    max_num_neighbors = 4\n    # Cutoff in angstroms\n    neighbor_cutoff = 4\n    featurizer = ComplexNeighborListFragmentAtomicCoordinates(\n        frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors,\n        neighbor_cutoff)\n  else:\n    raise ValueError(\"Featurizer not supported\")\n  print(\"Featurizing Complexes\")\n  features, failures = featurizer.featurize(ligand_files, protein_files)\n  # Delete labels for failing elements\n  labels = np.delete(labels, failures)\n  dataset = deepchem.data.DiskDataset.from_numpy(features, labels)\n  # No transformations of data\n  transformers = []\n  if split is None:\n    return pdbbind_tasks, (dataset, None, None), transformers\n\n  # TODO(rbharath): This should be modified to contain a cluster split so\n  # structures of the same protein aren't in both train/test\n  splitters = {\n      'index': deepchem.splits.IndexSplitter(),\n      'random': deepchem.splits.RandomSplitter(),\n  }\n  splitter = splitters[split]\n  train, valid, test = splitter.train_valid_test_split(dataset)\n  all_dataset = (train, valid, test)\n  if save_dir:\n    deepchem.utils.data_utils.save_dataset_to_disk(save_dir, train, valid, test,\n                                                   transformers)\n  return pdbbind_tasks, all_dataset, transformers\n"
] |
[
[
"numpy.array",
"numpy.delete",
"pandas.read_csv"
]
] |
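The row above bundles deepchem's PDBBind loaders. Below is a minimal usage sketch, not part of the original file: it assumes a working deepchem installation and enough disk space/bandwidth for the raw download, and the argument values simply mirror the `load_pdbbind` signature shown in the code cell.

```python
# Hedged sketch: exercises load_pdbbind() as defined in the row above.
tasks, (train, valid, test), transformers = load_pdbbind(
    reload=True,
    subset="core",      # "core" or "refined" are the supported subsets
    featurizer="grid",  # "grid" or "atomic"/"atomic_conv"
    split="random",
    split_seed=42)

print(tasks)  # ['-logKd/Ki']
print(train.X.shape, valid.X.shape, test.X.shape)
```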
webclinic017/pyStock-1
|
[
"4ed6bf20130dcfc37d542bd5b3aec505a12f3106"
] |
[
"indicators/bollinger.py"
] |
[
"# Allow imports from the parent directory\nimport matplotlib.pyplot as plt\nfrom helpers.DataOperations import FindIntersections, CreateSubsetByValues\nfrom core.indicator import indicator\n# Creates object\n\n\ndef CreateBollinger(prices, n=20, k=2):\n    return Bollinger(prices, n, k)\n\n# Bollinger object which creates Bollinger data\n\n\nclass Bollinger(indicator):\n\n    def __init__(self, close, n=20, k=2):\n        indicator.__init__(self, 'Bollinger', 'momentum', close.index)\n        self.n = n\n        self.k = k\n        self.consolidationLvl = 15  # percent\n        self.variabilityLvl = 50  # percent\n        self.mavg, self.upperBand, self.lowerBand = self.InitBollinger(\n            close, self.n, self.k)\n        self.std = self.upperBand - self.lowerBand\n        self.absStd = (self.std * 100) / self.std.max()\n\n        # Signals\n        fromBottom, fromTop = FindIntersections(self.upperBand, close)\n        self.sell = fromBottom\n        fromBottom, fromTop = FindIntersections(self.lowerBand, close)\n        self.buy = fromTop\n        self.consolidation = CreateSubsetByValues(\n            self.absStd, 0, self.consolidationLvl)\n        self.variability = CreateSubsetByValues(\n            self.absStd, self.variabilityLvl, 100)\n\n    # Set Bollinger indicator\n    @staticmethod\n    def InitBollinger(prices, n=20, k=2):\n        mavg = prices.rolling(window=n, min_periods=1).mean()\n        std = prices.rolling(window=n, min_periods=1).std()\n        # Use the configured band width k instead of a hard-coded 2\n        upperBand = mavg + (std * k)\n        lowerBand = mavg - (std * k)\n        return mavg, upperBand, lowerBand\n\n    # Export indicator signals to report\n    def ExportSignals(self, reportSignals):\n        reportSignals.AddDataframeSignals(self.buy, 'Bollinger', 'buy')\n        reportSignals.AddDataframeSignals(self.sell, 'Bollinger', 'sell')\n\n    # Plot method\n    def Plot(self):\n        # Get index values for the X axis\n        x_axis = self.toNumIndex(self.upperBand)\n\n        # Plot shaded n-day Bollinger band\n        plt.fill_between(x_axis, self.upperBand,\n                         self.lowerBand, color='#BBBBBB')\n        plt.plot(self.toNumIndex(self.upperBand), self.upperBand, '--',\n                 linewidth=1.0, color='#940006', label='Sell band')\n        plt.plot(self.toNumIndex(self.lowerBand), self.lowerBand, '--',\n                 linewidth=1.0, color='#169400', label='Buy band')\n        plt.plot(self.toNumIndex(self.mavg), self.mavg, '--', linewidth=1.0,\n                 color='#0000FF', label=('MA %s days' % self.n))\n\n        # Signals plotting\n        if (self.buy is not None and self.buy.size):\n            plt.plot(self.toNumIndex(self.buy), self.buy,\n                     'o', color='#000000', ms=8)\n            plt.plot(self.toNumIndex(self.buy), self.buy, 'o',\n                     label='Horiz. Buy', color='#00FF00')\n        if (self.sell is not None and self.sell.size):\n            plt.plot(self.toNumIndex(self.sell),\n                     self.sell, 'o', color='#000000', ms=8)\n            plt.plot(self.toNumIndex(self.sell), self.sell, 'o',\n                     label='Horiz. Sell', color='#FF0000')\n\n    # Plot absolute deviation\n    def PlotAbsDeviation(self):\n        plt.plot(self.toNumIndex(self.absStd), self.absStd, linewidth=1.0,\n                 color='#333333', label='Bol.AbsDeviation')\n        plt.ylim(top=100, bottom=0)\n        if (self.consolidation is not None and self.consolidation.size):\n            plt.plot(self.toNumIndex(self.consolidation), self.consolidation,\n                     'o', label='Consolidation', color='cyan')\n        if (self.variability is not None and self.variability.size):\n            plt.plot(self.toNumIndex(self.variability), self.variability,\n                     'o', label='Variability', color='magenta')\n"
] |
[
[
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.fill_between"
]
] |
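A short usage sketch for the `Bollinger` indicator above. The price series here is synthetic, and the repo's `core.indicator` base class (including its `toNumIndex` helper) and `helpers.DataOperations` are assumed importable; none of this is part of the original file.

```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Synthetic close prices standing in for real market data (assumption).
close = pd.Series(100 + np.random.randn(250).cumsum(),
                  index=pd.date_range("2020-01-01", periods=250))

bol = CreateBollinger(close, n=20, k=2)  # factory defined in the row above
print(bol.buy.size, bol.sell.size)       # counts of band/price intersections
bol.Plot()                               # draws bands and signal markers
plt.legend()
plt.show()
```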
RGiskard/Data-Structures-and-Algorithms
|
[
"045eab8e2167fa86aa48f194a7e2d621ce7f19ed"
] |
[
"meetup pandas/Funciones/comandos_pandas_v2.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 1 07:45:15 2019\n\n@author: PAULA\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\n#######################\n# Selection functions #\n#######################\n\nprint('#######################')\nprint('# Selection functions #')\nprint('#######################')\n \n \ndf = pd.DataFrame(np.array([[1,2,3],[4,5,6],[7,8,9]]))\nprint('DataFrame: ')\nprint(df)\nprint('____________________________')\n \n# select the first column of the DataFrame\nprint('First column of the DataFrame: ')\nprint(df[0])\nprint('____________________________')\n\n# select two columns of the DataFrame\nprint('Two columns of the DataFrame: ')\nprint(df[[0,1]])\nprint('____________________________')\n\n# a value from the first row and last column of the DataFrame\nprint('Value selected by DataFrame indices: ')\nprint(df.iloc[0][2])\nprint('____________________________')\n\n# use the DataFrame indices to select its data,\n# values of the first row of the DataFrame\nprint('Values of the first row with loc: ')\nprint(df.loc[0])\nprint('____________________________')\nprint('Values of the first row with iloc: ')\nprint(df.iloc[0,:])\n\n######################\n# Cleaning functions #\n######################\n\nprint('\\n')\nprint('######################')\nprint('# Cleaning functions #')\nprint('######################')\n \n \ndf2 = pd.DataFrame({'age': [5, 6, np.NaN],\n                    'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n                             pd.Timestamp('1940-04-25')],\n                    'name': ['Alfred', 'Batman', ''],\n                    'toy': [None, 'Batmobile', 'Joker']})\nprint('DataFrame2: ')\nprint(df2)\nprint('____________________________')\n\n# clean the data if we want reliable results:\n# check whether the dataset has missing values\n# (True for missing values, False for non-missing values)\nprint('Check for missing values in the dataset: ')\nprint(df2.isnull())\nprint('____________________________')\n\n# drop null data\nprint('Drop columns/rows with null data: ')\nprint(df2.dropna())\nprint('____________________________')\n\ndf3 = pd.DataFrame(np.array([[1,np.NaN,3],[4,5,np.NaN],[7,np.NaN,9], [4, np.NaN, 0]]))\nprint('DataFrame3: ')\nprint(df3)\nprint('____________________________')\n\n# count of null values\nprint('Count of null values: ')\nprint(df3.isnull().sum())\nprint('____________________________')\n\n# fill the null values with some value x\nprint('Fill with some value x: ')\nprint(df3.fillna(0))\nprint('____________________________')\n\n\n\n"
] |
[
[
"pandas.Timestamp",
"numpy.array"
]
] |
timoblak/OpenAFQA
|
[
"dc3e4a02efac3342fc6341a2946398d19d6b7c84"
] |
[
"afqa_toolbox/features/gabor.py"
] |
[
"from afqa_toolbox.features import block_properties\nimport numpy as np\nimport cv2\n\n\ndef gabor_filter(theta, freq, sigma, shen=False):\n    \"\"\"Produces a Gabor filter based on the provided parameters\n\n    :param theta: The angle of the filter\n    :param freq: The frequency of the filter\n    :param sigma: The standard deviation of the gaussian envelope\n    :param shen: Alternative definition of the Gabor filter by Shen et al.\n    :return:\n    \"\"\"\n    # define the sampling grid (add a small eps to also include the last index)\n    rng = np.arange(-2.5 * sigma, 2.5 * sigma + 1e-5)\n\n    [x, y] = np.meshgrid(rng, rng)\n\n    # Shen et al. define the Gabor filter a bit differently\n    if shen:\n        x1 = x * np.cos(theta) + y * np.sin(theta)\n        y1 = -x * np.sin(theta) + y * np.cos(theta)\n    else:\n        x1 = x * np.sin(theta) + y * np.cos(theta)\n        y1 = x * np.cos(theta) - y * np.sin(theta)\n\n    return np.exp((-1/2) * ((x1 * x1) / (sigma * sigma) + (y1 * y1) / (sigma * sigma))) * \\\n        np.exp(1j * 2 * np.pi * freq * x1)\n\n\nclass FeatGabor:\n    \"\"\"Filters the input image with differently oriented Gabor filters\"\"\"\n    def __init__(self, blk_size, sigma=6, freq=0.1, angle_num=8):\n        # Default values are suitable for fingerprint images at 500 ppi\n        self.blk_size = blk_size\n        self.sigma = sigma\n        self.freq = freq\n        self.angle_num = angle_num\n\n    def gabor_stds(self, image, smooth=False, shen=False):\n        \"\"\"Calculates the standard deviation of responses to differently oriented Gabor filters\n\n        :param image: Input image\n        :param smooth: Whether to smooth the filter magnitudes with a Gaussian before taking the std\n        :param shen: Use the Shen et al. filter definition\n        :return:\n        \"\"\"\n\n        h, w = image.shape\n\n        img_float = image.astype(np.float64)/255\n        gauss_kernel_1 = cv2.getGaussianKernel(7, 1)\n        gauss_kernel_4 = cv2.getGaussianKernel(25, 4)\n        gauss_image = cv2.sepFilter2D(img_float, cv2.CV_64F, gauss_kernel_1, gauss_kernel_1)\n\n        img_detail = img_float - gauss_image\n\n        gauss_responses = np.zeros(shape=(h, w, self.angle_num))\n        for i, angle in enumerate(range(self.angle_num)):\n            theta = (np.pi*angle) / self.angle_num\n            gf = gabor_filter(theta, self.freq, self.sigma, shen)\n\n            # Calculate the response of Gabor filters\n            response = cv2.filter2D(img_detail, cv2.CV_64F, gf.real) + 1j * cv2.filter2D(img_detail, cv2.CV_64F, gf.imag)\n            magnitude = np.abs(response)\n\n            # Calc Gauss of the Gabor magnitudes for smoothing\n            if smooth:\n                gauss_responses[:, :, i] = cv2.sepFilter2D(magnitude, cv2.CV_64F, gauss_kernel_4, gauss_kernel_4)\n            else:\n                gauss_responses[:, :, i] = magnitude\n\n        std_local = gauss_responses.std(axis=-1, ddof=1)\n\n        rows, cols = block_properties(image.shape, self.blk_size)\n        return cv2.resize(std_local, (cols, rows), interpolation=cv2.INTER_AREA)\n\n\n"
] |
[
[
"numpy.sin",
"numpy.zeros",
"numpy.exp",
"numpy.arange",
"numpy.abs",
"numpy.cos",
"numpy.meshgrid"
]
] |
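A hedged usage sketch for `FeatGabor` above. The random array is a stand-in input; in practice a grayscale 500 ppi fingerprint crop would be used, and `block_properties` is assumed available from the toolbox as imported in the row's code.

```python
import numpy as np

# Stand-in input: a uint8 grayscale patch (assumption; real code would
# load a fingerprint image, e.g. with cv2.imread(..., cv2.IMREAD_GRAYSCALE)).
image = (np.random.rand(256, 256) * 255).astype(np.uint8)

feat = FeatGabor(blk_size=32, sigma=6, freq=0.1, angle_num=8)
std_map = feat.gabor_stds(image, smooth=True)

# One value per 32x32 block: a high std across orientations suggests
# strong oriented ridge structure in that block.
print(std_map.shape)
```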
SunNy820828449/CINN
|
[
"6384f730867132508c2c60f5ff2aae12959143d7"
] |
[
"python/tests/pool_utils.py"
] |
[
"#!/usr/bin/env python3\n\n# Copyright (c) 2021 CINN Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nimport numpy as np\nimport sys\n\n\ndef pool2d(np_data, attrs, dtype=\"float32\"):\n pool_type = \"max\"\n ceil_mode = False\n exclusive = True\n data_format = \"NCHW\"\n for key in attrs.attr_store:\n if key == \"kernel_size\":\n kernel_size = attrs.get_attr(\"kernel_size\")\n elif key == \"stride_size\":\n stride_size = attrs.get_attr(\"stride_size\")\n elif key == \"padding_size\":\n padding_size = attrs.get_attr(\"padding_size\")\n elif key == \"pool_type\":\n pool_type = attrs.get_attr(\"pool_type\")\n elif key == \"ceil_mode\":\n ceil_mode = attrs.get_attr(\"ceil_mode\")\n elif key == \"exclusive\":\n exclusive = attrs.get_attr(\"exclusive\")\n elif key == \"data_format\":\n data_format = attrs.get_attr(\"data_format\")\n else:\n raise ValueError(\"attr_store {} is not supported\".format(key))\n\n if data_format == \"NCHW\":\n in_n, in_c, in_h, in_w = in_shape = np_data.shape\n height_axis = 2\n width_axis = 3\n elif data_format == \"NHWC\":\n in_n, in_h, in_w, in_c = in_shape = np_data.shape\n height_axis = 1\n width_axis = 2\n else:\n raise ValueError(\"data_format {} is not supported\".format(data_format))\n\n if isinstance(kernel_size, int):\n k_h = k_w = kernel_size\n else:\n k_h, k_w = kernel_size\n if isinstance(stride_size, int):\n s_h = s_w = stride_size\n else:\n s_h, s_w = stride_size\n if isinstance(padding_size, int):\n pt = pl = pb = pr = padding_size\n else:\n pt, pl, pb, pr = padding_size\n\n out_shape = list(in_shape)\n if ceil_mode:\n out_shape[height_axis] = int(\n math.ceil(float(in_shape[height_axis] - k_h + pt + pb) / s_h) + 1)\n out_shape[width_axis] = int(\n math.ceil(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n else:\n out_shape[height_axis] = int(\n math.floor(float(in_shape[height_axis] - k_h + pt + pb) / s_h) + 1)\n out_shape[width_axis] = int(\n math.floor(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n\n fill_value = 0\n if exclusive and pool_type == 'max':\n fill_value = sys.float_info.min\n\n if data_format == \"NCHW\":\n pad_np = np.full(\n shape=(in_n, in_c, in_h + pt + pb, in_w + pl + pr),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(in_c), range(pt, in_h + pt),\n range(pl, in_w + pl))\n else:\n pad_np = np.full(\n shape=(in_n, in_h + pt + pb, in_w + pl + pr, in_c),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(pt, in_h + pt), range(pl, in_w + pl),\n range(in_c))\n\n pad_np[np.ix_(*no_zero)] = np_data\n ret_np = np.zeros(shape=out_shape).astype(dtype)\n if pool_type == 'avg':\n for i in range(out_shape[height_axis]):\n for j in range(out_shape[width_axis]):\n if exclusive:\n pad_exclusive = pad_np.copy()\n pad_exclusive[np.ix_(*no_zero)] = 1\n if data_format == \"NCHW\":\n pad_count = np.sum(\n pad_exclusive[:, :, i * s_h:i * s_h +\n k_h, j * s_w:j * s_w + k_w] == 1,\n axis=(height_axis, width_axis))\n ret_np[:, :, i, j] = 
np.sum(\n pad_np[:, :, i * s_h:i * s_h +\n k_h, j * s_w:j * s_w + k_w],\n axis=(height_axis, width_axis)) / np.maximum(\n pad_count, 1)\n else:\n pad_count = np.sum(\n pad_exclusive[:, i * s_h:i * s_h +\n k_h, j * s_w:j * s_w + k_w, :] == 1,\n axis=(height_axis, width_axis))\n ret_np[:, i, j, :] = np.sum(\n pad_np[:, i * s_h:i * s_h + k_h, j * s_w:j * s_w +\n k_w, :],\n axis=(height_axis, width_axis)) / np.maximum(\n pad_count, 1)\n else:\n if data_format == \"NCHW\":\n ret_np[:, :,i, j] = \\\n np.mean(pad_np[:, :,\n i * s_h: i * s_h + k_h,\n j * s_w: j * s_w + k_w], axis=(height_axis, width_axis))\n else:\n ret_np[:, i, j, :] = \\\n np.mean(pad_np[:,\n i * s_h: i * s_h + k_h,\n j * s_w: j * s_w + k_w, :], axis=(height_axis, width_axis))\n elif pool_type == 'max':\n for i in range(out_shape[height_axis]):\n for j in range(out_shape[width_axis]):\n if data_format == \"NCHW\":\n ret_np[:, :, i, j] = np.max(\n pad_np[:, :, i * s_h:i * s_h + k_h, j * s_w:j * s_w +\n k_w],\n axis=(height_axis, width_axis))\n else:\n ret_np[:, i, j, :] = np.max(\n pad_np[:, i * s_h:i * s_h + k_h, j * s_w:j * s_w +\n k_w, :],\n axis=(height_axis, width_axis))\n else:\n raise ValueError(\"pool type {} is not supported\".format(pool_type))\n\n ret_np = np.maximum(ret_np, fill_value)\n return ret_np, [out_shape]\n\n\ndef pool3d(np_data, attrs, dtype=\"float32\"):\n pool_type = \"max\"\n ceil_mode = False\n exclusive = True\n data_format = \"NCDHW\"\n for key in attrs.attr_store:\n if key == \"kernel_size\":\n kernel_size = attrs.get_attr(\"kernel_size\")\n elif key == \"stride_size\":\n stride_size = attrs.get_attr(\"stride_size\")\n elif key == \"padding_size\":\n padding_size = attrs.get_attr(\"padding_size\")\n elif key == \"pool_type\":\n pool_type = attrs.get_attr(\"pool_type\")\n elif key == \"ceil_mode\":\n ceil_mode = attrs.get_attr(\"ceil_mode\")\n elif key == \"exclusive\":\n exclusive = attrs.get_attr(\"exclusive\")\n elif key == \"data_format\":\n data_format = attrs.get_attr(\"data_format\")\n else:\n raise ValueError(\"attr_store {} is not supported\".format(key))\n\n if data_format == \"NCDHW\":\n in_n, in_c, in_d, in_h, in_w = in_shape = np_data.shape\n depth_axis = 2\n height_axis = 3\n width_axis = 4\n elif data_format == \"NDHWC\":\n in_n, in_d, in_h, in_w, in_c = in_shape = np_data.shape\n depth_axis = 1\n height_axis = 2\n width_axis = 3\n else:\n raise ValueError(\"data_format {} is not supported\".format(data_format))\n\n if isinstance(kernel_size, int):\n k_d = k_h = k_w = kernel_size\n else:\n k_d, k_h, k_w = kernel_size\n if isinstance(stride_size, int):\n s_d = s_h = s_w = stride_size\n else:\n s_d, s_h, s_w = stride_size\n if isinstance(padding_size, int):\n pf = pt = pl = pk = pb = pr = padding_size\n else:\n pf, pt, pl, pk, pb, pr = padding_size\n\n out_shape = list(in_shape)\n if ceil_mode:\n out_shape[depth_axis] = int(\n math.ceil(float(in_shape[depth_axis] - k_d + pf + pk) / s_d) + 1)\n out_shape[height_axis] = int(\n math.ceil(float(in_shape[height_axis] - k_h + pt + pb) / s_h) + 1)\n out_shape[width_axis] = int(\n math.ceil(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n else:\n out_shape[depth_axis] = int(\n math.floor(float(in_shape[depth_axis] - k_d + pf + pk) / s_d) + 1)\n out_shape[height_axis] = int(\n math.floor(float(in_shape[height_axis] - k_h + pt + pb) / s_h) + 1)\n out_shape[width_axis] = int(\n math.floor(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n\n fill_value = 0\n if exclusive and pool_type == 'max':\n fill_value = sys.float_info.min\n\n if 
data_format == \"NCDHW\":\n pad_np = np.full(\n shape=(in_n, in_c, in_d + pf + pk, in_h + pt + pb, in_w + pl + pr),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(in_c), range(pf, in_d + pf),\n range(pt, in_h + pt), range(pl, in_w + pl))\n else:\n pad_np = np.full(\n shape=(in_n, in_d + pf + pk, in_h + pt + pb, in_w + pl + pr, in_c),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(pf, in_d + pf), range(pt, in_h + pt),\n range(pl, in_w + pl), range(in_c))\n\n pad_np[np.ix_(*no_zero)] = np_data\n ret_np = np.zeros(shape=out_shape).astype(dtype)\n if pool_type == 'avg':\n for i in range(out_shape[depth_axis]):\n for j in range(out_shape[height_axis]):\n for k in range(out_shape[width_axis]):\n if exclusive:\n pad_exclusive = pad_np.copy()\n pad_exclusive[np.ix_(*no_zero)] = 1\n if data_format == \"NCDHW\":\n pad_count = np.sum(\n pad_exclusive[:, :, i * s_d:i * s_d +\n k_d, j * s_h:j * s_h +\n k_h, k * s_w:k * s_w + k_w] == 1,\n axis=(depth_axis, height_axis, width_axis))\n ret_np[:, :, i, j, k] = np.sum(\n pad_np[:, :, i * s_d:i * s_d + k_d, j * s_h:j *\n s_h + k_h, k * s_w:k * s_w + k_w],\n axis=(depth_axis, height_axis,\n width_axis)) / np.maximum(pad_count, 1)\n else:\n pad_count = np.sum(\n pad_exclusive[:, i * s_d:i * s_d +\n k_d, j * s_h:j * s_h + k_h, k *\n s_w:k * s_w + k_w, :] == 1,\n axis=(depth_axis, height_axis, width_axis))\n ret_np[:, i, j, k, :] = np.sum(\n pad_np[:, i * s_d:i * s_d + k_d, j * s_h:j *\n s_h + k_h, k * s_w:k * s_w + k_w, :],\n axis=(depth_axis, height_axis,\n width_axis)) / np.maximum(pad_count, 1)\n else:\n if data_format == \"NCDHW\":\n ret_np[:, :,i, j, k] = \\\n np.mean(pad_np[:, :,\n i * s_d: i * s_d + k_d,\n j * s_h: j * s_h + k_h,\n k * s_w: k * s_w + k_w], axis=(depth_axis, height_axis, width_axis))\n else:\n ret_np[:, i, j, k, :] = \\\n np.mean(pad_np[:,\n i * s_d: i * s_d + k_d,\n j * s_h: j * s_h + k_h,\n k * s_w: k * s_w + k_w,\n :], axis=(depth_axis, height_axis, width_axis))\n elif pool_type == 'max':\n for i in range(out_shape[depth_axis]):\n for j in range(out_shape[height_axis]):\n for k in range(out_shape[width_axis]):\n if data_format == \"NCDHW\":\n ret_np[:, :, i, j, k] = np.max(\n pad_np[:, :, i * s_d:i * s_d + k_d, j *\n s_h:j * s_h + k_h, k * s_w:k * s_w + k_w],\n axis=(depth_axis, height_axis, width_axis))\n else:\n ret_np[:, i, j, k, :] = np.max(\n pad_np[:, i * s_d:i * s_d + k_d, j * s_h:j * s_h +\n k_h, k * s_w:k * s_w + k_w, :],\n axis=(depth_axis, height_axis, width_axis))\n else:\n raise ValueError(\"pool type {} is not supported\".format(pool_type))\n\n ret_np = np.maximum(ret_np, fill_value)\n return ret_np, [out_shape]\n\n\ndef pool1d(np_data, attrs, dtype=\"float32\"):\n pool_type = \"max\"\n ceil_mode = False\n exclusive = True\n data_format = \"NCW\"\n for key in attrs.attr_store:\n if key == \"kernel_size\":\n kernel_size = attrs.get_attr(\"kernel_size\")\n elif key == \"stride_size\":\n stride_size = attrs.get_attr(\"stride_size\")\n elif key == \"padding_size\":\n padding_size = attrs.get_attr(\"padding_size\")\n elif key == \"pool_type\":\n pool_type = attrs.get_attr(\"pool_type\")\n elif key == \"ceil_mode\":\n ceil_mode = attrs.get_attr(\"ceil_mode\")\n elif key == \"exclusive\":\n exclusive = attrs.get_attr(\"exclusive\")\n elif key == \"data_format\":\n data_format = attrs.get_attr(\"data_format\")\n else:\n raise ValueError(\"attr_store {} is not supported\".format(key))\n\n if data_format == \"NCW\":\n in_n, in_c, in_w = in_shape = np_data.shape\n width_axis = 2\n 
elif data_format == \"NWC\":\n in_n, in_w, in_c = in_shape = np_data.shape\n width_axis = 1\n else:\n raise ValueError(\"data_format {} is not supported\".format(data_format))\n\n if isinstance(kernel_size, int):\n k_w = kernel_size\n else:\n k_w, = kernel_size\n if isinstance(stride_size, int):\n s_w = stride_size\n else:\n s_w, = stride_size\n if isinstance(padding_size, int):\n pl = pr = padding_size\n else:\n pl, pr = padding_size\n\n out_shape = list(in_shape)\n if ceil_mode:\n out_shape[width_axis] = int(\n math.ceil(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n else:\n out_shape[width_axis] = int(\n math.floor(float(in_shape[width_axis] - k_w + pl + pr) / s_w) + 1)\n\n fill_value = 0\n if exclusive and pool_type == 'max':\n fill_value = sys.float_info.min\n\n if data_format == \"NCW\":\n pad_np = np.full(\n shape=(in_n, in_c, in_w + pl + pr),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(in_c), range(pl, in_w + pl))\n else:\n pad_np = np.full(\n shape=(in_n, in_w + pl + pr, in_c),\n fill_value=fill_value,\n dtype=dtype)\n no_zero = (range(in_n), range(pl, in_w + pl), range(in_c))\n\n pad_np[np.ix_(*no_zero)] = np_data\n ret_np = np.zeros(shape=out_shape).astype(dtype)\n if pool_type == 'avg':\n for i in range(out_shape[width_axis]):\n if exclusive:\n pad_exclusive = pad_np.copy()\n pad_exclusive[np.ix_(*no_zero)] = 1\n if data_format == \"NCW\":\n pad_count = np.sum(\n pad_exclusive[:, :, i * s_w:i * s_w + k_w] == 1,\n axis=width_axis)\n ret_np[:, :, i] = np.sum(\n pad_np[:, :, i * s_w:i * s_w + k_w],\n axis=width_axis) / np.maximum(pad_count, 1)\n else:\n pad_count = np.sum(\n pad_exclusive[:, i * s_w:i * s_w + k_w, :] == 1,\n axis=width_axis)\n ret_np[:, i, :] = np.sum(\n pad_np[:, i * s_w:i * s_w + k_w, :],\n axis=width_axis) / np.maximum(pad_count, 1)\n else:\n if data_format == \"NCW\":\n ret_np[:, :, i] = \\\n np.mean(pad_np[:, :,\n i * s_w: i * s_w + k_w], axis=width_axis)\n else:\n ret_np[:, i, :] = \\\n np.mean(pad_np[:,\n i * s_w: i * s_w + k_w,\n :], axis=width_axis)\n elif pool_type == 'max':\n for k in range(out_shape[width_axis]):\n if data_format == \"NCW\":\n ret_np[:, :, k] = np.max(\n pad_np[:, :, k * s_w:k * s_w + k_w], axis=width_axis)\n else:\n ret_np[:, k, :] = np.max(\n pad_np[:, k * s_w:k * s_w + k_w, :], axis=width_axis)\n else:\n raise ValueError(\"pool type {} is not supported\".format(pool_type))\n\n ret_np = np.maximum(ret_np, fill_value)\n return ret_np, [out_shape]\n"
] |
[
[
"numpy.max",
"numpy.full",
"numpy.zeros",
"numpy.sum",
"numpy.ix_",
"numpy.mean",
"numpy.maximum"
]
] |
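`pool2d` above reads its configuration from an `attrs` object exposing `attr_store` and `get_attr`. The tiny stand-in class below is an assumption for illustration only, not CINN's real attribute container; it is just enough to drive the reference implementation.

```python
import numpy as np

class _Attrs:
    """Minimal stand-in for the attrs object pool2d expects (assumption)."""
    def __init__(self, **kwargs):
        self.attr_store = kwargs
    def get_attr(self, key):
        return self.attr_store[key]

x = np.random.rand(1, 3, 8, 8).astype("float32")
attrs = _Attrs(kernel_size=2, stride_size=2, padding_size=0,
               pool_type="avg", data_format="NCHW")

out, out_shapes = pool2d(x, attrs)
print(out.shape, out_shapes[0])  # (1, 3, 4, 4) and [1, 3, 4, 4]
```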
PyDemic/pydemic
|
[
"7e748e4bbe5c1f7fb209271af0ff8afb8fbd4fd5"
] |
[
"tests/models/test_sir.py"
] |
[
"import numpy as np\n\nfrom pydemic.diseases import covid19\nfrom pydemic.models import eSIR\n\n\nclass TestSIR:\n def test_basic_esir_api(self):\n m = eSIR(disease=covid19)\n m.run(30)\n res = m[\"I\"]\n ok = m.data.loc[m.times[0], \"infectious\"] * np.exp(m.K * m.times)\n\n assert m.R0 == 2.74\n assert abs(m.K - m.gamma * 1.74) <= 1e-6\n assert m.iter == len(m.data) == len(m.times) == len(m.dates)\n assert np.abs(res / ok - 1).max() < 1e-4\n"
] |
[
[
"numpy.exp",
"numpy.abs"
]
] |
dorothykiz1/pandas
|
[
"6033ed4b3383d874ee4a8a461724c0b8c2ca968d"
] |
[
"pandas/io/excel/_xlsxwriter.py"
] |
[
"from __future__ import annotations\n\nfrom typing import Any\n\nimport pandas._libs.json as json\nfrom pandas._typing import (\n FilePath,\n StorageOptions,\n WriteExcelBuffer,\n)\n\nfrom pandas.io.excel._base import ExcelWriter\nfrom pandas.io.excel._util import (\n combine_kwargs,\n validate_freeze_panes,\n)\n\n\nclass _XlsxStyler:\n # Map from openpyxl-oriented styles to flatter xlsxwriter representation\n # Ordering necessary for both determinism and because some are keyed by\n # prefixes of others.\n STYLE_MAPPING: dict[str, list[tuple[tuple[str, ...], str]]] = {\n \"font\": [\n ((\"name\",), \"font_name\"),\n ((\"sz\",), \"font_size\"),\n ((\"size\",), \"font_size\"),\n ((\"color\", \"rgb\"), \"font_color\"),\n ((\"color\",), \"font_color\"),\n ((\"b\",), \"bold\"),\n ((\"bold\",), \"bold\"),\n ((\"i\",), \"italic\"),\n ((\"italic\",), \"italic\"),\n ((\"u\",), \"underline\"),\n ((\"underline\",), \"underline\"),\n ((\"strike\",), \"font_strikeout\"),\n ((\"vertAlign\",), \"font_script\"),\n ((\"vertalign\",), \"font_script\"),\n ],\n \"number_format\": [((\"format_code\",), \"num_format\"), ((), \"num_format\")],\n \"protection\": [((\"locked\",), \"locked\"), ((\"hidden\",), \"hidden\")],\n \"alignment\": [\n ((\"horizontal\",), \"align\"),\n ((\"vertical\",), \"valign\"),\n ((\"text_rotation\",), \"rotation\"),\n ((\"wrap_text\",), \"text_wrap\"),\n ((\"indent\",), \"indent\"),\n ((\"shrink_to_fit\",), \"shrink\"),\n ],\n \"fill\": [\n ((\"patternType\",), \"pattern\"),\n ((\"patterntype\",), \"pattern\"),\n ((\"fill_type\",), \"pattern\"),\n ((\"start_color\", \"rgb\"), \"fg_color\"),\n ((\"fgColor\", \"rgb\"), \"fg_color\"),\n ((\"fgcolor\", \"rgb\"), \"fg_color\"),\n ((\"start_color\",), \"fg_color\"),\n ((\"fgColor\",), \"fg_color\"),\n ((\"fgcolor\",), \"fg_color\"),\n ((\"end_color\", \"rgb\"), \"bg_color\"),\n ((\"bgColor\", \"rgb\"), \"bg_color\"),\n ((\"bgcolor\", \"rgb\"), \"bg_color\"),\n ((\"end_color\",), \"bg_color\"),\n ((\"bgColor\",), \"bg_color\"),\n ((\"bgcolor\",), \"bg_color\"),\n ],\n \"border\": [\n ((\"color\", \"rgb\"), \"border_color\"),\n ((\"color\",), \"border_color\"),\n ((\"style\",), \"border\"),\n ((\"top\", \"color\", \"rgb\"), \"top_color\"),\n ((\"top\", \"color\"), \"top_color\"),\n ((\"top\", \"style\"), \"top\"),\n ((\"top\",), \"top\"),\n ((\"right\", \"color\", \"rgb\"), \"right_color\"),\n ((\"right\", \"color\"), \"right_color\"),\n ((\"right\", \"style\"), \"right\"),\n ((\"right\",), \"right\"),\n ((\"bottom\", \"color\", \"rgb\"), \"bottom_color\"),\n ((\"bottom\", \"color\"), \"bottom_color\"),\n ((\"bottom\", \"style\"), \"bottom\"),\n ((\"bottom\",), \"bottom\"),\n ((\"left\", \"color\", \"rgb\"), \"left_color\"),\n ((\"left\", \"color\"), \"left_color\"),\n ((\"left\", \"style\"), \"left\"),\n ((\"left\",), \"left\"),\n ],\n }\n\n @classmethod\n def convert(cls, style_dict, num_format_str=None):\n \"\"\"\n converts a style_dict to an xlsxwriter format dict\n\n Parameters\n ----------\n style_dict : style dictionary to convert\n num_format_str : optional number format string\n \"\"\"\n # Create a XlsxWriter format object.\n props = {}\n\n if num_format_str is not None:\n props[\"num_format\"] = num_format_str\n\n if style_dict is None:\n return props\n\n if \"borders\" in style_dict:\n style_dict = style_dict.copy()\n style_dict[\"border\"] = style_dict.pop(\"borders\")\n\n for style_group_key, style_group in style_dict.items():\n for src, dst in cls.STYLE_MAPPING.get(style_group_key, []):\n # src is a sequence of keys into a 
nested dict\n # dst is a flat key\n if dst in props:\n continue\n v = style_group\n for k in src:\n try:\n v = v[k]\n except (KeyError, TypeError):\n break\n else:\n props[dst] = v\n\n if isinstance(props.get(\"pattern\"), str):\n # TODO: support other fill patterns\n props[\"pattern\"] = 0 if props[\"pattern\"] == \"none\" else 1\n\n for k in [\"border\", \"top\", \"right\", \"bottom\", \"left\"]:\n if isinstance(props.get(k), str):\n try:\n props[k] = [\n \"none\",\n \"thin\",\n \"medium\",\n \"dashed\",\n \"dotted\",\n \"thick\",\n \"double\",\n \"hair\",\n \"mediumDashed\",\n \"dashDot\",\n \"mediumDashDot\",\n \"dashDotDot\",\n \"mediumDashDotDot\",\n \"slantDashDot\",\n ].index(props[k])\n except ValueError:\n props[k] = 2\n\n if isinstance(props.get(\"font_script\"), str):\n props[\"font_script\"] = [\"baseline\", \"superscript\", \"subscript\"].index(\n props[\"font_script\"]\n )\n\n if isinstance(props.get(\"underline\"), str):\n props[\"underline\"] = {\n \"none\": 0,\n \"single\": 1,\n \"double\": 2,\n \"singleAccounting\": 33,\n \"doubleAccounting\": 34,\n }[props[\"underline\"]]\n\n return props\n\n\nclass XlsxWriter(ExcelWriter):\n engine = \"xlsxwriter\"\n supported_extensions = (\".xlsx\",)\n\n def __init__(\n self,\n path: FilePath | WriteExcelBuffer | ExcelWriter,\n engine: str | None = None,\n date_format: str | None = None,\n datetime_format: str | None = None,\n mode: str = \"w\",\n storage_options: StorageOptions = None,\n if_sheet_exists: str | None = None,\n engine_kwargs: dict[str, Any] | None = None,\n **kwargs,\n ) -> None:\n # Use the xlsxwriter module as the Excel writer.\n from xlsxwriter import Workbook\n\n engine_kwargs = combine_kwargs(engine_kwargs, kwargs)\n\n if mode == \"a\":\n raise ValueError(\"Append mode is not supported with xlsxwriter!\")\n\n super().__init__(\n path,\n engine=engine,\n date_format=date_format,\n datetime_format=datetime_format,\n mode=mode,\n storage_options=storage_options,\n if_sheet_exists=if_sheet_exists,\n engine_kwargs=engine_kwargs,\n )\n\n self._book = Workbook(self._handles.handle, **engine_kwargs)\n\n @property\n def book(self):\n \"\"\"\n Book instance of class xlsxwriter.Workbook.\n\n This attribute can be used to access engine-specific features.\n \"\"\"\n return self._book\n\n @property\n def sheets(self) -> dict[str, Any]:\n result = self.book.sheetnames\n return result\n\n def _save(self) -> None:\n \"\"\"\n Save workbook to disk.\n \"\"\"\n self.book.close()\n\n def _write_cells(\n self,\n cells,\n sheet_name: str | None = None,\n startrow: int = 0,\n startcol: int = 0,\n freeze_panes: tuple[int, int] | None = None,\n ) -> None:\n # Write the frame cells using xlsxwriter.\n sheet_name = self._get_sheet_name(sheet_name)\n\n wks = self.book.get_worksheet_by_name(sheet_name)\n if wks is None:\n wks = self.book.add_worksheet(sheet_name)\n\n style_dict = {\"null\": None}\n\n if validate_freeze_panes(freeze_panes):\n wks.freeze_panes(*(freeze_panes))\n\n for cell in cells:\n val, fmt = self._value_with_fmt(cell.val)\n\n stylekey = json.dumps(cell.style)\n if fmt:\n stylekey += fmt\n\n if stylekey in style_dict:\n style = style_dict[stylekey]\n else:\n style = self.book.add_format(_XlsxStyler.convert(cell.style, fmt))\n style_dict[stylekey] = style\n\n if cell.mergestart is not None and cell.mergeend is not None:\n wks.merge_range(\n startrow + cell.row,\n startcol + cell.col,\n startrow + cell.mergestart,\n startcol + cell.mergeend,\n val,\n style,\n )\n else:\n wks.write(startrow + cell.row, startcol + cell.col, val, 
style)\n"
] |
[
[
"pandas.io.excel._util.validate_freeze_panes",
"pandas.io.excel._util.combine_kwargs",
"pandas._libs.json.dumps"
]
] |
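The class above registers as pandas' `"xlsxwriter"` Excel engine. A small usage sketch (assuming the `xlsxwriter` package is installed); note that append mode (`mode="a"`) would raise `ValueError`, as the `__init__` guard in the row's code shows.

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Route writing through the xlsxwriter engine; freeze_panes exercises the
# validate_freeze_panes path in _write_cells.
with pd.ExcelWriter("out.xlsx", engine="xlsxwriter") as writer:
    df.to_excel(writer, sheet_name="Sheet1", freeze_panes=(1, 0))
```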
kali20gakki/code
|
[
"369a2e64c2bfbd18899d1d49556d8d208d01bdff"
] |
[
"ppdet/data/transform/operators.py"
] |
[
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# function:\n#    operators to process sample,\n#    eg: decode/resize/crop image\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\ntry:\n    from collections.abc import Sequence\nexcept Exception:\n    from collections import Sequence\n\nfrom numbers import Number\n\nimport uuid\nimport logging\nimport random\nimport math\nimport numpy as np\nimport os\nimport six\n\nimport cv2\nfrom PIL import Image, ImageEnhance, ImageDraw, ImageOps\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom .minority_enhance_utils import *\n\nfrom ppdet.core.workspace import serializable\nfrom ppdet.modeling.ops import AnchorGrid\n\nfrom .op_helper import (satisfy_sample_constraint, filter_and_process,\n                        generate_sample_bbox, clip_bbox, data_anchor_sampling,\n                        satisfy_sample_constraint_coverage, crop_image_sampling,\n                        generate_sample_bbox_square, bbox_area_sampling,\n                        is_poly, gaussian_radius, draw_gaussian)\n\nlogger = logging.getLogger(__name__)\n\nregistered_ops = []\n\n\ndef register_op(cls):\n    registered_ops.append(cls.__name__)\n    if not hasattr(BaseOperator, cls.__name__):\n        setattr(BaseOperator, cls.__name__, cls)\n    else:\n        raise KeyError(\"The {} class has already been registered.\".format(cls.__name__))\n    return serializable(cls)\n\n\nclass BboxError(ValueError):\n    pass\n\n\nclass ImageError(ValueError):\n    pass\n\n\nclass BaseOperator(object):\n    def __init__(self, name=None):\n        if name is None:\n            name = self.__class__.__name__\n        self._id = name + '_' + str(uuid.uuid4())[-6:]\n\n    def __call__(self, sample, context=None):\n        \"\"\" Process a sample.\n        Args:\n            sample (dict): a dict of sample, eg: {'image':xx, 'label': xxx}\n            context (dict): info about this sample processing\n        Returns:\n            result (dict): a processed sample\n        \"\"\"\n        return sample\n\n    def __str__(self):\n        return str(self._id)\n\n\n@register_op\nclass DecodeImage(BaseOperator):\n    def __init__(self, to_rgb=True, with_mixup=False, with_cutmix=False):\n        \"\"\" Transform the image data to numpy format.\n        Args:\n            to_rgb (bool): whether to convert BGR to RGB\n            with_mixup (bool): whether or not to mixup image and gt_bbox/gt_score\n            with_cutmix (bool): whether or not to cutmix image and gt_bbox/gt_score\n        \"\"\"\n\n        super(DecodeImage, self).__init__()\n        self.to_rgb = to_rgb\n        self.with_mixup = with_mixup\n        self.with_cutmix = with_cutmix\n        if not isinstance(self.to_rgb, bool):\n            raise TypeError(\"{}: input type is invalid.\".format(self))\n        if not isinstance(self.with_mixup, bool):\n            raise TypeError(\"{}: input type is invalid.\".format(self))\n\n    def __call__(self, sample, context=None):\n        \"\"\" load image if 'im_file' field is not empty but 'image' is\"\"\"\n        if 'image' not in sample:\n            with open(sample['im_file'], 'rb') as f:\n                sample['image'] = f.read()\n\n        im = sample['image']\n        data = np.frombuffer(im, dtype='uint8')\n        im = cv2.imdecode(data, 1)  # BGR mode, but need RGB 
mode\n\n        if self.to_rgb:\n            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)\n        sample['image'] = im\n\n        if 'h' not in sample:\n            sample['h'] = im.shape[0]\n        elif sample['h'] != im.shape[0]:\n            logger.warning(\n                \"The actual image height: {} is not equal to the \"\n                \"height: {} in annotation, and update sample['h'] by actual \"\n                \"image height.\".format(im.shape[0], sample['h']))\n            sample['h'] = im.shape[0]\n        if 'w' not in sample:\n            sample['w'] = im.shape[1]\n        elif sample['w'] != im.shape[1]:\n            logger.warning(\n                \"The actual image width: {} is not equal to the \"\n                \"width: {} in annotation, and update sample['w'] by actual \"\n                \"image width.\".format(im.shape[1], sample['w']))\n            sample['w'] = im.shape[1]\n\n        # make default im_info with [h, w, 1]\n        sample['im_info'] = np.array(\n            [im.shape[0], im.shape[1], 1.], dtype=np.float32)\n\n        # decode mixup image\n        if self.with_mixup and 'mixup' in sample:\n            self.__call__(sample['mixup'], context)\n\n        # decode cutmix image\n        if self.with_cutmix and 'cutmix' in sample:\n            self.__call__(sample['cutmix'], context)\n\n        # decode semantic label\n        if 'semantic' in sample.keys() and sample['semantic'] is not None:\n            sem_file = sample['semantic']\n            sem = cv2.imread(sem_file, cv2.IMREAD_GRAYSCALE)\n            sample['semantic'] = sem.astype('int32')\n\n        return sample\n\n@register_op\nclass RoadEnhance(BaseOperator):\n    def __init__(self, prob=0.5):\n        super(RoadEnhance, self).__init__()\n        print(\"[DIY] Use RoadEnhance!\")\n        sometimes = lambda aug: iaa.Sometimes(prob, aug)\n        seq_list = [sometimes(\n            iaa.OneOf([\n                iaa.GammaContrast((0.5, 2.0)),\n                iaa.MultiplyAndAddToBrightness(mul=(0.5, 1.5), add=(-30, 30)),\n                iaa.MotionBlur(k=7, angle=[-45, 45]),\n                iaa.Snowflakes(flake_size=(0.5, 0.75), speed=(0.001, 0.03)),\n                iaa.Rain(drop_size=(0.10, 0.20)),\n            ])\n        )]\n        self.seq = iaa.Sequential(seq_list, random_order=True)\n\n    # BaseOperator in this file dispatches through __call__, so implement it\n    # directly (a method named apply would never be invoked).\n    def __call__(self, sample, context=None):\n        im = sample['image']\n        im = self.seq(image=im)\n        sample['image'] = im\n        return sample\n\n@register_op\nclass MinorityEnhance(BaseOperator):\n    def __init__(self, obj_dir='ppdet/data/transform/class3_obj', prob=0.6):\n        super(MinorityEnhance, self).__init__()\n        print(\"[DIY] Use MinorityEnhance!\")\n\n        self.prob = int(prob * 10)\n        # resource files for the minority class\n        self.images_dir = os.path.join(obj_dir, 'images')\n        self.seg_dir = os.path.join(obj_dir, 'seg')\n        self.segs_list = os.listdir(self.seg_dir)\n\n    def _apply(self, sample, context=None):\n        if random.randint(1, 10) <= 7:  # p = 0.7\n            bbox_size_range = (32, 64)  # medium size\n        else:\n            bbox_size_range = (16, 32)\n\n        gt_bbox = sample['gt_bbox']\n        gt_class = sample['gt_class']\n        H, W = sample['h'], sample['w']\n        bg_img = sample['image']\n\n        points = select_points(gt_bbox, [H, W], 2)  # always paste two objects\n        for point in points:\n            seg_file = random.choice(self.segs_list)\n            seg_path = os.path.join(self.seg_dir, seg_file)\n            obj_path = os.path.join(self.images_dir, seg_file.split('.')[0]+'.png')\n            polygon = get_polygon(seg_path)\n            obj_img = cv2.imread(obj_path)\n\n            bg_img, bbox = paste_obj2img(bg_img, obj_img, polygon, point,\n                                         random.randint(bbox_size_range[0], bbox_size_range[1]))\n\n            if bbox is not None:\n                gt_bbox.append(bbox)\n                gt_class.append(3)\n        sample['image'] = bg_img\n\n        return sample\n\n    # BaseOperator in this file dispatches through __call__, so implement it\n    # directly; use the configured probability instead of a hard-coded value.\n    def __call__(self, sample, context=None):\n        if random.randint(1, 10) <= self.prob:  # default prob=0.6\n            sample = self._apply(sample)\n        return sample\n\n@register_op\nclass MultiscaleTestResize(BaseOperator):\n    def __init__(self,\n                 
origin_target_size=800,\n origin_max_size=1333,\n target_size=[],\n max_size=2000,\n interp=cv2.INTER_LINEAR,\n use_flip=True):\n \"\"\"\n Rescale image to the each size in target size, and capped at max_size.\n Args:\n origin_target_size(int): original target size of image's short side.\n origin_max_size(int): original max size of image.\n target_size (list): A list of target sizes of image's short side.\n max_size (int): the max size of image.\n interp (int): the interpolation method.\n use_flip (bool): whether use flip augmentation.\n \"\"\"\n super(MultiscaleTestResize, self).__init__()\n self.origin_target_size = int(origin_target_size)\n self.origin_max_size = int(origin_max_size)\n self.max_size = int(max_size)\n self.interp = int(interp)\n self.use_flip = use_flip\n\n if not isinstance(target_size, list):\n raise TypeError(\n \"Type of target_size is invalid. Must be List, now is {}\".\n format(type(target_size)))\n self.target_size = target_size\n if not (isinstance(self.origin_target_size, int) and isinstance(\n self.origin_max_size, int) and isinstance(self.max_size, int)\n and isinstance(self.interp, int)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def __call__(self, sample, context=None):\n \"\"\" Resize the image numpy for multi-scale test.\n \"\"\"\n origin_ims = {}\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image type is not numpy.\".format(self))\n if len(im.shape) != 3:\n raise ImageError('{}: image is not 3-dimensional.'.format(self))\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n if float(im_size_min) == 0:\n raise ZeroDivisionError('{}: min size of image is 0'.format(self))\n base_name_list = ['image']\n origin_ims['image'] = im\n if self.use_flip:\n sample['image_flip'] = im[:, ::-1, :]\n base_name_list.append('image_flip')\n origin_ims['image_flip'] = sample['image_flip']\n\n for base_name in base_name_list:\n im_scale = float(self.origin_target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > self.origin_max_size:\n im_scale = float(self.origin_max_size) / float(im_size_max)\n im_scale_x = im_scale\n im_scale_y = im_scale\n\n resize_w = np.round(im_scale_x * float(im_shape[1]))\n resize_h = np.round(im_scale_y * float(im_shape[0]))\n im_resize = cv2.resize(\n origin_ims[base_name],\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n\n sample[base_name] = im_resize\n info_name = 'im_info' if base_name == 'image' else 'im_info_image_flip'\n sample[base_name] = im_resize\n sample[info_name] = np.array(\n [resize_h, resize_w, im_scale], dtype=np.float32)\n for i, size in enumerate(self.target_size):\n im_scale = float(size) / float(im_size_min)\n if np.round(im_scale * im_size_max) > self.max_size:\n im_scale = float(self.max_size) / float(im_size_max)\n im_scale_x = im_scale\n im_scale_y = im_scale\n resize_w = np.round(im_scale_x * float(im_shape[1]))\n resize_h = np.round(im_scale_y * float(im_shape[0]))\n im_resize = cv2.resize(\n origin_ims[base_name],\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n\n im_info = [resize_h, resize_w, im_scale]\n # hard-code here, must be consistent with\n # ppdet/modeling/architectures/input_helper.py\n name = base_name + '_scale_' + str(i)\n info_name = 'im_info_' + name\n sample[name] = im_resize\n sample[info_name] = np.array(\n [resize_h, resize_w, im_scale], 
dtype=np.float32)\n return sample\n\n\n@register_op\nclass ResizeImage(BaseOperator):\n def __init__(self,\n target_size=0,\n max_size=0,\n interp=cv2.INTER_LINEAR,\n use_cv2=True,\n resize_box=False):\n \"\"\"\n Rescale image to the specified target size, and capped at max_size\n if max_size != 0.\n If target_size is list, selected a scale randomly as the specified\n target size.\n Args:\n target_size (int|list): the target size of image's short side,\n multi-scale training is adopted when type is list.\n max_size (int): the max size of image\n interp (int): the interpolation method\n use_cv2 (bool): use the cv2 interpolation method or use PIL\n interpolation method\n resize_box (bool): whether resize ground truth bbox annotations.\n \"\"\"\n super(ResizeImage, self).__init__()\n self.max_size = int(max_size)\n self.interp = int(interp)\n self.use_cv2 = use_cv2\n self.resize_box = resize_box\n if not (isinstance(target_size, int) or isinstance(target_size, list)):\n raise TypeError(\n \"Type of target_size is invalid. Must be Integer or List, now is {}\".\n format(type(target_size)))\n self.target_size = target_size\n if not (isinstance(self.max_size, int) and isinstance(self.interp,\n int)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def __call__(self, sample, context=None):\n \"\"\" Resize the image numpy.\n \"\"\"\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image type is not numpy.\".format(self))\n if len(im.shape) != 3:\n raise ImageError('{}: image is not 3-dimensional.'.format(self))\n im_shape = im.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n if isinstance(self.target_size, list):\n # Case for multi-scale training\n selected_size = random.choice(self.target_size)\n else:\n selected_size = self.target_size\n if float(im_size_min) == 0:\n raise ZeroDivisionError('{}: min size of image is 0'.format(self))\n if self.max_size != 0:\n im_scale = float(selected_size) / float(im_size_min)\n # Prevent the biggest axis from being more than max_size\n if np.round(im_scale * im_size_max) > self.max_size:\n im_scale = float(self.max_size) / float(im_size_max)\n im_scale_x = im_scale\n im_scale_y = im_scale\n\n resize_w = im_scale_x * float(im_shape[1])\n resize_h = im_scale_y * float(im_shape[0])\n im_info = [resize_h, resize_w, im_scale]\n if 'im_info' in sample and sample['im_info'][2] != 1.:\n sample['im_info'] = np.append(\n list(sample['im_info']), im_info).astype(np.float32)\n else:\n sample['im_info'] = np.array(im_info).astype(np.float32)\n else:\n im_scale_x = float(selected_size) / float(im_shape[1])\n im_scale_y = float(selected_size) / float(im_shape[0])\n\n resize_w = selected_size\n resize_h = selected_size\n\n if self.use_cv2:\n im = cv2.resize(\n im,\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n else:\n if self.max_size != 0:\n raise TypeError(\n 'If you set max_size to cap the maximum size of image,'\n 'please set use_cv2 to True to resize the image.')\n im = im.astype('uint8')\n im = Image.fromarray(im)\n im = im.resize((int(resize_w), int(resize_h)), self.interp)\n im = np.array(im)\n sample['image'] = im\n sample['scale_factor'] = [im_scale_x, im_scale_y] * 2\n if 'gt_bbox' in sample and self.resize_box and len(sample[\n 'gt_bbox']) > 0:\n bboxes = sample['gt_bbox'] * sample['scale_factor']\n bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, resize_w - 1)\n bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, resize_h - 1)\n sample['gt_bbox'] = 
bboxes\n if 'semantic' in sample.keys() and sample['semantic'] is not None:\n semantic = sample['semantic']\n semantic = cv2.resize(\n semantic.astype('float32'),\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=self.interp)\n semantic = np.asarray(semantic).astype('int32')\n semantic = np.expand_dims(semantic, 0)\n sample['semantic'] = semantic\n if 'gt_segm' in sample and len(sample['gt_segm']) > 0:\n masks = [\n cv2.resize(\n gt_segm,\n None,\n None,\n fx=im_scale_x,\n fy=im_scale_y,\n interpolation=cv2.INTER_NEAREST)\n for gt_segm in sample['gt_segm']\n ]\n sample['gt_segm'] = np.asarray(masks).astype(np.uint8)\n\n return sample\n\n\n@register_op\nclass RandomFlipImage(BaseOperator):\n def __init__(self, prob=0.5, is_normalized=False, is_mask_flip=False):\n \"\"\"\n Args:\n prob (float): the probability of flipping image\n is_normalized (bool): whether the bbox scale to [0,1]\n is_mask_flip (bool): whether flip the segmentation\n \"\"\"\n super(RandomFlipImage, self).__init__()\n self.prob = prob\n self.is_normalized = is_normalized\n self.is_mask_flip = is_mask_flip\n if not (isinstance(self.prob, float) and\n isinstance(self.is_normalized, bool) and\n isinstance(self.is_mask_flip, bool)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def flip_segms(self, segms, height, width):\n def _flip_poly(poly, width):\n flipped_poly = np.array(poly)\n flipped_poly[0::2] = width - np.array(poly[0::2]) - 1\n return flipped_poly.tolist()\n\n def _flip_rle(rle, height, width):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n mask = mask[:, ::-1]\n rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))\n return rle\n\n flipped_segms = []\n for segm in segms:\n if is_poly(segm):\n # Polygon format\n flipped_segms.append([_flip_poly(poly, width) for poly in segm])\n else:\n # RLE format\n import pycocotools.mask as mask_util\n flipped_segms.append(_flip_rle(segm, height, width))\n return flipped_segms\n\n def flip_keypoint(self, gt_keypoint, width):\n for i in range(gt_keypoint.shape[1]):\n if i % 2 == 0:\n old_x = gt_keypoint[:, i].copy()\n if self.is_normalized:\n gt_keypoint[:, i] = 1 - old_x\n else:\n gt_keypoint[:, i] = width - old_x - 1\n return gt_keypoint\n\n def __call__(self, sample, context=None):\n \"\"\"Filp the image and bounding box.\n Operators:\n 1. Flip the image numpy.\n 2. Transform the bboxes' x coordinates.\n (Must judge whether the coordinates are normalized!)\n 3. 
Transform the segmentations' x coordinates.\n (Must judge whether the coordinates are normalized!)\n Output:\n sample: the image, bounding box and segmentation part\n in sample are flipped.\n \"\"\"\n\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n gt_bbox = sample['gt_bbox']\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image is not a numpy array.\".format(self))\n if len(im.shape) != 3:\n raise ImageError(\"{}: image is not 3-dimensional.\".format(self))\n height, width, _ = im.shape\n if np.random.uniform(0, 1) < self.prob:\n im = im[:, ::-1, :]\n if gt_bbox.shape[0] == 0:\n return sample\n oldx1 = gt_bbox[:, 0].copy()\n oldx2 = gt_bbox[:, 2].copy()\n if self.is_normalized:\n gt_bbox[:, 0] = 1 - oldx2\n gt_bbox[:, 2] = 1 - oldx1\n else:\n gt_bbox[:, 0] = width - oldx2 - 1\n gt_bbox[:, 2] = width - oldx1 - 1\n if gt_bbox.shape[0] != 0 and (\n gt_bbox[:, 2] < gt_bbox[:, 0]).all():\n m = \"{}: invalid box, x2 should be greater than x1\".format(\n self)\n raise BboxError(m)\n sample['gt_bbox'] = gt_bbox\n if self.is_mask_flip and len(sample['gt_poly']) != 0:\n sample['gt_poly'] = self.flip_segms(sample['gt_poly'],\n height, width)\n if 'gt_keypoint' in sample.keys():\n sample['gt_keypoint'] = self.flip_keypoint(\n sample['gt_keypoint'], width)\n\n if 'semantic' in sample.keys() and sample[\n 'semantic'] is not None:\n sample['semantic'] = sample['semantic'][:, ::-1]\n\n if 'gt_segm' in sample.keys() and sample['gt_segm'] is not None:\n sample['gt_segm'] = sample['gt_segm'][:, :, ::-1]\n\n sample['flipped'] = True\n sample['image'] = im\n sample = samples if batch_input else samples[0]\n return sample\n\n\n@register_op\nclass RandomErasingImage(BaseOperator):\n def __init__(self, prob=0.5, sl=0.02, sh=0.4, r1=0.3):\n \"\"\"\n Random Erasing Data Augmentation, see https://arxiv.org/abs/1708.04896\n Args:\n prob (float): probability to carry out random erasing\n sl (float): lower limit of the erasing area ratio\n sh (float): upper limit of the erasing area ratio\n r1 (float): aspect ratio of the erasing region\n \"\"\"\n super(RandomErasingImage, self).__init__()\n self.prob = prob\n self.sl = sl\n self.sh = sh\n self.r1 = r1\n\n def __call__(self, sample, context=None):\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n gt_bbox = sample['gt_bbox']\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image is not a numpy array.\".format(self))\n if len(im.shape) != 3:\n raise ImageError(\"{}: image is not 3-dimensional.\".format(self))\n\n for idx in range(gt_bbox.shape[0]):\n if self.prob <= np.random.rand():\n continue\n\n x1, y1, x2, y2 = gt_bbox[idx, :]\n w_bbox = x2 - x1 + 1\n h_bbox = y2 - y1 + 1\n area = w_bbox * h_bbox\n\n target_area = random.uniform(self.sl, self.sh) * area\n aspect_ratio = random.uniform(self.r1, 1 / self.r1)\n\n h = int(round(math.sqrt(target_area * aspect_ratio)))\n w = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w < w_bbox and h < h_bbox:\n off_y1 = random.randint(0, int(h_bbox - h))\n off_x1 = random.randint(0, int(w_bbox - w))\n im[int(y1 + off_y1):int(y1 + off_y1 + h), int(x1 + off_x1):\n int(x1 + off_x1 + w), :] = 0\n sample['image'] = im\n\n sample = samples if batch_input else samples[0]\n return sample\n\n\n@register_op\nclass GridMaskOp(BaseOperator):\n def __init__(self,\n 
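
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): sampling an erasing rectangle the way
# RandomErasingImage does per ground-truth box. The area ratio is drawn from
# [sl, sh], the aspect ratio from [r1, 1/r1], and the patch is used only if
# it fits inside the box. All values below are hypothetical.
import math
import random

random.seed(0)
w_bbox, h_bbox = 80, 60
target_area = random.uniform(0.02, 0.4) * (w_bbox * h_bbox)
aspect = random.uniform(0.3, 1 / 0.3)
h = int(round(math.sqrt(target_area * aspect)))
w = int(round(math.sqrt(target_area / aspect)))
fits = (w < w_bbox) and (h < h_bbox)  # otherwise the erase is skipped
print(w, h, fits)
# ---------------------------------------------------------------------------
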
use_h=True,\n use_w=True,\n rotate=1,\n offset=False,\n ratio=0.5,\n mode=1,\n prob=0.7,\n upper_iter=360000):\n \"\"\"\n GridMask Data Augmentation, see https://arxiv.org/abs/2001.04086\n Args:\n use_h (bool): whether to mask vertically\n use_w (boo;): whether to mask horizontally\n rotate (float): angle for the mask to rotate\n offset (float): mask offset\n ratio (float): mask ratio\n mode (int): gridmask mode\n prob (float): max probability to carry out gridmask\n upper_iter (int): suggested to be equal to global max_iter\n \"\"\"\n super(GridMaskOp, self).__init__()\n self.use_h = use_h\n self.use_w = use_w\n self.rotate = rotate\n self.offset = offset\n self.ratio = ratio\n self.mode = mode\n self.prob = prob\n self.upper_iter = upper_iter\n\n from .gridmask_utils import GridMask\n self.gridmask_op = GridMask(\n use_h,\n use_w,\n rotate=rotate,\n offset=offset,\n ratio=ratio,\n mode=mode,\n prob=prob,\n upper_iter=upper_iter)\n\n def __call__(self, sample, context=None):\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n sample['image'] = self.gridmask_op(sample['image'],\n sample['curr_iter'])\n if not batch_input:\n samples = samples[0]\n return samples\n\n\n@register_op\nclass AutoAugmentImage(BaseOperator):\n def __init__(self, is_normalized=False, autoaug_type=\"v1\"):\n \"\"\"\n Args:\n is_normalized (bool): whether the bbox scale to [0,1]\n autoaug_type (str): autoaug type, support v0, v1, v2, v3, test\n \"\"\"\n super(AutoAugmentImage, self).__init__()\n self.is_normalized = is_normalized\n self.autoaug_type = autoaug_type\n if not isinstance(self.is_normalized, bool):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n\n def __call__(self, sample, context=None):\n \"\"\"\n Learning Data Augmentation Strategies for Object Detection, see https://arxiv.org/abs/1906.11172\n \"\"\"\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n gt_bbox = sample['gt_bbox']\n im = sample['image']\n if not isinstance(im, np.ndarray):\n raise TypeError(\"{}: image is not a numpy array.\".format(self))\n if len(im.shape) != 3:\n raise ImageError(\"{}: image is not 3-dimensional.\".format(self))\n if len(gt_bbox) == 0:\n continue\n\n # gt_boxes : [x1, y1, x2, y2]\n # norm_gt_boxes: [y1, x1, y2, x2]\n height, width, _ = im.shape\n norm_gt_bbox = np.ones_like(gt_bbox, dtype=np.float32)\n if not self.is_normalized:\n norm_gt_bbox[:, 0] = gt_bbox[:, 1] / float(height)\n norm_gt_bbox[:, 1] = gt_bbox[:, 0] / float(width)\n norm_gt_bbox[:, 2] = gt_bbox[:, 3] / float(height)\n norm_gt_bbox[:, 3] = gt_bbox[:, 2] / float(width)\n else:\n norm_gt_bbox[:, 0] = gt_bbox[:, 1]\n norm_gt_bbox[:, 1] = gt_bbox[:, 0]\n norm_gt_bbox[:, 2] = gt_bbox[:, 3]\n norm_gt_bbox[:, 3] = gt_bbox[:, 2]\n\n from .autoaugment_utils import distort_image_with_autoaugment\n im, norm_gt_bbox = distort_image_with_autoaugment(im, norm_gt_bbox,\n self.autoaug_type)\n if not self.is_normalized:\n gt_bbox[:, 0] = norm_gt_bbox[:, 1] * float(width)\n gt_bbox[:, 1] = norm_gt_bbox[:, 0] * float(height)\n gt_bbox[:, 2] = norm_gt_bbox[:, 3] * float(width)\n gt_bbox[:, 3] = norm_gt_bbox[:, 2] * float(height)\n else:\n gt_bbox[:, 0] = norm_gt_bbox[:, 1]\n gt_bbox[:, 1] = norm_gt_bbox[:, 0]\n gt_bbox[:, 2] = norm_gt_bbox[:, 3]\n gt_bbox[:, 3] = norm_gt_bbox[:, 2]\n\n sample['gt_bbox'] = gt_bbox\n sample['image'] = im\n\n sample = samples if 
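
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): AutoAugmentImage converts boxes from
# pixel [x1, y1, x2, y2] to normalized [y1, x1, y2, x2] before calling
# distort_image_with_autoaugment, then converts back. A hypothetical box:
import numpy as np

height, width = 200, 400
xyxy = np.array([[40., 20., 120., 180.]], dtype=np.float32)
norm_yxyx = np.stack([xyxy[:, 1] / height, xyxy[:, 0] / width,
                      xyxy[:, 3] / height, xyxy[:, 2] / width], axis=1)
# round-trip back to pixel xyxy
back = np.stack([norm_yxyx[:, 1] * width, norm_yxyx[:, 0] * height,
                 norm_yxyx[:, 3] * width, norm_yxyx[:, 2] * height], axis=1)
assert np.allclose(back, xyxy)
# ---------------------------------------------------------------------------
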
batch_input else samples[0]\n return sample\n\n\n@register_op\nclass NormalizeImage(BaseOperator):\n def __init__(self,\n mean=[0.485, 0.456, 0.406],\n std=[1, 1, 1],\n is_scale=True,\n is_channel_first=True):\n \"\"\"\n Args:\n mean (list): the pixel mean\n std (list): the pixel variance\n \"\"\"\n super(NormalizeImage, self).__init__()\n self.mean = mean\n self.std = std\n self.is_scale = is_scale\n self.is_channel_first = is_channel_first\n if not (isinstance(self.mean, list) and isinstance(self.std, list) and\n isinstance(self.is_scale, bool)):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n from functools import reduce\n if reduce(lambda x, y: x * y, self.std) == 0:\n raise ValueError('{}: std is invalid!'.format(self))\n\n def __call__(self, sample, context=None):\n \"\"\"Normalize the image.\n Operators:\n 1.(optional) Scale the image to [0,1]\n 2. Each pixel minus mean and is divided by std\n \"\"\"\n samples = sample\n batch_input = True\n if not isinstance(samples, Sequence):\n batch_input = False\n samples = [samples]\n for sample in samples:\n for k in sample.keys():\n # hard code\n if k.startswith('image'):\n im = sample[k]\n im = im.astype(np.float32, copy=False)\n if self.is_channel_first:\n mean = np.array(self.mean)[:, np.newaxis, np.newaxis]\n std = np.array(self.std)[:, np.newaxis, np.newaxis]\n else:\n mean = np.array(self.mean)[np.newaxis, np.newaxis, :]\n std = np.array(self.std)[np.newaxis, np.newaxis, :]\n if self.is_scale:\n im = im / 255.0\n im -= mean\n im /= std\n sample[k] = im\n if not batch_input:\n samples = samples[0]\n return samples\n\n\n@register_op\nclass RandomDistort(BaseOperator):\n def __init__(self,\n brightness_lower=0.5,\n brightness_upper=1.5,\n contrast_lower=0.5,\n contrast_upper=1.5,\n saturation_lower=0.5,\n saturation_upper=1.5,\n hue_lower=-18,\n hue_upper=18,\n brightness_prob=0.5,\n contrast_prob=0.5,\n saturation_prob=0.5,\n hue_prob=0.5,\n count=4,\n is_order=False):\n \"\"\"\n Args:\n brightness_lower/ brightness_upper (float): the brightness\n between brightness_lower and brightness_upper\n contrast_lower/ contrast_upper (float): the contrast between\n contrast_lower and contrast_lower\n saturation_lower/ saturation_upper (float): the saturation\n between saturation_lower and saturation_upper\n hue_lower/ hue_upper (float): the hue between\n hue_lower and hue_upper\n brightness_prob (float): the probability of changing brightness\n contrast_prob (float): the probability of changing contrast\n saturation_prob (float): the probability of changing saturation\n hue_prob (float): the probability of changing hue\n count (int): the kinds of doing distrot\n is_order (bool): whether determine the order of distortion\n \"\"\"\n super(RandomDistort, self).__init__()\n self.brightness_lower = brightness_lower\n self.brightness_upper = brightness_upper\n self.contrast_lower = contrast_lower\n self.contrast_upper = contrast_upper\n self.saturation_lower = saturation_lower\n self.saturation_upper = saturation_upper\n self.hue_lower = hue_lower\n self.hue_upper = hue_upper\n self.brightness_prob = brightness_prob\n self.contrast_prob = contrast_prob\n self.saturation_prob = saturation_prob\n self.hue_prob = hue_prob\n self.count = count\n self.is_order = is_order\n\n def random_brightness(self, img):\n brightness_delta = np.random.uniform(self.brightness_lower,\n self.brightness_upper)\n prob = np.random.uniform(0, 1)\n if prob < self.brightness_prob:\n img = ImageEnhance.Brightness(img).enhance(brightness_delta)\n return img\n\n 
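
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): the broadcasting NormalizeImage relies
# on. For channel-first (CHW) data, mean/std get shape (3, 1, 1); for HWC
# they get (1, 1, 3). The std values here are hypothetical (the class
# default is [1, 1, 1]).
import numpy as np

im = np.random.rand(3, 4, 5).astype(np.float32)  # CHW, already in [0, 1]
mean = np.array([0.485, 0.456, 0.406])[:, np.newaxis, np.newaxis]
std = np.array([0.229, 0.224, 0.225])[:, np.newaxis, np.newaxis]
out = (im - mean) / std                           # broadcasts per channel
assert out.shape == im.shape
# ---------------------------------------------------------------------------
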
def random_contrast(self, img):\n contrast_delta = np.random.uniform(self.contrast_lower,\n self.contrast_upper)\n prob = np.random.uniform(0, 1)\n if prob < self.contrast_prob:\n img = ImageEnhance.Contrast(img).enhance(contrast_delta)\n return img\n\n def random_saturation(self, img):\n saturation_delta = np.random.uniform(self.saturation_lower,\n self.saturation_upper)\n prob = np.random.uniform(0, 1)\n if prob < self.saturation_prob:\n img = ImageEnhance.Color(img).enhance(saturation_delta)\n return img\n\n def random_hue(self, img):\n hue_delta = np.random.uniform(self.hue_lower, self.hue_upper)\n prob = np.random.uniform(0, 1)\n if prob < self.hue_prob:\n img = np.array(img.convert('HSV'))\n img[:, :, 0] = img[:, :, 0] + hue_delta\n img = Image.fromarray(img, mode='HSV').convert('RGB')\n return img\n\n def __call__(self, sample, context):\n \"\"\"random distort the image\"\"\"\n ops = [\n self.random_brightness, self.random_contrast,\n self.random_saturation, self.random_hue\n ]\n if self.is_order:\n prob = np.random.uniform(0, 1)\n if prob < 0.5:\n ops = [\n self.random_brightness,\n self.random_saturation,\n self.random_hue,\n self.random_contrast,\n ]\n else:\n ops = random.sample(ops, self.count)\n assert 'image' in sample, \"image data not found\"\n im = sample['image']\n im = Image.fromarray(im)\n for id in range(self.count):\n im = ops[id](im)\n im = np.asarray(im)\n sample['image'] = im\n return sample\n\n\n@register_op\nclass ExpandImage(BaseOperator):\n def __init__(self, max_ratio, prob, mean=[127.5, 127.5, 127.5]):\n \"\"\"\n Args:\n max_ratio (float): the ratio of expanding\n prob (float): the probability of expanding image\n mean (list): the pixel mean\n \"\"\"\n super(ExpandImage, self).__init__()\n self.max_ratio = max_ratio\n self.mean = mean\n self.prob = prob\n\n def __call__(self, sample, context):\n \"\"\"\n Expand the image and modify bounding box.\n Operators:\n 1. Scale the image width and height.\n 2. Construct new images with new height and width.\n 3. Fill the new image with the mean.\n 4. Put original imge into new image.\n 5. Rescale the bounding box.\n 6. 
Determine if the new bbox is satisfied in the new image.\n Returns:\n sample: the image, bounding box are replaced.\n \"\"\"\n\n prob = np.random.uniform(0, 1)\n assert 'image' in sample, 'not found image data'\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n im_width = sample['w']\n im_height = sample['h']\n if prob < self.prob:\n if self.max_ratio - 1 >= 0.01:\n expand_ratio = np.random.uniform(1, self.max_ratio)\n height = int(im_height * expand_ratio)\n width = int(im_width * expand_ratio)\n h_off = math.floor(np.random.uniform(0, height - im_height))\n w_off = math.floor(np.random.uniform(0, width - im_width))\n expand_bbox = [\n -w_off / im_width, -h_off / im_height,\n (width - w_off) / im_width, (height - h_off) / im_height\n ]\n expand_im = np.ones((height, width, 3))\n expand_im = np.uint8(expand_im * np.squeeze(self.mean))\n expand_im = Image.fromarray(expand_im)\n im = Image.fromarray(im)\n expand_im.paste(im, (int(w_off), int(h_off)))\n expand_im = np.asarray(expand_im)\n if 'gt_keypoint' in sample.keys(\n ) and 'keypoint_ignore' in sample.keys():\n keypoints = (sample['gt_keypoint'],\n sample['keypoint_ignore'])\n gt_bbox, gt_class, _, gt_keypoints = filter_and_process(\n expand_bbox, gt_bbox, gt_class, keypoints=keypoints)\n sample['gt_keypoint'] = gt_keypoints[0]\n sample['keypoint_ignore'] = gt_keypoints[1]\n else:\n gt_bbox, gt_class, _ = filter_and_process(expand_bbox,\n gt_bbox, gt_class)\n sample['image'] = expand_im\n sample['gt_bbox'] = gt_bbox\n sample['gt_class'] = gt_class\n sample['w'] = width\n sample['h'] = height\n\n return sample\n\n\n@register_op\nclass CropImage(BaseOperator):\n def __init__(self, batch_sampler, satisfy_all=False, avoid_no_bbox=True):\n \"\"\"\n Args:\n batch_sampler (list): Multiple sets of different\n parameters for cropping.\n satisfy_all (bool): whether all boxes must satisfy.\n e.g.[[1, 1, 1.0, 1.0, 1.0, 1.0, 0.0, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.1, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.3, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.7, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.9, 1.0],\n [1, 50, 0.3, 1.0, 0.5, 2.0, 0.0, 1.0]]\n [max sample, max trial, min scale, max scale,\n min aspect ratio, max aspect ratio,\n min overlap, max overlap]\n avoid_no_bbox (bool): whether to to avoid the\n situation where the box does not appear.\n \"\"\"\n super(CropImage, self).__init__()\n self.batch_sampler = batch_sampler\n self.satisfy_all = satisfy_all\n self.avoid_no_bbox = avoid_no_bbox\n\n def __call__(self, sample, context):\n \"\"\"\n Crop the image and modify bounding box.\n Operators:\n 1. Scale the image width and height.\n 2. Crop the image according to a radom sample.\n 3. Rescale the bounding box.\n 4. 
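
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): ExpandImage pastes the original image at
# a random offset on a larger mean-filled canvas; the window that maps the
# new extent back onto the old boxes is expressed in units of the *original*
# image size. Offsets below are hypothetical.
im_w, im_h = 100, 80
expand_ratio = 2.0
width, height = int(im_w * expand_ratio), int(im_h * expand_ratio)
w_off, h_off = 30, 20
expand_bbox = [-w_off / im_w, -h_off / im_h,
               (width - w_off) / im_w, (height - h_off) / im_h]
assert expand_bbox == [-0.3, -0.25, 1.7, 1.75]
# ---------------------------------------------------------------------------
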
Determine if the new bbox is satisfied in the new image.\n Returns:\n sample: the image, bounding box are replaced.\n \"\"\"\n assert 'image' in sample, \"image data not found\"\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n im_width = sample['w']\n im_height = sample['h']\n gt_score = None\n if 'gt_score' in sample:\n gt_score = sample['gt_score']\n sampled_bbox = []\n gt_bbox = gt_bbox.tolist()\n for sampler in self.batch_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = generate_sample_bbox(sampler)\n if satisfy_sample_constraint(sampler, sample_bbox, gt_bbox,\n self.satisfy_all):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n sample_bbox = clip_bbox(sample_bbox)\n crop_bbox, crop_class, crop_score = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class, scores=gt_score)\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n xmin = int(sample_bbox[0] * im_width)\n xmax = int(sample_bbox[2] * im_width)\n ymin = int(sample_bbox[1] * im_height)\n ymax = int(sample_bbox[3] * im_height)\n im = im[ymin:ymax, xmin:xmax]\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n return sample\n return sample\n\n\n@register_op\nclass CropImageWithDataAchorSampling(BaseOperator):\n def __init__(self,\n batch_sampler,\n anchor_sampler=None,\n target_size=None,\n das_anchor_scales=[16, 32, 64, 128],\n sampling_prob=0.5,\n min_size=8.,\n avoid_no_bbox=True):\n \"\"\"\n Args:\n anchor_sampler (list): anchor_sampling sets of different\n parameters for cropping.\n batch_sampler (list): Multiple sets of different\n parameters for cropping.\n e.g.[[1, 10, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.2, 0.0]]\n [[1, 50, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0],\n [1, 50, 0.3, 1.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0]]\n [max sample, max trial, min scale, max scale,\n min aspect ratio, max aspect ratio,\n min overlap, max overlap, min coverage, max coverage]\n target_size (bool): target image size.\n das_anchor_scales (list[float]): a list of anchor scales in data\n anchor smapling.\n min_size (float): minimum size of sampled bbox.\n avoid_no_bbox (bool): whether to to avoid the\n situation where the box does not appear.\n \"\"\"\n super(CropImageWithDataAchorSampling, self).__init__()\n self.anchor_sampler = anchor_sampler\n self.batch_sampler = batch_sampler\n self.target_size = target_size\n self.sampling_prob = sampling_prob\n self.min_size = min_size\n self.avoid_no_bbox = avoid_no_bbox\n self.das_anchor_scales = np.array(das_anchor_scales)\n\n def __call__(self, sample, context):\n \"\"\"\n Crop the image and modify bounding box.\n Operators:\n 1. Scale the image width and height.\n 2. Crop the image according to a radom sample.\n 3. Rescale the bounding box.\n 4. 
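
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): one CropImage batch_sampler entry and
# what each slot means, per the docstring above — sampler[0]/[1] bound the
# accepted samples and trials, [2..3] the scale range, [4..5] the aspect
# ratio range, and [6..7] the min/max overlap constraint checked by
# satisfy_sample_constraint. The values are the SSD-style settings quoted
# in the docstring.
sampler = [1, 50, 0.3, 1.0, 0.5, 2.0, 0.5, 1.0]
(max_sample, max_trial, min_scale, max_scale,
 min_ar, max_ar, min_overlap, max_overlap) = sampler
# ---------------------------------------------------------------------------
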
Determine if the new bbox is satisfied in the new image.\n Returns:\n sample: the image, bounding box are replaced.\n \"\"\"\n assert 'image' in sample, \"image data not found\"\n im = sample['image']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n image_width = sample['w']\n image_height = sample['h']\n gt_score = None\n if 'gt_score' in sample:\n gt_score = sample['gt_score']\n sampled_bbox = []\n gt_bbox = gt_bbox.tolist()\n\n prob = np.random.uniform(0., 1.)\n if prob > self.sampling_prob: # anchor sampling\n assert self.anchor_sampler\n for sampler in self.anchor_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = data_anchor_sampling(\n gt_bbox, image_width, image_height,\n self.das_anchor_scales, self.target_size)\n if sample_bbox == 0:\n break\n if satisfy_sample_constraint_coverage(sampler, sample_bbox,\n gt_bbox):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n\n if 'gt_keypoint' in sample.keys():\n keypoints = (sample['gt_keypoint'],\n sample['keypoint_ignore'])\n crop_bbox, crop_class, crop_score, gt_keypoints = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class,\n scores=gt_score,\n keypoints=keypoints)\n else:\n crop_bbox, crop_class, crop_score = filter_and_process(\n sample_bbox, gt_bbox, gt_class, scores=gt_score)\n crop_bbox, crop_class, crop_score = bbox_area_sampling(\n crop_bbox, crop_class, crop_score, self.target_size,\n self.min_size)\n\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n im = crop_image_sampling(im, sample_bbox, image_width,\n image_height, self.target_size)\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n if 'gt_keypoint' in sample.keys():\n sample['gt_keypoint'] = gt_keypoints[0]\n sample['keypoint_ignore'] = gt_keypoints[1]\n return sample\n return sample\n\n else:\n for sampler in self.batch_sampler:\n found = 0\n for i in range(sampler[1]):\n if found >= sampler[0]:\n break\n sample_bbox = generate_sample_bbox_square(\n sampler, image_width, image_height)\n if satisfy_sample_constraint_coverage(sampler, sample_bbox,\n gt_bbox):\n sampled_bbox.append(sample_bbox)\n found = found + 1\n im = np.array(im)\n while sampled_bbox:\n idx = int(np.random.uniform(0, len(sampled_bbox)))\n sample_bbox = sampled_bbox.pop(idx)\n sample_bbox = clip_bbox(sample_bbox)\n\n if 'gt_keypoint' in sample.keys():\n keypoints = (sample['gt_keypoint'],\n sample['keypoint_ignore'])\n crop_bbox, crop_class, crop_score, gt_keypoints = \\\n filter_and_process(sample_bbox, gt_bbox, gt_class,\n scores=gt_score,\n keypoints=keypoints)\n else:\n crop_bbox, crop_class, crop_score = filter_and_process(\n sample_bbox, gt_bbox, gt_class, scores=gt_score)\n # sampling bbox according the bbox area\n crop_bbox, crop_class, crop_score = bbox_area_sampling(\n crop_bbox, crop_class, crop_score, self.target_size,\n self.min_size)\n\n if self.avoid_no_bbox:\n if len(crop_bbox) < 1:\n continue\n xmin = int(sample_bbox[0] * image_width)\n xmax = int(sample_bbox[2] * image_width)\n ymin = int(sample_bbox[1] * image_height)\n ymax = int(sample_bbox[3] * image_height)\n im = im[ymin:ymax, xmin:xmax]\n sample['image'] = im\n sample['gt_bbox'] = crop_bbox\n sample['gt_class'] = crop_class\n sample['gt_score'] = crop_score\n if 'gt_keypoint' in sample.keys():\n sample['gt_keypoint'] = gt_keypoints[0]\n 
sample['keypoint_ignore'] = gt_keypoints[1]
                return sample
            return sample


@register_op
class NormalizeBox(BaseOperator):
    """Transform the bounding box's coordinates to [0, 1]."""

    def __init__(self):
        super(NormalizeBox, self).__init__()

    def __call__(self, sample, context):
        gt_bbox = sample['gt_bbox']
        width = sample['w']
        height = sample['h']
        for i in range(gt_bbox.shape[0]):
            gt_bbox[i][0] = gt_bbox[i][0] / width
            gt_bbox[i][1] = gt_bbox[i][1] / height
            gt_bbox[i][2] = gt_bbox[i][2] / width
            gt_bbox[i][3] = gt_bbox[i][3] / height
        sample['gt_bbox'] = gt_bbox

        if 'gt_keypoint' in sample.keys():
            gt_keypoint = sample['gt_keypoint']

            for i in range(gt_keypoint.shape[1]):
                if i % 2:
                    gt_keypoint[:, i] = gt_keypoint[:, i] / height
                else:
                    gt_keypoint[:, i] = gt_keypoint[:, i] / width
            sample['gt_keypoint'] = gt_keypoint

        return sample


@register_op
class Permute(BaseOperator):
    def __init__(self, to_bgr=True, channel_first=True):
        """
        Change the channel layout.
        Args:
            to_bgr (bool): whether to convert RGB to BGR
            channel_first (bool): whether to transpose the image from HWC to
                CHW (channel-first) layout
        """
        super(Permute, self).__init__()
        self.to_bgr = to_bgr
        self.channel_first = channel_first
        if not (isinstance(self.to_bgr, bool) and
                isinstance(self.channel_first, bool)):
            raise TypeError("{}: input type is invalid.".format(self))

    def __call__(self, sample, context=None):
        samples = sample
        batch_input = True
        if not isinstance(samples, Sequence):
            batch_input = False
            samples = [samples]
        for sample in samples:
            assert 'image' in sample, "image data not found"
            for k in sample.keys():
                # hard code
                if k.startswith('image'):
                    im = sample[k]
                    if self.channel_first:
                        im = np.swapaxes(im, 1, 2)
                        im = np.swapaxes(im, 1, 0)
                    if self.to_bgr:
                        im = im[[2, 1, 0], :, :]
                    sample[k] = im
        if not batch_input:
            samples = samples[0]
        return samples


@register_op
class MixupImage(BaseOperator):
    def __init__(self, alpha=1.5, beta=1.5):
        """ Mixup image and gt_bbox/gt_score
        Args:
            alpha (float): alpha parameter of the beta distribution
            beta (float): beta parameter of the beta distribution
        """
        super(MixupImage, self).__init__()
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in {}".format(self))
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in {}".format(self))

    def _mixup_img(self, img1, img2, factor):
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        img = np.zeros((h, w, img1.shape[2]), 'float32')
        img[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32') * factor
        img[:img2.shape[0], :img2.shape[1], :] += \
            img2.astype('float32') * (1.0 - factor)
        return img.astype('uint8')

    def __call__(self, sample, context=None):
        if 'mixup' not in sample:
            return sample
        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        if factor >= 1.0:
            sample.pop('mixup')
            return sample
        if factor <= 0.0:
            return sample['mixup']
        im = self._mixup_img(sample['image'], sample['mixup']['image'], factor)
        gt_bbox1 = sample['gt_bbox'].reshape((-1, 4))
        gt_bbox2 = sample['mixup']['gt_bbox'].reshape((-1, 4))
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = sample['gt_class']
        gt_class2 = sample['mixup']['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)

        gt_score1 = sample['gt_score']
        gt_score2 = sample['mixup']['gt_score']
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)

        is_crowd1 = sample['is_crowd']
        is_crowd2 = sample['mixup']['is_crowd']
        is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)

        sample['image'] = im
        sample['gt_bbox'] = gt_bbox
        sample['gt_score'] = gt_score
        sample['gt_class'] = gt_class
        sample['is_crowd'] = is_crowd
        sample['h'] = im.shape[0]
        sample['w'] = im.shape[1]
        sample.pop('mixup')
        return sample


@register_op
class CutmixImage(BaseOperator):
    def __init__(self, alpha=1.5, beta=1.5):
        """
        CutMix: Regularization Strategy to Train Strong Classifiers with Localizable Features, see https://arxiv.org/abs/1905.04899
        Cutmix image and gt_bbox/gt_score
        Args:
            alpha (float): alpha parameter of the beta distribution
            beta (float): beta parameter of the beta distribution
        """
        super(CutmixImage, self).__init__()
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in {}".format(self))
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in {}".format(self))

    def _rand_bbox(self, img1, img2, factor):
        """Cut a random patch from img2 and paste it onto a padded copy of img1."""
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        cut_rat = np.sqrt(1. - factor)

        # np.int was removed in NumPy >= 1.24; the builtin int is equivalent here
        cut_w = int(w * cut_rat)
        cut_h = int(h * cut_rat)

        # uniform
        cx = np.random.randint(w)
        cy = np.random.randint(h)

        bbx1 = np.clip(cx - cut_w // 2, 0, w)
        bby1 = np.clip(cy - cut_h // 2, 0, h)
        bbx2 = np.clip(cx + cut_w // 2, 0, w)
        bby2 = np.clip(cy + cut_h // 2, 0, h)

        img_1 = np.zeros((h, w, img1.shape[2]), 'float32')
        img_1[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32')
        img_2 = np.zeros((h, w, img2.shape[2]), 'float32')
        img_2[:img2.shape[0], :img2.shape[1], :] = \
            img2.astype('float32')
        # paste from the zero-padded copy so the slice shapes always match
        img_1[bby1:bby2, bbx1:bbx2, :] = img_2[bby1:bby2, bbx1:bbx2, :]
        return img_1

    def __call__(self, sample, context=None):
        if 'cutmix' not in sample:
            return sample
        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        if factor >= 1.0:
            sample.pop('cutmix')
            return sample
        if factor <= 0.0:
            return sample['cutmix']
        img1 = sample['image']
        img2 = sample['cutmix']['image']
        img = self._rand_bbox(img1, img2, factor)
        gt_bbox1 = sample['gt_bbox']
        gt_bbox2 = sample['cutmix']['gt_bbox']
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = sample['gt_class']
        gt_class2 = sample['cutmix']['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
        gt_score1 = sample['gt_score']
        gt_score2 = sample['cutmix']['gt_score']
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
        sample['image'] = img
        sample['gt_bbox'] = gt_bbox
        sample['gt_score'] = gt_score
        sample['gt_class'] = gt_class
        sample['h'] = img.shape[0]
        sample['w'] = img.shape[1]
        sample.pop('cutmix')
        return sample


@register_op
class RandomInterpImage(BaseOperator):
    def __init__(self, target_size=0, max_size=0):
        """
        Randomly resize the image, choosing among several interpolation methods.
        Args:
            target_size (int): the target size of image's short side
            max_size (int): the max size of image
        """
        super(RandomInterpImage, self).__init__()
        self.target_size = target_size
        self.max_size = max_size
        if not (isinstance(self.target_size, int) and
                isinstance(self.max_size, int)):
            raise TypeError('{}: input type is invalid.'.format(self))
        interps = [
            cv2.INTER_NEAREST,
            cv2.INTER_LINEAR,
            cv2.INTER_AREA,
            cv2.INTER_CUBIC,
            cv2.INTER_LANCZOS4,
        ]
        self.resizers = []
        for interp in interps:
            self.resizers.append(ResizeImage(target_size, max_size, interp))

    def __call__(self, sample, context=None):
        """Resize the image array with a randomly chosen resizer."""
        resizer = random.choice(self.resizers)
        return resizer(sample, context)


@register_op
class Resize(BaseOperator):
    """Resize image and bbox.
    Args:
        target_dim (int or list): target size, can be a single number or a list
            (for random shape).
        interp (int or str): interpolation method, can be an integer or
            'random' (for randomized interpolation).
            defaults to `cv2.INTER_LINEAR`.
    """

    def __init__(self, target_dim=[], interp=cv2.INTER_LINEAR):
        super(Resize, self).__init__()
        self.target_dim = target_dim
        self.interp = interp  # 'random' for yolov3

    def __call__(self, sample, context=None):
        w = sample['w']
        h = sample['h']

        interp = self.interp
        if interp == 'random':
            interp = np.random.choice(range(5))

        if isinstance(self.target_dim, Sequence):
            dim = np.random.choice(self.target_dim)
        else:
            dim = self.target_dim
        resize_w = resize_h = dim
        scale_x = dim / w
        scale_y = dim / h
        if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:
            scale_array = np.array([scale_x, scale_y] * 2, dtype=np.float32)
            sample['gt_bbox'] = np.clip(sample['gt_bbox'] * scale_array, 0,
                                        dim - 1)
        sample['scale_factor'] = [scale_x, scale_y] * 2
        sample['h'] = resize_h
        sample['w'] = resize_w

        sample['image'] = cv2.resize(
            sample['image'], (resize_w, resize_h), interpolation=interp)
        return sample


@register_op
class ColorDistort(BaseOperator):
    """Random color distortion.
    Args:
        hue (list): hue settings.
            in [lower, upper, probability] format.
        saturation (list): saturation settings.
            in [lower, upper, probability] format.
        contrast (list): contrast settings.
            in [lower, upper, probability] format.
        brightness (list): brightness settings.
            in [lower, upper, probability] format.
        random_apply (bool): whether to apply in random (yolo) or fixed (SSD)
            order.
        hsv_format (bool): whether to convert color from BGR to HSV
        random_channel (bool): whether to swap channels randomly
    """

    def __init__(self,
                 hue=[-18, 18, 0.5],
                 saturation=[0.5, 1.5, 0.5],
                 contrast=[0.5, 1.5, 0.5],
                 brightness=[0.5, 1.5, 0.5],
                 random_apply=True,
                 hsv_format=False,
                 random_channel=False):
        super(ColorDistort, self).__init__()
        self.hue = hue
        self.saturation = saturation
        self.contrast = contrast
        self.brightness = brightness
        self.random_apply = random_apply
        self.hsv_format = hsv_format
        self.random_channel = random_channel

    def apply_hue(self, img):
low, high, prob = self.hue\n if np.random.uniform(0., 1.) < prob:\n return img\n\n img = img.astype(np.float32)\n if self.hsv_format:\n img[..., 0] += random.uniform(low, high)\n img[..., 0][img[..., 0] > 360] -= 360\n img[..., 0][img[..., 0] < 0] += 360\n return img\n\n # XXX works, but result differ from HSV version\n delta = np.random.uniform(low, high)\n u = np.cos(delta * np.pi)\n w = np.sin(delta * np.pi)\n bt = np.array([[1.0, 0.0, 0.0], [0.0, u, -w], [0.0, w, u]])\n tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],\n [0.211, -0.523, 0.311]])\n ityiq = np.array([[1.0, 0.956, 0.621], [1.0, -0.272, -0.647],\n [1.0, -1.107, 1.705]])\n t = np.dot(np.dot(ityiq, bt), tyiq).T\n img = np.dot(img, t)\n return img\n\n def apply_saturation(self, img):\n low, high, prob = self.saturation\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n img = img.astype(np.float32)\n if self.hsv_format:\n img[..., 1] *= delta\n return img\n gray = img * np.array([[[0.299, 0.587, 0.114]]], dtype=np.float32)\n gray = gray.sum(axis=2, keepdims=True)\n gray *= (1.0 - delta)\n img *= delta\n img += gray\n return img\n\n def apply_contrast(self, img):\n low, high, prob = self.contrast\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n\n img = img.astype(np.float32)\n img *= delta\n return img\n\n def apply_brightness(self, img):\n low, high, prob = self.brightness\n if np.random.uniform(0., 1.) < prob:\n return img\n delta = np.random.uniform(low, high)\n\n img = img.astype(np.float32)\n img += delta\n return img\n\n def __call__(self, sample, context=None):\n img = sample['image']\n if self.random_apply:\n functions = [\n self.apply_brightness,\n self.apply_contrast,\n self.apply_saturation,\n self.apply_hue,\n ]\n distortions = np.random.permutation(functions)\n for func in distortions:\n img = func(img)\n sample['image'] = img\n return sample\n\n img = self.apply_brightness(img)\n\n if np.random.randint(0, 2):\n img = self.apply_contrast(img)\n if self.hsv_format:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = self.apply_saturation(img)\n img = self.apply_hue(img)\n if self.hsv_format:\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n else:\n if self.hsv_format:\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = self.apply_saturation(img)\n img = self.apply_hue(img)\n if self.hsv_format:\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n img = self.apply_contrast(img)\n\n if self.random_channel:\n if np.random.randint(0, 2):\n img = img[..., np.random.permutation(3)]\n sample['image'] = img\n return sample\n\n\n@register_op\nclass CornerRandColor(ColorDistort):\n \"\"\"Random color for CornerNet series models.\n Args:\n saturation (float): saturation settings.\n contrast (float): contrast settings.\n brightness (float): brightness settings.\n is_scale (bool): whether to scale the input image.\n \"\"\"\n\n def __init__(self,\n saturation=0.4,\n contrast=0.4,\n brightness=0.4,\n is_scale=True):\n super(CornerRandColor, self).__init__(\n saturation=saturation, contrast=contrast, brightness=brightness)\n self.is_scale = is_scale\n\n def apply_saturation(self, img, img_gray):\n alpha = 1. + np.random.uniform(\n low=-self.saturation, high=self.saturation)\n self._blend(alpha, img, img_gray[:, :, None])\n return img\n\n def apply_contrast(self, img, img_gray):\n alpha = 1. 
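
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): the RGB-space hue rotation used by
# ColorDistort.apply_hue when hsv_format is False. Rotating the chroma plane
# by `delta` (in units of pi) in YIQ space and mapping back to RGB collapses
# into one 3x3 matrix; a zero delta must reduce to (nearly) the identity,
# up to the rounding in the published YIQ coefficients.
import numpy as np

delta = 0.0
u, w = np.cos(delta * np.pi), np.sin(delta * np.pi)
bt = np.array([[1., 0., 0.], [0., u, -w], [0., w, u]])
tyiq = np.array([[0.299, 0.587, 0.114], [0.596, -0.274, -0.321],
                 [0.211, -0.523, 0.311]])
ityiq = np.array([[1., 0.956, 0.621], [1., -0.272, -0.647],
                  [1., -1.107, 1.705]])
t = np.dot(np.dot(ityiq, bt), tyiq).T
assert np.allclose(t, np.eye(3), atol=1e-2)
# ---------------------------------------------------------------------------
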
+ np.random.uniform(low=-self.contrast, high=self.contrast)\n img_mean = img_gray.mean()\n self._blend(alpha, img, img_mean)\n return img\n\n def apply_brightness(self, img, img_gray):\n alpha = 1 + np.random.uniform(\n low=-self.brightness, high=self.brightness)\n img *= alpha\n return img\n\n def _blend(self, alpha, img, img_mean):\n img *= alpha\n img_mean *= (1 - alpha)\n img += img_mean\n\n def __call__(self, sample, context=None):\n img = sample['image']\n if self.is_scale:\n img = img.astype(np.float32, copy=False)\n img /= 255.\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n functions = [\n self.apply_brightness,\n self.apply_contrast,\n self.apply_saturation,\n ]\n distortions = np.random.permutation(functions)\n for func in distortions:\n img = func(img, img_gray)\n sample['image'] = img\n return sample\n\n\n@register_op\nclass NormalizePermute(BaseOperator):\n \"\"\"Normalize and permute channel order.\n Args:\n mean (list): mean values in RGB order.\n std (list): std values in RGB order.\n \"\"\"\n\n def __init__(self,\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.120, 57.375]):\n super(NormalizePermute, self).__init__()\n self.mean = mean\n self.std = std\n\n def __call__(self, sample, context=None):\n img = sample['image']\n img = img.astype(np.float32)\n\n img = img.transpose((2, 0, 1))\n mean = np.array(self.mean, dtype=np.float32)\n std = np.array(self.std, dtype=np.float32)\n invstd = 1. / std\n for v, m, s in zip(img, mean, invstd):\n v.__isub__(m).__imul__(s)\n sample['image'] = img\n return sample\n\n\n@register_op\nclass RandomExpand(BaseOperator):\n \"\"\"Random expand the canvas.\n Args:\n ratio (float): maximum expansion ratio.\n prob (float): probability to expand.\n fill_value (list): color value used to fill the canvas. in RGB order.\n is_mask_expand(bool): whether expand the segmentation.\n \"\"\"\n\n def __init__(self,\n ratio=4.,\n prob=0.5,\n fill_value=(127.5, ) * 3,\n is_mask_expand=False):\n super(RandomExpand, self).__init__()\n assert ratio > 1.01, \"expand ratio must be larger than 1.01\"\n self.ratio = ratio\n self.prob = prob\n assert isinstance(fill_value, (Number, Sequence)), \\\n \"fill value must be either float or sequence\"\n if isinstance(fill_value, Number):\n fill_value = (fill_value, ) * 3\n if not isinstance(fill_value, tuple):\n fill_value = tuple(fill_value)\n self.fill_value = fill_value\n self.is_mask_expand = is_mask_expand\n\n def expand_segms(self, segms, x, y, height, width, ratio):\n def _expand_poly(poly, x, y):\n expanded_poly = np.array(poly)\n expanded_poly[0::2] += x\n expanded_poly[1::2] += y\n return expanded_poly.tolist()\n\n def _expand_rle(rle, x, y, height, width, ratio):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n expanded_mask = np.full((int(height * ratio), int(width * ratio)),\n 0).astype(mask.dtype)\n expanded_mask[y:y + height, x:x + width] = mask\n rle = mask_util.encode(\n np.array(\n expanded_mask, order='F', dtype=np.uint8))\n return rle\n\n expanded_segms = []\n for segm in segms:\n if is_poly(segm):\n # Polygon format\n expanded_segms.append(\n [_expand_poly(poly, x, y) for poly in segm])\n else:\n # RLE format\n import pycocotools.mask as mask_util\n expanded_segms.append(\n _expand_rle(segm, x, y, height, width, ratio))\n return expanded_segms\n\n def __call__(self, sample, context=None):\n if np.random.uniform(0., 1.) 
< self.prob:\n return sample\n\n img = sample['image']\n height = int(sample['h'])\n width = int(sample['w'])\n\n expand_ratio = np.random.uniform(1., self.ratio)\n h = int(height * expand_ratio)\n w = int(width * expand_ratio)\n if not h > height or not w > width:\n return sample\n y = np.random.randint(0, h - height)\n x = np.random.randint(0, w - width)\n canvas = np.ones((h, w, 3), dtype=np.uint8)\n canvas *= np.array(self.fill_value, dtype=np.uint8)\n canvas[y:y + height, x:x + width, :] = img.astype(np.uint8)\n\n sample['h'] = h\n sample['w'] = w\n sample['image'] = canvas\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n sample['gt_bbox'] += np.array([x, y] * 2, dtype=np.float32)\n if self.is_mask_expand and 'gt_poly' in sample and len(sample[\n 'gt_poly']) > 0:\n sample['gt_poly'] = self.expand_segms(sample['gt_poly'], x, y,\n height, width, expand_ratio)\n return sample\n\n\n@register_op\nclass RandomCrop(BaseOperator):\n \"\"\"Random crop image and bboxes.\n Args:\n aspect_ratio (list): aspect ratio of cropped region.\n in [min, max] format.\n thresholds (list): iou thresholds for decide a valid bbox crop.\n scaling (list): ratio between a cropped region and the original image.\n in [min, max] format.\n num_attempts (int): number of tries before giving up.\n allow_no_crop (bool): allow return without actually cropping them.\n cover_all_box (bool): ensure all bboxes are covered in the final crop.\n is_mask_crop(bool): whether crop the segmentation.\n \"\"\"\n\n def __init__(self,\n aspect_ratio=[.5, 2.],\n thresholds=[.0, .1, .3, .5, .7, .9],\n scaling=[.3, 1.],\n num_attempts=50,\n allow_no_crop=True,\n cover_all_box=False,\n is_mask_crop=False):\n super(RandomCrop, self).__init__()\n self.aspect_ratio = aspect_ratio\n self.thresholds = thresholds\n self.scaling = scaling\n self.num_attempts = num_attempts\n self.allow_no_crop = allow_no_crop\n self.cover_all_box = cover_all_box\n self.is_mask_crop = is_mask_crop\n\n def crop_segms(self, segms, valid_ids, crop, height, width):\n def _crop_poly(segm, crop):\n xmin, ymin, xmax, ymax = crop\n crop_coord = [xmin, ymin, xmin, ymax, xmax, ymax, xmax, ymin]\n crop_p = np.array(crop_coord).reshape(4, 2)\n crop_p = Polygon(crop_p)\n\n crop_segm = list()\n for poly in segm:\n poly = np.array(poly).reshape(len(poly) // 2, 2)\n polygon = Polygon(poly)\n if not polygon.is_valid:\n exterior = polygon.exterior\n multi_lines = exterior.intersection(exterior)\n polygons = shapely.ops.polygonize(multi_lines)\n polygon = MultiPolygon(polygons)\n multi_polygon = list()\n if isinstance(polygon, MultiPolygon):\n multi_polygon = copy.deepcopy(polygon)\n else:\n multi_polygon.append(copy.deepcopy(polygon))\n for per_polygon in multi_polygon:\n inter = per_polygon.intersection(crop_p)\n if not inter:\n continue\n if isinstance(inter, (MultiPolygon, GeometryCollection)):\n for part in inter:\n if not isinstance(part, Polygon):\n continue\n part = np.squeeze(\n np.array(part.exterior.coords[:-1]).reshape(1,\n -1))\n part[0::2] -= xmin\n part[1::2] -= ymin\n crop_segm.append(part.tolist())\n elif isinstance(inter, Polygon):\n crop_poly = np.squeeze(\n np.array(inter.exterior.coords[:-1]).reshape(1, -1))\n crop_poly[0::2] -= xmin\n crop_poly[1::2] -= ymin\n crop_segm.append(crop_poly.tolist())\n else:\n continue\n return crop_segm\n\n def _crop_rle(rle, crop, height, width):\n if 'counts' in rle and type(rle['counts']) == list:\n rle = mask_util.frPyObjects(rle, height, width)\n mask = mask_util.decode(rle)\n mask = mask[crop[1]:crop[3], 
crop[0]:crop[2]]\n rle = mask_util.encode(np.array(mask, order='F', dtype=np.uint8))\n return rle\n\n crop_segms = []\n for id in valid_ids:\n segm = segms[id]\n if is_poly(segm):\n import copy\n import shapely.ops\n from shapely.geometry import Polygon, MultiPolygon, GeometryCollection\n logging.getLogger(\"shapely\").setLevel(logging.WARNING)\n # Polygon format\n crop_segms.append(_crop_poly(segm, crop))\n else:\n # RLE format\n import pycocotools.mask as mask_util\n crop_segms.append(_crop_rle(segm, crop, height, width))\n return crop_segms\n\n def __call__(self, sample, context=None):\n if 'gt_bbox' in sample and len(sample['gt_bbox']) == 0:\n return sample\n\n h = sample['h']\n w = sample['w']\n gt_bbox = sample['gt_bbox']\n\n # NOTE Original method attempts to generate one candidate for each\n # threshold then randomly sample one from the resulting list.\n # Here a short circuit approach is taken, i.e., randomly choose a\n # threshold and attempt to find a valid crop, and simply return the\n # first one found.\n # The probability is not exactly the same, kinda resembling the\n # \"Monty Hall\" problem. Actually carrying out the attempts will affect\n # observability (just like opening doors in the \"Monty Hall\" game).\n thresholds = list(self.thresholds)\n if self.allow_no_crop:\n thresholds.append('no_crop')\n np.random.shuffle(thresholds)\n\n for thresh in thresholds:\n if thresh == 'no_crop':\n return sample\n\n found = False\n for i in range(self.num_attempts):\n scale = np.random.uniform(*self.scaling)\n if self.aspect_ratio is not None:\n min_ar, max_ar = self.aspect_ratio\n aspect_ratio = np.random.uniform(\n max(min_ar, scale**2), min(max_ar, scale**-2))\n h_scale = scale / np.sqrt(aspect_ratio)\n w_scale = scale * np.sqrt(aspect_ratio)\n else:\n h_scale = np.random.uniform(*self.scaling)\n w_scale = np.random.uniform(*self.scaling)\n crop_h = h * h_scale\n crop_w = w * w_scale\n if self.aspect_ratio is None:\n if crop_h / crop_w < 0.5 or crop_h / crop_w > 2.0:\n continue\n\n crop_h = int(crop_h)\n crop_w = int(crop_w)\n crop_y = np.random.randint(0, h - crop_h)\n crop_x = np.random.randint(0, w - crop_w)\n crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]\n iou = self._iou_matrix(\n gt_bbox, np.array(\n [crop_box], dtype=np.float32))\n if iou.max() < thresh:\n continue\n\n if self.cover_all_box and iou.min() < thresh:\n continue\n\n cropped_box, valid_ids = self._crop_box_with_center_constraint(\n gt_bbox, np.array(\n crop_box, dtype=np.float32))\n if valid_ids.size > 0:\n found = True\n break\n\n if found:\n if self.is_mask_crop and 'gt_poly' in sample and len(sample[\n 'gt_poly']) > 0:\n crop_polys = self.crop_segms(\n sample['gt_poly'],\n valid_ids,\n np.array(\n crop_box, dtype=np.int64),\n h,\n w)\n if [] in crop_polys:\n delete_id = list()\n valid_polys = list()\n for id, crop_poly in enumerate(crop_polys):\n if crop_poly == []:\n delete_id.append(id)\n else:\n valid_polys.append(crop_poly)\n valid_ids = np.delete(valid_ids, delete_id)\n if len(valid_polys) == 0:\n return sample\n sample['gt_poly'] = valid_polys\n else:\n sample['gt_poly'] = crop_polys\n\n if 'gt_segm' in sample:\n sample['gt_segm'] = self._crop_segm(sample['gt_segm'],\n crop_box)\n sample['gt_segm'] = np.take(\n sample['gt_segm'], valid_ids, axis=0)\n sample['image'] = self._crop_image(sample['image'], crop_box)\n sample['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)\n sample['gt_class'] = np.take(\n sample['gt_class'], valid_ids, axis=0)\n sample['w'] = crop_box[2] - crop_box[0]\n 
sample['h'] = crop_box[3] - crop_box[1]\n if 'gt_score' in sample:\n sample['gt_score'] = np.take(\n sample['gt_score'], valid_ids, axis=0)\n\n if 'is_crowd' in sample:\n sample['is_crowd'] = np.take(\n sample['is_crowd'], valid_ids, axis=0)\n return sample\n\n return sample\n\n def _iou_matrix(self, a, b):\n tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n\n area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)\n area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)\n area_o = (area_a[:, np.newaxis] + area_b - area_i)\n return area_i / (area_o + 1e-10)\n\n def _crop_box_with_center_constraint(self, box, crop):\n cropped_box = box.copy()\n\n cropped_box[:, :2] = np.maximum(box[:, :2], crop[:2])\n cropped_box[:, 2:] = np.minimum(box[:, 2:], crop[2:])\n cropped_box[:, :2] -= crop[:2]\n cropped_box[:, 2:] -= crop[:2]\n\n centers = (box[:, :2] + box[:, 2:]) / 2\n valid = np.logical_and(crop[:2] <= centers,\n centers < crop[2:]).all(axis=1)\n valid = np.logical_and(\n valid, (cropped_box[:, :2] < cropped_box[:, 2:]).all(axis=1))\n\n return cropped_box, np.where(valid)[0]\n\n def _crop_image(self, img, crop):\n x1, y1, x2, y2 = crop\n return img[y1:y2, x1:x2, :]\n\n def _crop_segm(self, segm, crop):\n x1, y1, x2, y2 = crop\n return segm[:, y1:y2, x1:x2]\n\n\n@register_op\nclass PadBox(BaseOperator):\n def __init__(self, num_max_boxes=50):\n \"\"\"\n Pad zeros to bboxes if number of bboxes is less than num_max_boxes.\n Args:\n num_max_boxes (int): the max number of bboxes\n \"\"\"\n self.num_max_boxes = num_max_boxes\n super(PadBox, self).__init__()\n\n def __call__(self, sample, context=None):\n assert 'gt_bbox' in sample\n bbox = sample['gt_bbox']\n gt_num = min(self.num_max_boxes, len(bbox))\n num_max = self.num_max_boxes\n fields = context['fields'] if context else []\n pad_bbox = np.zeros((num_max, 4), dtype=np.float32)\n if gt_num > 0:\n pad_bbox[:gt_num, :] = bbox[:gt_num, :]\n sample['gt_bbox'] = pad_bbox\n if 'gt_class' in fields:\n pad_class = np.zeros((num_max), dtype=np.int32)\n if gt_num > 0:\n pad_class[:gt_num] = sample['gt_class'][:gt_num, 0]\n sample['gt_class'] = pad_class\n if 'gt_score' in fields:\n pad_score = np.zeros((num_max), dtype=np.float32)\n if gt_num > 0:\n pad_score[:gt_num] = sample['gt_score'][:gt_num, 0]\n sample['gt_score'] = pad_score\n # in training, for example in op ExpandImage,\n # the bbox and gt_class is expandded, but the difficult is not,\n # so, judging by it's length\n if 'is_difficult' in fields:\n pad_diff = np.zeros((num_max), dtype=np.int32)\n if gt_num > 0:\n pad_diff[:gt_num] = sample['difficult'][:gt_num, 0]\n sample['difficult'] = pad_diff\n return sample\n\n\n@register_op\nclass BboxXYXY2XYWH(BaseOperator):\n \"\"\"\n Convert bbox XYXY format to XYWH format.\n \"\"\"\n\n def __init__(self):\n super(BboxXYXY2XYWH, self).__init__()\n\n def __call__(self, sample, context=None):\n assert 'gt_bbox' in sample\n bbox = sample['gt_bbox']\n bbox[:, 2:4] = bbox[:, 2:4] - bbox[:, :2]\n bbox[:, :2] = bbox[:, :2] + bbox[:, 2:4] / 2.\n sample['gt_bbox'] = bbox\n return sample\n\n\n@register_op\nclass Lighting(BaseOperator):\n \"\"\"\n Lighting the imagen by eigenvalues and eigenvectors\n Args:\n eigval (list): eigenvalues\n eigvec (list): eigenvectors\n alphastd (float): random weight of lighting, 0.1 by default\n \"\"\"\n\n def __init__(self, eigval, eigvec, alphastd=0.1):\n super(Lighting, self).__init__()\n self.alphastd = alphastd\n self.eigval = 
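
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): the pairwise IoU computation performed
# by RandomCrop._iou_matrix above, on two hypothetical ground-truth boxes
# and one candidate crop.
import numpy as np

a = np.array([[0., 0., 10., 10.], [20., 20., 30., 30.]])  # gt boxes, xyxy
b = np.array([[5., 5., 25., 25.]])                        # candidate crop
tl = np.maximum(a[:, np.newaxis, :2], b[:, :2])
br = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(br - tl, axis=2) * (tl < br).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
iou = area_i / (area_a[:, np.newaxis] + area_b - area_i + 1e-10)
# each gt box shares a 5x5 corner with the crop: 25 / (100 + 400 - 25)
assert np.allclose(iou, 25. / 475., atol=1e-6)
# ---------------------------------------------------------------------------
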
np.array(eigval).astype('float32')\n self.eigvec = np.array(eigvec).astype('float32')\n\n def __call__(self, sample, context=None):\n alpha = np.random.normal(scale=self.alphastd, size=(3, ))\n sample['image'] += np.dot(self.eigvec, self.eigval * alpha)\n return sample\n\n\n@register_op\nclass CornerTarget(BaseOperator):\n \"\"\"\n Generate targets for CornerNet by ground truth data. \n Args:\n output_size (int): the size of output heatmaps.\n num_classes (int): num of classes.\n gaussian_bump (bool): whether to apply gaussian bump on gt targets.\n True by default.\n gaussian_rad (int): radius of gaussian bump. If it is set to -1, the \n radius will be calculated by iou. -1 by default.\n gaussian_iou (float): the threshold iou of predicted bbox to gt bbox. \n If the iou is larger than threshold, the predicted bboox seems as\n positive sample. 0.3 by default\n max_tag_len (int): max num of gt box per image.\n \"\"\"\n\n def __init__(self,\n output_size,\n num_classes,\n gaussian_bump=True,\n gaussian_rad=-1,\n gaussian_iou=0.3,\n max_tag_len=128):\n super(CornerTarget, self).__init__()\n self.num_classes = num_classes\n self.output_size = output_size\n self.gaussian_bump = gaussian_bump\n self.gaussian_rad = gaussian_rad\n self.gaussian_iou = gaussian_iou\n self.max_tag_len = max_tag_len\n\n def __call__(self, sample, context=None):\n tl_heatmaps = np.zeros(\n (self.num_classes, self.output_size[0], self.output_size[1]),\n dtype=np.float32)\n br_heatmaps = np.zeros(\n (self.num_classes, self.output_size[0], self.output_size[1]),\n dtype=np.float32)\n\n tl_regrs = np.zeros((self.max_tag_len, 2), dtype=np.float32)\n br_regrs = np.zeros((self.max_tag_len, 2), dtype=np.float32)\n tl_tags = np.zeros((self.max_tag_len), dtype=np.int64)\n br_tags = np.zeros((self.max_tag_len), dtype=np.int64)\n tag_masks = np.zeros((self.max_tag_len), dtype=np.uint8)\n tag_lens = np.zeros((), dtype=np.int32)\n tag_nums = np.zeros((1), dtype=np.int32)\n\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n keep_inds = ((gt_bbox[:, 2] - gt_bbox[:, 0]) > 0) & \\\n ((gt_bbox[:, 3] - gt_bbox[:, 1]) > 0)\n gt_bbox = gt_bbox[keep_inds]\n gt_class = gt_class[keep_inds]\n sample['gt_bbox'] = gt_bbox\n sample['gt_class'] = gt_class\n width_ratio = self.output_size[1] / sample['w']\n height_ratio = self.output_size[0] / sample['h']\n for i in range(gt_bbox.shape[0]):\n width = gt_bbox[i][2] - gt_bbox[i][0]\n height = gt_bbox[i][3] - gt_bbox[i][1]\n\n xtl, ytl = gt_bbox[i][0], gt_bbox[i][1]\n xbr, ybr = gt_bbox[i][2], gt_bbox[i][3]\n\n fxtl = (xtl * width_ratio)\n fytl = (ytl * height_ratio)\n fxbr = (xbr * width_ratio)\n fybr = (ybr * height_ratio)\n\n xtl = int(fxtl)\n ytl = int(fytl)\n xbr = int(fxbr)\n ybr = int(fybr)\n if self.gaussian_bump:\n width = math.ceil(width * width_ratio)\n height = math.ceil(height * height_ratio)\n if self.gaussian_rad == -1:\n radius = gaussian_radius((height, width), self.gaussian_iou)\n radius = max(0, int(radius))\n else:\n radius = self.gaussian_rad\n draw_gaussian(tl_heatmaps[gt_class[i][0]], [xtl, ytl], radius)\n draw_gaussian(br_heatmaps[gt_class[i][0]], [xbr, ybr], radius)\n else:\n tl_heatmaps[gt_class[i][0], ytl, xtl] = 1\n br_heatmaps[gt_class[i][0], ybr, xbr] = 1\n\n tl_regrs[i, :] = [fxtl - xtl, fytl - ytl]\n br_regrs[i, :] = [fxbr - xbr, fybr - ybr]\n tl_tags[tag_lens] = ytl * self.output_size[1] + xtl\n br_tags[tag_lens] = ybr * self.output_size[1] + xbr\n tag_lens += 1\n\n tag_masks[:tag_lens] = 1\n\n sample['tl_heatmaps'] = tl_heatmaps\n sample['br_heatmaps'] = 
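
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): how CornerTarget maps a corner from
# input-image to heatmap coordinates and keeps the lost fraction as the
# regression target. Sizes below are hypothetical.
out_size, in_size = 128, 511
ratio = out_size / in_size
xtl = 100.0                      # top-left corner x in input pixels
fxtl = xtl * ratio               # exact heatmap position
ixtl = int(fxtl)                 # integer heatmap cell that gets the peak
tl_regr_x = fxtl - ixtl          # fractional offset regressed by the head
assert 0.0 <= tl_regr_x < 1.0
# ---------------------------------------------------------------------------
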
br_heatmaps\n sample['tl_regrs'] = tl_regrs\n sample['br_regrs'] = br_regrs\n sample['tl_tags'] = tl_tags\n sample['br_tags'] = br_tags\n sample['tag_masks'] = tag_masks\n\n return sample\n\n\n@register_op\nclass CornerCrop(BaseOperator):\n \"\"\"\n Random crop for CornerNet\n Args:\n random_scales (list): scales of output_size to input_size.\n border (int): border of corp center\n is_train (bool): train or test\n input_size (int): size of input image\n \"\"\"\n\n def __init__(self,\n random_scales=[0.6, 0.7, 0.8, 0.9, 1., 1.1, 1.2, 1.3],\n border=128,\n is_train=True,\n input_size=511):\n super(CornerCrop, self).__init__()\n self.random_scales = random_scales\n self.border = border\n self.is_train = is_train\n self.input_size = input_size\n\n def __call__(self, sample, context=None):\n im_h, im_w = int(sample['h']), int(sample['w'])\n if self.is_train:\n scale = np.random.choice(self.random_scales)\n height = int(self.input_size * scale)\n width = int(self.input_size * scale)\n\n w_border = self._get_border(self.border, im_w)\n h_border = self._get_border(self.border, im_h)\n\n ctx = np.random.randint(low=w_border, high=im_w - w_border)\n cty = np.random.randint(low=h_border, high=im_h - h_border)\n\n else:\n cty, ctx = im_h // 2, im_w // 2\n height = im_h | 127\n width = im_w | 127\n\n cropped_image = np.zeros(\n (height, width, 3), dtype=sample['image'].dtype)\n\n x0, x1 = max(ctx - width // 2, 0), min(ctx + width // 2, im_w)\n y0, y1 = max(cty - height // 2, 0), min(cty + height // 2, im_h)\n\n left_w, right_w = ctx - x0, x1 - ctx\n top_h, bottom_h = cty - y0, y1 - cty\n\n # crop image\n cropped_ctx, cropped_cty = width // 2, height // 2\n x_slice = slice(int(cropped_ctx - left_w), int(cropped_ctx + right_w))\n y_slice = slice(int(cropped_cty - top_h), int(cropped_cty + bottom_h))\n cropped_image[y_slice, x_slice, :] = sample['image'][y0:y1, x0:x1, :]\n\n sample['image'] = cropped_image\n sample['h'], sample['w'] = height, width\n\n if self.is_train:\n # crop detections\n gt_bbox = sample['gt_bbox']\n gt_bbox[:, 0:4:2] -= x0\n gt_bbox[:, 1:4:2] -= y0\n gt_bbox[:, 0:4:2] += cropped_ctx - left_w\n gt_bbox[:, 1:4:2] += cropped_cty - top_h\n else:\n sample['borders'] = np.array(\n [\n cropped_cty - top_h, cropped_cty + bottom_h,\n cropped_ctx - left_w, cropped_ctx + right_w\n ],\n dtype=np.float32)\n\n return sample\n\n def _get_border(self, border, size):\n i = 1\n while size - border // i <= border // i:\n i *= 2\n return border // i\n\n\n@register_op\nclass CornerRatio(BaseOperator):\n \"\"\"\n Ratio of output size to image size\n Args:\n input_size (int): the size of input size\n output_size (int): the size of heatmap\n \"\"\"\n\n def __init__(self, input_size=511, output_size=64):\n super(CornerRatio, self).__init__()\n self.input_size = input_size\n self.output_size = output_size\n\n def __call__(self, sample, context=None):\n scale = (self.input_size + 1) // self.output_size\n out_height, out_width = (sample['h'] + 1) // scale, (\n sample['w'] + 1) // scale\n height_ratio = out_height / float(sample['h'])\n width_ratio = out_width / float(sample['w'])\n sample['ratios'] = np.array([height_ratio, width_ratio])\n\n return sample\n\n\n@register_op\nclass RandomScaledCrop(BaseOperator):\n \"\"\"Resize image and bbox based on long side (with optional random scaling),\n then crop or pad image to target size.\n Args:\n target_dim (int): target size.\n scale_range (list): random scale range.\n interp (int): interpolation method, default to `cv2.INTER_LINEAR`.\n \"\"\"\n\n def 
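
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): in test mode CornerCrop pads with
# `size | 127`, which rounds a size up to the next value of the form
# 128*k - 1 (so size + 1 is a multiple of 128). Two hypothetical sizes:
assert (500 | 127) == 511
assert (512 | 127) == 639
for s in (500, 512):
    assert ((s | 127) + 1) % 128 == 0
# ---------------------------------------------------------------------------
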
__init__(self,\n target_dim=512,\n scale_range=[.1, 2.],\n interp=cv2.INTER_LINEAR):\n super(RandomScaledCrop, self).__init__()\n self.target_dim = target_dim\n self.scale_range = scale_range\n self.interp = interp\n\n def __call__(self, sample, context=None):\n w = sample['w']\n h = sample['h']\n random_scale = np.random.uniform(*self.scale_range)\n dim = self.target_dim\n random_dim = int(dim * random_scale)\n dim_max = max(h, w)\n scale = random_dim / dim_max\n resize_w = int(round(w * scale))\n resize_h = int(round(h * scale))\n offset_x = int(max(0, np.random.uniform(0., resize_w - dim)))\n offset_y = int(max(0, np.random.uniform(0., resize_h - dim)))\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n scale_array = np.array([scale, scale] * 2, dtype=np.float32)\n shift_array = np.array([offset_x, offset_y] * 2, dtype=np.float32)\n boxes = sample['gt_bbox'] * scale_array - shift_array\n boxes = np.clip(boxes, 0, dim - 1)\n # filter boxes with no area\n area = np.prod(boxes[..., 2:] - boxes[..., :2], axis=1)\n valid = (area > 1.).nonzero()[0]\n sample['gt_bbox'] = boxes[valid]\n sample['gt_class'] = sample['gt_class'][valid]\n\n img = sample['image']\n img = cv2.resize(img, (resize_w, resize_h), interpolation=self.interp)\n img = np.array(img)\n canvas = np.zeros((dim, dim, 3), dtype=img.dtype)\n canvas[:min(dim, resize_h), :min(dim, resize_w), :] = img[\n offset_y:offset_y + dim, offset_x:offset_x + dim, :]\n sample['h'] = dim\n sample['w'] = dim\n sample['image'] = canvas\n sample['im_info'] = [resize_h, resize_w, scale]\n return sample\n\n\n@register_op\nclass ResizeAndPad(BaseOperator):\n \"\"\"Resize image and bbox, then pad image to target size.\n Args:\n target_dim (int): target size\n interp (int): interpolation method, default to `cv2.INTER_LINEAR`.\n \"\"\"\n\n def __init__(self, target_dim=512, interp=cv2.INTER_LINEAR):\n super(ResizeAndPad, self).__init__()\n self.target_dim = target_dim\n self.interp = interp\n\n def __call__(self, sample, context=None):\n w = sample['w']\n h = sample['h']\n interp = self.interp\n dim = self.target_dim\n dim_max = max(h, w)\n scale = self.target_dim / dim_max\n resize_w = int(round(w * scale))\n resize_h = int(round(h * scale))\n if 'gt_bbox' in sample and len(sample['gt_bbox']) > 0:\n scale_array = np.array([scale, scale] * 2, dtype=np.float32)\n sample['gt_bbox'] = np.clip(sample['gt_bbox'] * scale_array, 0,\n dim - 1)\n img = sample['image']\n img = cv2.resize(img, (resize_w, resize_h), interpolation=interp)\n img = np.array(img)\n canvas = np.zeros((dim, dim, 3), dtype=img.dtype)\n canvas[:resize_h, :resize_w, :] = img\n sample['h'] = dim\n sample['w'] = dim\n sample['image'] = canvas\n sample['im_info'] = [resize_h, resize_w, scale]\n return sample\n\n\n@register_op\nclass TargetAssign(BaseOperator):\n \"\"\"Assign regression target and labels.\n Args:\n image_size (int or list): input image size, a single integer or list of\n [h, w]. Default: 512\n min_level (int): min level of the feature pyramid. Default: 3\n max_level (int): max level of the feature pyramid. Default: 7\n anchor_base_scale (int): base anchor scale. Default: 4\n num_scales (int): number of anchor scales. Default: 3\n aspect_ratios (list): aspect ratios.\n Default: [(1, 1), (1.4, 0.7), (0.7, 1.4)]\n match_threshold (float): threshold for foreground IoU. 
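
# ---------------------------------------------------------------------------
# Illustrative sketch (standalone): the long-side rule shared by
# RandomScaledCrop and ResizeAndPad — scale by target / max(h, w), then
# place the resized image on a square canvas. Hypothetical 300x400 image,
# 512 target:
h, w, dim = 300, 400, 512
scale = dim / max(h, w)                    # 1.28
resize_h, resize_w = round(h * scale), round(w * scale)
assert (resize_h, resize_w) == (384, 512)  # long side hits dim exactly
# ---------------------------------------------------------------------------
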
Default: 0.5\n \"\"\"\n\n def __init__(self,\n image_size=512,\n min_level=3,\n max_level=7,\n anchor_base_scale=4,\n num_scales=3,\n aspect_ratios=[(1, 1), (1.4, 0.7), (0.7, 1.4)],\n match_threshold=0.5):\n super(TargetAssign, self).__init__()\n assert image_size % 2 ** max_level == 0, \\\n \"image size should be multiple of the max level stride\"\n self.image_size = image_size\n self.min_level = min_level\n self.max_level = max_level\n self.anchor_base_scale = anchor_base_scale\n self.num_scales = num_scales\n self.aspect_ratios = aspect_ratios\n self.match_threshold = match_threshold\n\n @property\n def anchors(self):\n if not hasattr(self, '_anchors'):\n anchor_grid = AnchorGrid(self.image_size, self.min_level,\n self.max_level, self.anchor_base_scale,\n self.num_scales, self.aspect_ratios)\n self._anchors = np.concatenate(anchor_grid.generate())\n return self._anchors\n\n def iou_matrix(self, a, b):\n tl_i = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n br_i = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n area_i = np.prod(br_i - tl_i, axis=2) * (tl_i < br_i).all(axis=2)\n area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)\n area_o = (area_a[:, np.newaxis] + area_b - area_i)\n # return area_i / (area_o + 1e-10)\n return np.where(area_i == 0., np.zeros_like(area_i), area_i / area_o)\n\n def match(self, anchors, gt_boxes):\n # XXX put smaller matrix first would be a little bit faster\n mat = self.iou_matrix(gt_boxes, anchors)\n max_anchor_for_each_gt = mat.argmax(axis=1)\n max_for_each_anchor = mat.max(axis=0)\n anchor_to_gt = mat.argmax(axis=0)\n anchor_to_gt[max_for_each_anchor < self.match_threshold] = -1\n # XXX ensure each gt has at least one anchor assigned,\n # see `force_match_for_each_row` in TF implementation\n one_hot = np.zeros_like(mat)\n one_hot[np.arange(mat.shape[0]), max_anchor_for_each_gt] = 1.\n max_anchor_indices = one_hot.sum(axis=0).nonzero()[0]\n max_gt_indices = one_hot.argmax(axis=0)[max_anchor_indices]\n anchor_to_gt[max_anchor_indices] = max_gt_indices\n return anchor_to_gt\n\n def encode(self, anchors, boxes):\n wha = anchors[..., 2:] - anchors[..., :2] + 1\n ca = anchors[..., :2] + wha * .5\n whb = boxes[..., 2:] - boxes[..., :2] + 1\n cb = boxes[..., :2] + whb * .5\n offsets = np.empty_like(anchors)\n offsets[..., :2] = (cb - ca) / wha\n offsets[..., 2:] = np.log(whb / wha)\n return offsets\n\n def __call__(self, sample, context=None):\n gt_boxes = sample['gt_bbox']\n gt_labels = sample['gt_class']\n labels = np.full((self.anchors.shape[0], 1), 0, dtype=np.int32)\n targets = np.full((self.anchors.shape[0], 4), 0., dtype=np.float32)\n sample['gt_label'] = labels\n sample['gt_target'] = targets\n\n if len(gt_boxes) < 1:\n sample['fg_num'] = np.array(0, dtype=np.int32)\n return sample\n\n anchor_to_gt = self.match(self.anchors, gt_boxes)\n matched_indices = (anchor_to_gt >= 0).nonzero()[0]\n labels[matched_indices] = gt_labels[anchor_to_gt[matched_indices]]\n\n matched_boxes = gt_boxes[anchor_to_gt[matched_indices]]\n matched_anchors = self.anchors[matched_indices]\n matched_targets = self.encode(matched_anchors, matched_boxes)\n targets[matched_indices] = matched_targets\n sample['fg_num'] = np.array(len(matched_targets), dtype=np.int32)\n return sample\n\n\n@register_op\nclass DebugVisibleImage(BaseOperator):\n \"\"\"\n In debug mode, visualize images according to `gt_box`.\n (Currently only supported when not cropping and flipping image.)\n \"\"\"\n\n def __init__(self,\n output_dir='output/debug',\n use_vdl=False,\n 
is_normalized=False):\n super(DebugVisibleImage, self).__init__()\n self.is_normalized = is_normalized\n self.output_dir = output_dir\n self.use_vdl = use_vdl\n if not os.path.isdir(output_dir):\n os.makedirs(output_dir)\n if not isinstance(self.is_normalized, bool):\n raise TypeError(\"{}: input type is invalid.\".format(self))\n if self.use_vdl:\n assert six.PY3, \"VisualDL requires Python >= 3.5\"\n from visualdl import LogWriter\n self.vdl_writer = LogWriter(self.output_dir)\n\n def __call__(self, sample, context=None):\n out_file_name = sample['im_file'].split('/')[-1]\n if self.use_vdl:\n origin_image = Image.open(sample['im_file']).convert('RGB')\n origin_image = ImageOps.exif_transpose(origin_image)\n image_np = np.array(origin_image)\n self.vdl_writer.add_image(\"original/{}\".format(out_file_name),\n image_np, 0)\n\n if not isinstance(sample['image'], np.ndarray):\n raise TypeError(\"{}: sample[image] type is not numpy.\".format(self))\n image = Image.fromarray(np.uint8(sample['image']))\n\n width = sample['w']\n height = sample['h']\n gt_bbox = sample['gt_bbox']\n gt_class = sample['gt_class']\n\n if 'gt_poly' in sample.keys():\n poly_to_mask = Poly2Mask()\n sample = poly_to_mask(sample)\n\n if 'gt_segm' in sample.keys():\n import pycocotools.mask as mask_util\n from ppdet.utils.colormap import colormap\n image_np = np.array(image).astype('float32')\n mask_color_id = 0\n w_ratio = .4\n alpha = 0.7\n color_list = colormap(rgb=True)\n gt_segm = sample['gt_segm']\n for mask in gt_segm:\n color_mask = color_list[mask_color_id % len(color_list), 0:3]\n mask_color_id += 1\n for c in range(3):\n color_mask[c] = color_mask[c] * (1 - w_ratio\n ) + w_ratio * 255\n idx = np.nonzero(mask)\n image_np[idx[0], idx[1], :] *= 1.0 - alpha\n image_np[idx[0], idx[1], :] += alpha * color_mask\n image = Image.fromarray(np.uint8(image_np))\n\n draw = ImageDraw.Draw(image)\n for i in range(gt_bbox.shape[0]):\n if self.is_normalized:\n gt_bbox[i][0] = gt_bbox[i][0] * width\n gt_bbox[i][1] = gt_bbox[i][1] * height\n gt_bbox[i][2] = gt_bbox[i][2] * width\n gt_bbox[i][3] = gt_bbox[i][3] * height\n\n xmin, ymin, xmax, ymax = gt_bbox[i]\n draw.line(\n [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin),\n (xmin, ymin)],\n width=2,\n fill='green')\n # draw label\n text = 'id' + str(gt_class[i][0])\n tw, th = draw.textsize(text)\n draw.rectangle(\n [(xmin + 1, ymin - th), (xmin + tw + 1, ymin)], fill='green')\n draw.text((xmin + 1, ymin - th), text, fill=(255, 255, 255))\n\n if 'gt_keypoint' in sample.keys():\n gt_keypoint = sample['gt_keypoint']\n if self.is_normalized:\n for i in range(gt_keypoint.shape[1]):\n if i % 2:\n gt_keypoint[:, i] = gt_keypoint[:, i] * height\n else:\n gt_keypoint[:, i] = gt_keypoint[:, i] * width\n for i in range(gt_keypoint.shape[0]):\n keypoint = gt_keypoint[i]\n for j in range(int(keypoint.shape[0] / 2)):\n x1 = round(keypoint[2 * j])\n y1 = round(keypoint[2 * j + 1])\n draw.ellipse(\n (x1, y1, x1 + 5, y1 + 5), fill='green', outline='green')\n save_path = os.path.join(self.output_dir, out_file_name)\n if self.use_vdl:\n preprocess_image_np = np.array(image)\n self.vdl_writer.add_image(\"preprocess/{}\".format(out_file_name),\n preprocess_image_np, 0)\n else:\n image.save(save_path, quality=95)\n return sample\n\n\n@register_op\nclass Poly2Mask(BaseOperator):\n \"\"\"\n gt poly to mask annotations\n \"\"\"\n\n def __init__(self):\n super(Poly2Mask, self).__init__()\n import pycocotools.mask as maskUtils\n self.maskutils = maskUtils\n\n def _poly2mask(self, mask_ann, img_h, 
img_w):\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = self.maskutils.frPyObjects(mask_ann, img_h, img_w)\n rle = self.maskutils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = self.maskutils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = self.maskutils.decode(rle)\n return mask\n\n def __call__(self, sample, context=None):\n assert 'gt_poly' in sample\n im_h = sample['h']\n im_w = sample['w']\n masks = [\n self._poly2mask(gt_poly, im_h, im_w)\n for gt_poly in sample['gt_poly']\n ]\n sample['gt_segm'] = np.asarray(masks).astype(np.uint8)\n return sample\n"
] |
[
[
"numpy.dot",
"numpy.ones_like",
"numpy.random.choice",
"numpy.random.rand",
"numpy.minimum",
"numpy.min",
"numpy.where",
"numpy.cos",
"numpy.frombuffer",
"numpy.max",
"numpy.concatenate",
"numpy.sin",
"numpy.random.normal",
"numpy.zeros_like",
"numpy.full",
"numpy.log",
"numpy.uint8",
"numpy.logical_and",
"numpy.take",
"numpy.nonzero",
"numpy.prod",
"numpy.random.randint",
"numpy.arange",
"numpy.sqrt",
"numpy.swapaxes",
"numpy.empty_like",
"numpy.expand_dims",
"numpy.array",
"numpy.int",
"numpy.delete",
"numpy.zeros",
"numpy.round",
"numpy.random.shuffle",
"numpy.clip",
"numpy.squeeze",
"numpy.asarray",
"numpy.random.permutation",
"numpy.ones",
"numpy.random.beta",
"numpy.random.uniform",
"numpy.maximum"
]
] |
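The `TargetAssign.iou_matrix` method in the row above computes all pairwise IoUs between ground-truth boxes and anchor boxes with pure numpy broadcasting. A minimal standalone sketch of that same pattern, assuming the `[x0, y0, x1, y1]` box layout used in the source:

```python
import numpy as np

def iou_matrix(a, b):
    """Pairwise IoU between boxes a (N, 4) and b (M, 4), layout [x0, y0, x1, y1]."""
    tl = np.maximum(a[:, np.newaxis, :2], b[:, :2])  # (N, M, 2) intersection top-left
    br = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])  # (N, M, 2) intersection bottom-right
    # intersection area, zeroed out where the boxes do not overlap
    area_i = np.prod(br - tl, axis=2) * (tl < br).all(axis=2)
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)
    area_o = area_a[:, np.newaxis] + area_b - area_i
    # avoid 0/0 for non-overlapping or degenerate boxes, as the source does
    return np.where(area_i == 0., np.zeros_like(area_i), area_i / area_o)

a = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])
b = np.array([[0., 0., 10., 10.]])
print(iou_matrix(a, b))  # [[1.0], [~0.1429]]: 25 / (100 + 100 - 25)
```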
double-fire-0/SystemNoise
|
[
"ab042dd54371482a18117eb13f816a7472e51590"
] |
[
"EOC/prototype/data/utils/imagenet_s_gen.py"
] |
[
"import os.path as osp\nimport numpy as np\nfrom PIL import Image\nimport io\nimport cv2\nimport ffmpeg\nimport copy\nimport math\nimport random\nimport os\nfrom tqdm import tqdm\n# try:\n# import mc\n# except ImportError:\n# pass\nimport argparse\n\n\npil_resize_mode_dict = {\n \"pil-bilinear\": Image.BILINEAR,\n \"pil-nearest\": Image.NEAREST,\n \"pil-box\": Image.BOX,\n \"pil-hamming\": Image.HAMMING,\n \"pil-cubic\": Image.BICUBIC,\n \"pil-lanczos\": Image.LANCZOS\n}\n\ncv_resize_mode_dict = {\n \"opencv-nearest\": cv2.INTER_NEAREST,\n \"opencv-bilinear\": cv2.INTER_LINEAR,\n \"opencv-area\": cv2.INTER_AREA,\n \"opencv-cubic\": cv2.INTER_CUBIC,\n \"opencv-lanczos\": cv2.INTER_LANCZOS4\n}\n\n\nclass ImageTransfer:\n def __init__(self, root_dir, meta_file, save_root, decoder_type='pil',\n resize_type='pil-bilinear', resize=224, transform_type='val'):\n self.root_dir = root_dir\n self.meta_file = meta_file\n self.decoder_type = decoder_type\n self.resize_type = resize_type\n self.save_root = save_root\n self.transform_type = transform_type\n\n if isinstance(resize, tuple):\n self.resize = resize\n else:\n self.resize = (resize, resize)\n self.color_mode = 'RGB'\n\n with open(meta_file) as f:\n lines = f.readlines()\n self.num = len(lines)\n self.metas = []\n for line in lines:\n filename, label = line.rstrip().split()\n self.metas.append({'filename': filename, 'label': label})\n\n def write_to_filesystem(self):\n new_meta_file_name = self.decoder_type + '_' + self.resize_type + '.txt'\n new_meta_file = open(new_meta_file_name, 'w')\n save_dir = osp.join(self.save_root, self.decoder_type, self.resize_type)\n if not osp.exists(save_dir):\n os.makedirs(save_dir)\n\n for idx in tqdm(range(self.num)):\n np_image, label = self.getimage(idx)\n save_file_name = self.metas[idx]['filename'] + '.npy'\n save_path = osp.join(save_dir, save_file_name)\n np.save(save_path, np_image)\n\n new_meta_file.write(f'{osp.join(self.decoder_type, self.resize_type, save_file_name)} {label}'+'\\n')\n\n def getimage(self, idx):\n curr_meta = copy.deepcopy(self.metas[idx])\n filename = osp.join(self.root_dir, curr_meta['filename'])\n label = int(curr_meta['label'])\n # add root_dir to filename\n curr_meta['filename'] = filename\n img_bytes = self.read_file(curr_meta)\n\n img_after_decode = self.image_decoder(img_bytes, filepath=filename)\n assert isinstance(img_after_decode, np.ndarray)\n\n y, x, h, w = self.get_params(img_after_decode)\n img_after_resize = self.image_resize(img_after_decode, y, x, h, w)\n\n return img_after_resize, label\n\n\n def image_resize(self, img, y, x, h, w):\n if 'pil' in self.resize_type:\n img = self.toPIL(img)\n interpolation = pil_resize_mode_dict[self.resize_type]\n elif 'opencv' in self.resize_type:\n interpolation = cv_resize_mode_dict[self.resize_type]\n else:\n raise NotImplementedError\n\n if self.transform_type == 'train':\n i, j = y, x\n size = self.resize\n if 'pil' in self.resize_type:\n img = img.crop((j, i, j + w, i + h))\n return self.toNumpy(self.PIL_resize(img, size, interpolation))\n elif 'opencv' in self.resize_type:\n img = img[y: y + h, x: x + w]\n img = cv2.resize(img, self.resize, interpolation=interpolation)\n return img\n else:\n raise NotImplementedError\n elif self.transform_type == 'val':\n if 'pil' in self.resize_type:\n frist_resize = tuple(size * 8 / 7 for size in self.resize)\n img = self.PIL_resize(img, frist_resize, interpolation)\n\n w, h = img.size\n th, tw = self.resize\n i = int(round((h - th) / 2.))\n j = int(round((w - tw) / 2.))\n img = 
img.crop((j, i, j + tw, i + th))\n return self.toNumpy(img)\n elif 'opencv' in self.resize_type:\n width, height = tuple(int(size * 8 / 7) for size in self.resize)\n img = cv2.resize(img, (width, height), interpolation=interpolation)\n\n h, w, c = img.shape\n th, tw = self.resize\n dy = int(round((h - th) / 2.))\n dx = int(round((w - tw) / 2.))\n return img[dy: dy + th, dx: dx + tw]\n else:\n raise NotImplementedError\n\n\n\n\n def PIL_resize(self, img, size, interpolation):\n if isinstance(size, int):\n w, h = img.size\n if (w <= h and w == size) or (h <= w and h == size):\n return img\n if w < h:\n ow = size\n oh = int(size * h / w)\n return img.resize((ow, oh), interpolation)\n else:\n oh = size\n ow = int(size * w / h)\n return img.resize((ow, oh), interpolation)\n else:\n return img.resize(size[::-1], interpolation)\n\n\n\n def toNumpy(self, img):\n return np.asarray(img)\n\n def toPIL(self, img):\n return Image.fromarray(img)\n\n def image_decoder(self, filebytes, filepath=None):\n if self.decoder_type == 'pil':\n buff = io.BytesIO(filebytes)\n try:\n with Image.open(buff) as img:\n img = img.convert('RGB')\n if self.color_mode == \"BGR\":\n b, g, r = img.split()\n img = Image.merge(\"RGB\", (r, g, b))\n elif self.color_mode == \"GRAY\":\n img = img.convert('L')\n\n except IOError:\n print('Failed in loading {}'.format(filepath))\n image_array = np.array(img)\n return image_array\n elif self.decoder_type == 'opencv':\n try:\n img = cv2.imdecode(filebytes, cv2.IMREAD_COLOR)\n if self.color_mode == \"RGB\":\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n elif self.color_mode == \"GRAY\":\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n except IOError:\n print('Failed in loading {}'.format(filepath))\n return img\n elif self.decoder_type == 'ffmpeg':\n img = cv2.imdecode(filebytes, cv2.IMREAD_COLOR)\n height = img.shape[0]\n width = img.shape[1]\n out, _ = (\n ffmpeg\n .input(filepath)\n .output('pipe:', format='rawvideo', pix_fmt='rgb24')\n .run(capture_stdout=True)\n )\n img = (\n np\n .frombuffer(out, np.uint8)\n .reshape([height, width, 3])\n )\n return img\n else:\n raise NotImplementedError\n\n def get_params(self, img, scale=(0.08, 1.0), ratio=(3. / 4., 4. 
/ 3.)):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n\n area = img.shape[0] * img.shape[1]\n height, width = img.shape[0], img.shape[1]\n\n for attempt in range(10):\n target_area = random.uniform(*scale) * area\n log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n aspect_ratio = math.exp(random.uniform(*log_ratio))\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if 0 < w <= width and 0 < h <= height:\n i = random.randint(0, height - h)\n j = random.randint(0, width - w)\n return i, j, h, w\n\n in_ratio = float(width) / float(height)\n if (in_ratio < min(ratio)):\n w = width\n h = int(round(w / min(ratio)))\n elif (in_ratio > max(ratio)):\n h = height\n w = int(round(h * max(ratio)))\n else: # whole image\n w = width\n h = height\n i = (height - h) // 2\n j = (width - w) // 2\n return i, j, h, w\n\n def read_file(self, meta_dict):\n filebytes = np.fromfile(meta_dict['filename'], dtype=np.uint8)\n return filebytes\n # self._init_memcached()\n # value = mc.pyvector()\n # self.mclient.Get(meta_dict['filename'], value)\n # value_str = mc.ConvertBuffer(value)\n # filebytes = np.frombuffer(value_str.tobytes(), dtype=np.uint8)\n # return filebytes\n #\n # def _init_memcached(self):\n # if not self.initialized:\n # server_list_config_file = \"/mnt/lustre/share/memcached_client/server_list.conf\"\n # client_config_file = \"/mnt/lustre/share/memcached_client/client.conf\"\n # self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file, client_config_file)\n # self.initialized = True\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generate Dataset')\n parser.add_argument('--decoder', required=False, type=str, default='pil', choices=['pil', 'opencv', 'ffmpeg'])\n parser.add_argument('--resize', required=False, type=str, default='pil-bilinear',\n choices=['pil-bilinear', 'pil-nearest', 'pil-box', 'pil-hamming', 'pil-cubic', 'pil-lanczos',\n 'opencv-nearest', 'opencv-bilinear', 'opencv-area', 'opencv-cubic', 'opencv-lanczos'])\n parser.add_argument('--transform-type', required=False, type=str, default='val', choices=['val', 'train'])\n # train: Random Resize Crop\n # val: Resize (outsize * (8/7)) + Center Crop\n\n args = parser.parse_args()\n\n ImageTransfer(root_dir='/mnt/lustre/share/images/val', meta_file='/meta/val.txt',\n save_root='/mnt/lustre/wangyan3/dataset-decoder-resize', decoder_type=args.decoder,\n transform_type=args.transform_type, resize_type=args.resize).write_to_filesystem()\n\n"
] |
[
[
"numpy.array",
"numpy.asarray",
"numpy.save",
"numpy.fromfile",
"numpy.frombuffer"
]
] |
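The `imagenet_s_gen.py` row above materializes the same validation images under different decoder and resize backends. The observation it depends on is that nominally identical interpolation modes disagree across backends; a minimal sketch of that (exact pixel differences vary with library versions):

```python
import numpy as np
import cv2
from PIL import Image

# the same random uint8 image resized with "bilinear" in both backends
rng = np.random.default_rng(0)
img = rng.integers(0, 256, size=(64, 48, 3), dtype=np.uint8)

# both APIs take (width, height) as the target size
pil_out = np.asarray(Image.fromarray(img).resize((24, 32), Image.BILINEAR))
cv_out = cv2.resize(img, (24, 32), interpolation=cv2.INTER_LINEAR)

print(pil_out.shape, cv_out.shape)  # (32, 24, 3) for both
# typically nonzero: the two bilinear implementations are not pixel-identical
print(np.abs(pil_out.astype(int) - cv_out.astype(int)).max())
```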
zehuilu/How-to-Use-Qualisys-Motion-Capture-System-in-AIMS-Lab
|
[
"862860a9a5d28fc60ee01954e4929a908bf80533"
] |
[
"python/streaming_6dof_data.py"
] |
[
"#!/usr/bin/python3\n\n\"\"\"\n Streaming 6-DOF data from QTM forever\n (start QTM first, Capture->Continuous Capture)\n\"\"\"\n\nimport asyncio\nimport xml.etree.ElementTree as ET\nimport pkg_resources\nimport qtm\nimport json\nimport numpy as np\nimport socket\n\n\ndef create_body_index(xml_string):\n \"\"\" Extract a name to index dictionary from 6-DOF settings xml \"\"\"\n xml = ET.fromstring(xml_string)\n\n body_to_index = {}\n for index, body in enumerate(xml.findall(\"*/Body/Name\")):\n body_to_index[body.text.strip()] = index\n\n return body_to_index\n\n\ndef publisher_udp_main(json_file_data):\n \"\"\"\n The following two lines show what is json_file_data\n\n json_file = open('mocap_config.json')\n json_file_data = json.load(json_file)\n \"\"\"\n\n # IP for publisher\n HOST_UDP = json_file_data['HOST_UDP']\n # Port for publisher\n PORT_UDP = int(json_file_data['PORT_UDP'])\n\n server_address_udp = (HOST_UDP, PORT_UDP)\n # Create a UDP socket\n sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n return sock_udp, server_address_udp\n\n\nasync def main(network_config_file_name):\n # Read the configuration from the json file\n json_file = open(network_config_file_name)\n json_file_data = json.load(json_file)\n\n # 1 for realtime streaming, 0 for loading qtm file\n flag_realtime = int(json_file_data['FLAG_REALTIME'])\n\n # IP address for the mocap server\n IP_server = json_file_data['IP_SERVER']\n\n # If you want to stream recorded data in a real-time way, change json file and load it here.\n # There might be a bug about file path. Will test it later. -- Sept. 08, 2020\n file_name_qtm = json_file_data['NAME_FILE_LOADED_QTM']\n QTM_FILE = pkg_resources.resource_filename(\"qtm\", file_name_qtm)\n\n # Connect to qtm\n connection = await qtm.connect(IP_server)\n\n # Connection failed?\n if connection is None:\n print(\"Failed to connect\")\n return\n\n # Take control of qtm, context manager will automatically release control after scope end\n async with qtm.TakeControl(connection, \"password\"):\n if not flag_realtime:\n # Load qtm file\n await connection.load(QTM_FILE)\n # start rtfromfile\n await connection.start(rtfromfile=True)\n\n # Get 6-DOF settings from QTM\n xml_string = await connection.get_parameters(parameters=[\"6d\"])\n\n # Create a UDP socket for data streaming\n sock_udp, server_address_udp = publisher_udp_main(json_file_data)\n\n # parser for mocap rigid bodies indexing\n body_index = create_body_index(xml_string)\n\n wanted_body = json_file_data['NAME_SINGLE_BODY']\n\n def on_packet(packet):\n # Get the 6-DOF data\n bodies = packet.get_6d()[1]\n\n if wanted_body is not None and wanted_body in body_index:\n # Extract one specific body\n wanted_index = body_index[wanted_body]\n position, rotation = bodies[wanted_index]\n # You can use position and rotation here. 
Notice that the unit for position is mm!\n                print(wanted_body)\n\n                print(\"Position in numpy [meter]\")\n                position_np = np.array([[position.x/1000.0], [position.y/1000.0], [position.z/1000.0]], dtype=np.float64)\n                print(position_np)\n\n                # rotation.matrix is a tuple with 9 elements.\n                print(\"Rotation matrix in numpy\")\n                rotation_np = np.asarray(rotation.matrix, dtype=np.float64).reshape(3, 3)\n                print(rotation_np)\n\n                # send 6-DOF data via UDP\n                # concatenate the position and rotation matrix vertically\n                msg = np.asarray((position.x/1000.0, position.y/1000.0, position.z/1000.0) + rotation.matrix, dtype=np.float64).tobytes()\n                sock_udp.sendto(msg, server_address_udp)\n                print(\"6-DOF data sent via UDP!\")\n\n            else:\n                # No such rigid body; print the notice once, then all bodies\n                print(\"There is no such rigid body! Printing all bodies.\")\n                for position, rotation in bodies:\n                    print(\"Pos: {} - Rot: {}\".format(position, rotation))\n\n        # Start streaming frames\n        # Make sure the component matches with the data fetch function, for example: packet.get_6d() with \"6d\"\n        # Reference: https://qualisys.github.io/qualisys_python_sdk/index.html\n        await connection.stream_frames(components=[\"6d\"], on_packet=on_packet)\n\n\nif __name__ == \"__main__\":\n    network_config_file_name = 'mocap_config.json'\n    # Run our asynchronous main function forever\n    asyncio.ensure_future(main(network_config_file_name))\n    asyncio.get_event_loop().run_forever()\n"
] |
[
[
"numpy.array",
"numpy.asarray"
]
] |
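The streaming script above packs the position (converted to meters) plus the 9-element `rotation.matrix` tuple into 12 float64 values and ships them over UDP. A minimal matching receiver, assuming the 96-byte payload that sender produces; the host/port below are placeholders for the values in `mocap_config.json`, and the row-major reshape follows the sender's own `reshape(3, 3)` interpretation:

```python
import socket
import numpy as np

HOST_UDP, PORT_UDP = "127.0.0.1", 22222  # placeholders; real values come from mocap_config.json

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((HOST_UDP, PORT_UDP))

data, _ = sock.recvfrom(1024)            # one packet is 12 * 8 = 96 bytes
msg = np.frombuffer(data, dtype=np.float64)
position = msg[:3]                       # x, y, z in meters
rotation = msg[3:].reshape(3, 3)         # 3x3 rotation matrix
print(position)
print(rotation)
```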
markmac99/WesternMeteorPyLib
|
[
"c5104974c3f1e2259b0d0ea63a9bbaa15d236be2"
] |
[
"wmpl/Trajectory/Orbit.py"
] |
[
"from __future__ import print_function, division, absolute_import\n\nimport os\nimport sys\nimport datetime\nimport argparse\n\nimport numpy as np\n\nfrom jplephem.spk import SPK\n\n\nfrom wmpl.Config import config\n\nfrom wmpl.Utils.Earth import calcEarthRectangularCoordJPL\nfrom wmpl.Utils.ShowerAssociation import associateShower\nfrom wmpl.Utils.SolarLongitude import jd2SolLonJPL\nfrom wmpl.Utils.TrajConversions import J2000_JD, J2000_OBLIQUITY, AU, SUN_MU, SUN_MASS, G, SIDEREAL_YEAR, \\\n jd2LST, jd2Date, jd2DynamicalTimeJD, eci2RaDec, altAz2RADec, raDec2AltAz, raDec2Ecliptic, cartesian2Geo,\\\n equatorialCoordPrecession, eclipticToRectangularVelocityVect, correctedEclipticCoord, datetime2JD, \\\n geo2Cartesian\nfrom wmpl.Utils.Math import vectNorm, vectMag, rotateVector, cartesianToSpherical, sphericalToCartesian\nfrom wmpl.Utils.Pickling import loadPickle\n\n\n\nclass Orbit(object):\n \"\"\" Structure for storing the orbit solution of a meteor. \"\"\"\n\n def __init__(self):\n\n\n ### Apparent radiant in ECI (Earth's rotation is included) ###\n\n # Apparent radiant position (ECI, radians)\n self.ra = None\n self.dec = None\n\n # Apparent azimuth and altitude (ECI)\n self.azimuth_apparent = None\n self.elevation_apparent = None\n\n # Estimated average velocity (ECI)\n self.v_avg = None\n\n # Estimated initial velocity (ECI)\n self.v_init = None\n\n ### ###\n\n\n\n ### Apparent radiant which includes no Earth's rotation (reference to the ground) ###\n\n # Apparent radiant position (no Earth's rotation, radians)\n self.ra_norot = None\n self.dec_norot = None\n\n # Apparent azimuth and altitude (no Earth's rotation)\n self.azimuth_apparent_norot = None\n self.elevation_apparent_norot = None\n\n # Estimated average velocity (no Earth's rotation)\n self.v_avg_norot = None\n\n # Estimated initial velocity (no Earth's rotation)\n self.v_init_norot = None\n\n ### ###\n\n\n\n # Reference Julian date for the trajectory. 
Can be the time of the first point on the trajectory or the\n # average time of the meteor\n self.jd_ref = None\n\n # Dynamical Julian date\n self.jd_dyn = None\n\n # reference Local Sidreal Time of the reference trajectory position\n self.lst_ref = None\n\n # Longitude of the reference point on the trajectory (rad)\n self.lon_ref = None\n\n # Latitude of the reference point on the trajectory (rad)\n self.lat_ref = None\n\n # Height of the reference point on the trajectory (meters)\n self.ht_ref = None\n\n # Geocentric latitude of the reference point on the trajectory (rad)\n self.lat_geocentric = None\n\n # Apparent zenith angle (before the correction for Earth's gravity)\n self.zc = None\n\n # Zenith distance of the geocentric radiant (after the correction for Earth's gravity)\n self.zg = None\n\n # Velocity at infinity\n self.v_inf = None\n\n # Geocentric velocity (m/s)\n self.v_g = None\n\n # Geocentric radiant position (radians)\n self.ra_g = None\n self.dec_g = None\n\n # Ecliptic coordinates of the radiant (radians)\n self.L_g = None\n self.B_g = None\n\n # Sun-centered ecliptic rectangular coordinates of the average position on the meteor's trajectory \n # (in kilometers)\n self.meteor_pos = None\n\n # Helioventric velocity of the meteor (m/s)\n self.v_h = None\n\n # Components of the heliocentric velocity vector of the meteoroid\n self.v_h_x = None\n self.v_h_y = None\n self.v_h_z = None\n\n # Heliocentric ecliptic coordinates of the meteor\n self.L_h = None\n self.B_h = None\n\n # Solar longitude (radians)\n self.la_sun = None\n\n # Semi-major axis (AU)\n self.a = None\n\n # Eccentricty\n self.e = None\n\n # Inclination (radians)\n self.i = None\n\n # Argument of perihelion (radians)\n self.peri = None\n\n # Ascending node (radians)\n self.node = None\n\n # Longitude of perihelion (radians)\n self.pi = None\n\n # Latitude of perihelion (radians)\n self.b = None\n\n # Perihelion distance (AU)\n self.q = None\n\n # Aphelion distance (AU)\n self.Q = None\n\n # True anomaly at the moment of contact with Earth (radians)\n self.true_anomaly = None\n\n # Exxentric anomaly (radians)\n self.eccentric_anomaly = None\n\n # Mean anomaly (radians)\n self.mean_anomaly = None\n\n # Calculate the date and time of the last perihelion passage (datetime object)\n self.last_perihelion = None\n\n # Mean motion in the orbit (rad/day)\n self.n = None\n\n # Tisserand's parameter with respect to Jupiter\n self.Tj = None\n\n # Orbital period\n self.T = None\n\n\n def fixMissingParameters(self):\n \"\"\" Some old orbit files might have missing parameters that were not computed. This function computes\n them.\n \"\"\"\n\n if (not hasattr(self, 'b')) and (self.v_g is not None):\n self.b = calcLatitudeOfPerihelion(self.peri, self.node, self.i)\n\n\n def __repr__(self, uncertainties=None, v_init_ht=None):\n \"\"\" String to be printed out when the Orbit object is printed. \"\"\"\n\n def _uncer(str_format, std_name, multi=1.0, deg=False):\n \"\"\" Internal function. Returns the formatted uncertanty, if the uncertanty is given. If not,\n it returns nothing. \n\n Arguments:\n str_format: [str] String format for the unceertanty.\n std_name: [str] Name of the uncertanty attribute, e.g. if it is 'x', then the uncertanty is \n stored in uncertainties.x.\n \n Keyword arguments:\n multi: [float] Uncertanty multiplier. 1.0 by default. This is used to scale the uncertanty to\n different units (e.g. from m/s to km/s).\n deg: [bool] Converet radians to degrees if True. 
False by defualt.\n \"\"\"\n\n if deg:\n multi *= np.degrees(1.0)\n\n if uncertainties is not None:\n if hasattr(uncertainties, std_name):\n return \" +/- \" + str_format.format(getattr(uncertainties, std_name)*multi)\n\n \n return ''\n\n\n out_str = \"\"\n #out_str += \"--------------------\\n\"\n\n # Check if the orbit was calculated\n if self.ra_g is not None:\n out_str += \" JD dynamic = {:20.12f} \\n\".format(self.jd_dyn)\n out_str += \" LST apparent = {:.10f} deg\\n\".format(np.degrees(self.lst_ref))\n\n\n ### Apparent radiant in ECI ###\n\n out_str += \"Radiant (apparent in ECI which includes Earth's rotation, epoch of date):\\n\"\n out_str += \" R.A. = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.ra), _uncer('{:.4f}', 'ra', \n deg=True))\n out_str += \" Dec = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.dec), _uncer('{:.4f}', 'dec', \n deg=True))\n out_str += \" Azimuth = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.azimuth_apparent), \\\n _uncer('{:.4f}', 'azimuth_apparent', deg=True))\n out_str += \" Elevation = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.elevation_apparent), \\\n _uncer('{:.4f}', 'elevation_apparent', deg=True))\n out_str += \" Vavg = {:>9.5f}{:s} km/s\\n\".format(self.v_avg/1000, _uncer('{:.4f}', 'v_avg', \n multi=1.0/1000))\n\n\n if v_init_ht is not None:\n v_init_ht_str = ' (average above {:.2f} km)'.format(v_init_ht)\n else:\n v_init_ht_str = ''\n\n out_str += \" Vinit = {:>9.5f}{:s} km/s{:s}\\n\".format(self.v_init/1000, _uncer('{:.4f}', 'v_init', \n multi=1.0/1000), v_init_ht_str)\n\n\n ### ###\n\n\n ### Apparent radiant in ECEF (no rotation included) ###\n\n out_str += \"Radiant (apparent ground-fixed, epoch of date):\\n\"\n out_str += \" R.A. = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.ra_norot), _uncer('{:.4f}', \\\n 'ra', deg=True))\n out_str += \" Dec = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.dec_norot), _uncer('{:.4f}', \\\n 'dec', deg=True))\n out_str += \" Azimuth = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.azimuth_apparent_norot), \\\n _uncer('{:.4f}', 'azimuth_apparent', deg=True))\n out_str += \" Elevation = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.elevation_apparent_norot), \\\n _uncer('{:.4f}', 'elevation_apparent', deg=True))\n out_str += \" Vavg = {:>9.5f}{:s} km/s\\n\".format(self.v_avg_norot/1000, _uncer('{:.4f}', \\\n 'v_avg', multi=1.0/1000))\n out_str += \" Vinit = {:>9.5f}{:s} km/s{:s}\\n\".format(self.v_init_norot/1000, _uncer('{:.4f}', \\\n 'v_init', multi=1.0/1000), v_init_ht_str)\n\n\n\n ### ###\n\n\n # Check if the orbital elements could be calculated, and write them out\n if self.ra_g is not None:\n\n out_str += \"Radiant (geocentric, J2000):\\n\"\n out_str += \" R.A. 
= {:>9.5f}{:s} deg\\n\".format(np.degrees(self.ra_g), _uncer('{:.4f}', 'ra_g', \n deg=True))\n out_str += \" Dec = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.dec_g), _uncer('{:.4f}', 'dec_g', \n deg=True))\n out_str += \" Vg = {:>9.5f}{:s} km/s\\n\".format(self.v_g/1000, _uncer('{:.4f}', 'v_g', \n multi=1.0/1000))\n out_str += \" Vinf = {:>9.5f}{:s} km/s\\n\".format(self.v_inf/1000, _uncer('{:.4f}', 'v_inf', \n multi=1.0/1000))\n out_str += \" Zc = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.zc), _uncer('{:.4f}', 'zc', \n deg=True))\n out_str += \" Zg = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.zg), _uncer('{:.4f}', 'zg', \n deg=True))\n out_str += \"Radiant (ecliptic geocentric, J2000):\\n\"\n out_str += \" Lg = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.L_g), _uncer('{:.4f}', 'L_g', \n deg=True))\n out_str += \" Bg = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.B_g), _uncer('{:.4f}', 'B_g', \n deg=True))\n out_str += \" Vh = {:>9.5f}{:s} km/s\\n\".format(self.v_h/1000, _uncer('{:.4f}', 'v_h', \n multi=1/1000.0))\n out_str += \"Radiant (ecliptic heliocentric, J2000):\\n\"\n out_str += \" Lh = {:>9.5f}{:s} deg\\n\".format(np.degrees(self.L_h), _uncer('{:.4f}', 'L_h', \n deg=True))\n out_str += \" Bh = {:>+9.5f}{:s} deg\\n\".format(np.degrees(self.B_h), _uncer('{:.4f}', 'B_h', \n deg=True))\n out_str += \" Vh_x = {:>9.5f}{:s} km/s\\n\".format(self.v_h_x, _uncer('{:.4f}', 'v_h_x'))\n out_str += \" Vh_y = {:>9.5f}{:s} km/s\\n\".format(self.v_h_y, _uncer('{:.4f}', 'v_h_y'))\n out_str += \" Vh_z = {:>9.5f}{:s} km/s\\n\".format(self.v_h_z, _uncer('{:.4f}', 'v_h_z'))\n out_str += \"Orbit:\\n\"\n out_str += \" La Sun = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.la_sun), _uncer('{:.4f}', 'la_sun', \n deg=True))\n out_str += \" a = {:>10.6f}{:s} AU\\n\".format(self.a, _uncer('{:.4f}', 'a'))\n out_str += \" e = {:>10.6f}{:s}\\n\".format(self.e, _uncer('{:.4f}', 'e'))\n out_str += \" i = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.i), _uncer('{:.4f}', 'i', \n deg=True))\n out_str += \" peri = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.peri), _uncer('{:.4f}', 'peri', \n deg=True))\n out_str += \" node = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.node), _uncer('{:.4f}', 'node', \n deg=True))\n out_str += \" Pi = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.pi), _uncer('{:.4f}', 'pi', \n deg=True))\n if hasattr(self, 'b'):\n out_str += \" b = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.b), _uncer('{:.4f}', 'b', \n deg=True))\n out_str += \" q = {:>10.6f}{:s} AU\\n\".format(self.q, _uncer('{:.4f}', 'q'))\n out_str += \" f = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.true_anomaly), _uncer('{:.4f}', \n 'true_anomaly', deg=True))\n out_str += \" M = {:>10.6f}{:s} deg\\n\".format(np.degrees(self.mean_anomaly), _uncer('{:.4f}', \n 'mean_anomaly', deg=True))\n out_str += \" Q = {:>10.6f}{:s} AU\\n\".format(self.Q, _uncer('{:.4f}', 'Q'))\n out_str += \" n = {:>10.6f}{:s} deg/day\\n\".format(np.degrees(self.n), _uncer('{:.4f}', 'n', \n deg=True))\n out_str += \" T = {:>10.6f}{:s} years\\n\".format(self.T, _uncer('{:.4f}', 'T'))\n \n if self.last_perihelion is not None:\n out_str += \" Last perihelion JD = {:.6f} \".format(datetime2JD(self.last_perihelion)) \\\n + \"(\" + str(self.last_perihelion) + \")\" + _uncer('{:.4f} days', 'last_perihelion') \\\n + \"\\n\"\n else:\n out_str += \" Last perihelion JD = NaN \\n\"\n\n out_str += \" Tj = {:>10.6f}{:s}\\n\".format(self.Tj, _uncer('{:.4f}', 'Tj'))\n\n\n out_str += \"Shower association:\\n\"\n\n # Perform shower association\n shower_obj = 
associateShower(self.la_sun, self.L_g, self.B_g, self.v_g)\n if shower_obj is None:\n shower_no = -1\n shower_code = '...'\n else:\n shower_no = shower_obj.IAU_no\n shower_code = shower_obj.IAU_code\n\n out_str += \" IAU No. = {:>4d}\\n\".format(shower_no)\n out_str += \" IAU code = {:>4s}\\n\".format(shower_code)\n\n\n return out_str\n\n\n\ndef calcLatitudeOfPerihelion(peri, node, incl):\n \"\"\" Calculate the latitude of perihelion. \n Source: https://en.wikipedia.org/wiki/Longitude_of_the_periapsis#Derivation_of_ecliptic_longitude_and_latitude_of_perihelion_for_inclined_orbits\n \"\"\"\n\n Ap = np.cos(peri)*np.cos(node) - np.sin(peri)*np.sin(node)*np.cos(incl)\n Bp = np.cos(J2000_OBLIQUITY)*(np.cos(peri)*np.sin(node) + np.sin(peri)*np.cos(node)*np.cos(incl)) \\\n - np.sin(J2000_OBLIQUITY)*np.sin(peri)*np.sin(incl)\n Cp = np.sin(J2000_OBLIQUITY)*(np.cos(peri)*np.sin(node) + np.sin(peri)*np.cos(node)*np.cos(incl)) \\\n + np.cos(J2000_OBLIQUITY)*np.sin(peri)*np.sin(incl)\n\n # RA/Dec of the direction of perihelion\n ra_p = np.arctan2(Bp, Ap)%(2*np.pi)\n dec_p = np.arcsin(Cp)\n \n # Longitue of perihelion\n # pi_t = np.arctan2(np.sin(ra_p)*np.cos(J2000_OBLIQUITY) + np.tan(dec_p)*np.sin(J2000_OBLIQUITY), np.cos(ra_p))%(2*np.pi)\n \n # Latitude of perihelion\n b = np.arcsin(np.sin(dec_p)*np.cos(J2000_OBLIQUITY) - np.cos(dec_p)*np.sin(J2000_OBLIQUITY)*np.sin(ra_p))\n\n return b\n\n\ndef calcOrbit(radiant_eci, v_init, v_avg, eci_ref, jd_ref, stations_fixed=False, reference_init=True, \\\n rotation_correction=False):\n \"\"\" Calculate the meteor's orbit from the given meteor trajectory. The orbit of the meteoroid is defined \n relative to the centre of the Sun (heliocentric).\n\n Arguments:\n radiant_eci: [3 element ndarray] Radiant vector in ECI coordinates (meters).\n v_init: [float] Initial velocity (m/s).\n v_avg: [float] Average velocity of a meteor (m/s).\n eci_ref: [float] reference ECI coordinates in the epoch of date (meters, in the epoch of date) of the \n meteor trajectory. They can be calculated with the geo2Cartesian function. Ceplecha (1987) assumes \n this to the the average point on the trajectory, while Jennsikens et al. (2011) assume this to be \n the first point on the trajectory as that point is not influenced by deceleration.\n NOTE: If the stations are not fixed, the reference ECI coordinates should be the ones\n of the initial point on the trajectory, NOT of the average point!\n jd_ref: [float] reference Julian date of the meteor trajectory. Ceplecha (1987) takes this as the \n average time of the trajectory, while Jenniskens et al. (2011) take this as the the first point\n on the trajectory.\n \n Keyword arguments:\n stations_fixed: [bool] If True, the correction for Earth's rotation will be performed on the radiant,\n but not the velocity. This should be True ONLY in two occasions:\n - if the ECEF coordinate system was used for trajectory estimation\n - if the ECI coordinate system was used for trajectory estimation, BUT the stations were not\n moved in time, but were kept fixed at one point, regardless of the trajectory estimation\n method.\n It is necessary to perform this correction for the intersecting planes method, but not for\n the lines of sight method ONLY when the stations are not fixed. Of course, if one is using the \n lines of sight method with fixed stations, one should perform this correction!\n reference_init: [bool] If True (default), the initial point on the trajectory is given as the reference\n one, i.e. 
the reference ECI coordinates are the ECI coordinates of the initial point on the\n trajectory, where the meteor has the velocity v_init. If False, then the reference point is the\n average point on the trajectory, and the average velocity will be used to do the corrections.\n rotation_correction: [bool] If True, the correction of the initial velocity for Earth's rotation will\n be performed. False by default. This should ONLY be True if the coordiante system for trajectory\n estimation was ECEF, i.e. did not rotate with the Earth. In all other cases it should be False, \n even if fixed station coordinates were used in the ECI coordinate system!\n\n Return:\n orb: [Orbit object] Object containing the calculated orbit.\n\n \"\"\"\n\n\n ### Correct the velocity vector for the Earth's rotation if the stations are fixed ###\n ##########################################################################################################\n\n eci_x, eci_y, eci_z = eci_ref\n\n # Calculate the geocentric latitude (latitude which considers the Earth as an elipsoid) of the reference \n # trajectory point\n lat_geocentric = np.arctan2(eci_z, np.sqrt(eci_x**2 + eci_y**2))\n\n\n # Calculate the dynamical JD\n jd_dyn = jd2DynamicalTimeJD(jd_ref)\n\n # Calculate the geographical coordinates of the reference trajectory ECI position\n lat_ref, lon_ref, ht_ref = cartesian2Geo(jd_ref, *eci_ref)\n\n\n # Initialize a new orbit structure and assign calculated parameters\n orb = Orbit()\n\n\n\n # Calculate the velocity of the Earth rotation at the position of the reference trajectory point (m/s)\n v_e = 2*np.pi*vectMag(eci_ref)*np.cos(lat_geocentric)/86164.09053\n\n \n # Calculate the equatorial coordinates of east from the reference position on the trajectory\n azimuth_east = np.pi/2\n altitude_east = 0\n ra_east, dec_east = altAz2RADec(azimuth_east, altitude_east, jd_ref, lat_ref, lon_ref)\n\n\n # Compute velocity components of the state vector\n if reference_init:\n\n # If the initial velocity was the reference velocity, use it for the correction\n v_ref_vect = v_init*radiant_eci\n\n\n else:\n # Calculate reference velocity vector using the average point on the trajectory and the average\n # velocity\n v_ref_vect = v_avg*radiant_eci\n\n\n\n # Apply the Earth rotation correction if the station coordinates are fixed (a MUST for the \n # intersecting planes method!)\n if stations_fixed:\n\n ### Set fixed stations radiant info ###\n\n # If the stations are fixed, then the input state vector is already fixed to the ground\n orb.ra_norot, orb.dec_norot = eci2RaDec(radiant_eci)\n\n # Apparent azimuth and altitude (no rotation)\n orb.azimuth_apparent_norot, orb.elevation_apparent_norot = raDec2AltAz(orb.ra_norot, orb.dec_norot, \\\n jd_ref, lat_ref, lon_ref)\n\n # Estimated average velocity (no rotation)\n orb.v_avg_norot = v_avg\n\n # Estimated initial velocity (no rotation)\n orb.v_init_norot = v_init\n\n ### ###\n\n\n v_ref_corr = np.zeros(3)\n\n # Calculate the corrected reference velocity vector/radiant\n v_ref_corr[0] = v_ref_vect[0] - v_e*np.cos(ra_east)\n v_ref_corr[1] = v_ref_vect[1] - v_e*np.sin(ra_east)\n v_ref_corr[2] = v_ref_vect[2]\n\n\n\n else:\n\n # MOVING STATIONS\n # Velocity vector will remain unchanged if the stations were moving\n if reference_init:\n v_ref_corr = v_init*radiant_eci\n\n else:\n v_ref_corr = v_avg*radiant_eci\n\n\n\n ### ###\n # If the rotation correction does not have to be applied, meaning that the rotation is already\n # included, compute a version of the radiant and the velocity 
without Earth's rotation\n # (REPORTING PURPOSES ONLY, THESE VALUES ARE NOT USED IN THE CALCULATION)\n\n v_ref_nocorr = np.zeros(3)\n\n # Calculate the derotated reference velocity vector/radiant\n v_ref_nocorr[0] = v_ref_vect[0] + v_e*np.cos(ra_east)\n v_ref_nocorr[1] = v_ref_vect[1] + v_e*np.sin(ra_east)\n v_ref_nocorr[2] = v_ref_vect[2]\n\n # Compute the radiant without Earth's rotation included\n orb.ra_norot, orb.dec_norot = eci2RaDec(vectNorm(v_ref_nocorr))\n orb.azimuth_apparent_norot, orb.elevation_apparent_norot = raDec2AltAz(orb.ra_norot, orb.dec_norot, \\\n jd_ref, lat_ref, lon_ref)\n orb.v_init_norot = vectMag(v_ref_nocorr)\n orb.v_avg_norot = orb.v_init_norot - v_init + v_avg\n\n ### ###\n\n\n \n\n ##########################################################################################################\n\n\n\n ### Correct velocity for Earth's gravity ###\n ##########################################################################################################\n\n # If the reference velocity is the initial velocity\n if reference_init:\n\n # Use the corrected velocity for Earth's rotation (when ECEF coordinates are used)\n if rotation_correction:\n v_init_corr = vectMag(v_ref_corr)\n\n else:\n # IMPORTANT NOTE: The correction in this case is only done on the radiant (even if the stations \n # were fixed, but NOT on the initial velocity!). Thus, correction from Ceplecha 1987, \n # equation (35) is not needed. If the initial velocity is determined from time vs. length and in \n # ECI coordinates, whose coordinates rotate with the Earth, the moving stations play no role in \n # biasing the velocity.\n v_init_corr = v_init\n\n else:\n\n if rotation_correction:\n\n # Calculate the corrected initial velocity if the reference velocity is the average velocity\n v_init_corr = vectMag(v_ref_corr) + v_init - v_avg\n \n\n else:\n v_init_corr = v_init\n\n\n\n # Calculate apparent RA and Dec from radiant state vector\n orb.ra, orb.dec = eci2RaDec(radiant_eci)\n orb.v_init = v_init\n orb.v_avg = v_avg\n\n # Calculate the apparent azimuth and altitude (geodetic latitude, because ra/dec are calculated from ECI,\n # which is calculated from WGS84 coordinates)\n orb.azimuth_apparent, orb.elevation_apparent = raDec2AltAz(orb.ra, orb.dec, jd_ref, lat_ref, lon_ref)\n\n orb.jd_ref = jd_ref\n orb.lon_ref = lon_ref\n orb.lat_ref = lat_ref\n orb.ht_ref = ht_ref\n orb.lat_geocentric = lat_geocentric\n\n # Assume that the velocity in infinity is the same as the initial velocity (after rotation correction, if\n # it was needed)\n orb.v_inf = v_init_corr\n\n\n # Make sure the velocity of the meteor is larger than the escape velocity\n if v_init_corr**2 > (2*6.67408*5.9722)*1e13/vectMag(eci_ref):\n\n # Calculate the geocentric velocity (sqrt of squared inital velocity minus the square of the Earth escape \n # velocity at the height of the trajectory), units are m/s.\n # Square of the escape velocity is: 2GM/r, where G is the 2014 CODATA-recommended value of \n # 6.67408e-11 m^3/(kg s^2), and the mass of the Earth is M = 5.9722e24 kg\n v_g = np.sqrt(v_init_corr**2 - (2*6.67408*5.9722)*1e13/vectMag(eci_ref))\n\n\n # Calculate the radiant corrected for Earth's rotation (ONLY if the stations were fixed, otherwise it\n # is the same as the apparent radiant)\n ra_corr, dec_corr = eci2RaDec(vectNorm(v_ref_corr))\n\n # Calculate the Local Sidreal Time of the reference trajectory position\n lst_ref = np.radians(jd2LST(jd_ref, np.degrees(lon_ref))[0])\n\n # Calculate the apparent zenith angle\n zc = 
np.arccos(np.sin(dec_corr)*np.sin(lat_geocentric) \\\n + np.cos(dec_corr)*np.cos(lat_geocentric)*np.cos(lst_ref - ra_corr))\n\n # Calculate the zenith attraction correction\n delta_zc = 2*np.arctan2((v_init_corr - v_g)*np.tan(zc/2), v_init_corr + v_g)\n\n # Zenith distance of the geocentric radiant\n zg = zc + np.abs(delta_zc)\n\n ##########################################################################################################\n\n\n\n ### Calculate the geocentric radiant ###\n ##########################################################################################################\n\n # Get the azimuth from the corrected RA and Dec\n azimuth_corr, _ = raDec2AltAz(ra_corr, dec_corr, jd_ref, lat_geocentric, lon_ref)\n\n # Calculate the geocentric radiant\n ra_g, dec_g = altAz2RADec(azimuth_corr, np.pi/2 - zg, jd_ref, lat_geocentric, lon_ref)\n \n\n ### Precess ECI coordinates to J2000 ###\n\n # Convert rectangular to spherical coordiantes\n re, delta_e, alpha_e = cartesianToSpherical(*eci_ref)\n\n # Precess coordinates to J2000\n alpha_ej, delta_ej = equatorialCoordPrecession(jd_ref, J2000_JD.days, alpha_e, delta_e)\n\n # Convert coordinates back to rectangular\n eci_ref = sphericalToCartesian(re, delta_ej, alpha_ej)\n eci_ref = np.array(eci_ref)\n\n ######\n\n # Precess the geocentric radiant to J2000\n ra_g, dec_g = equatorialCoordPrecession(jd_ref, J2000_JD.days, ra_g, dec_g)\n\n\n # Calculate the ecliptic latitude and longitude of the geocentric radiant (J2000 epoch)\n L_g, B_g = raDec2Ecliptic(J2000_JD.days, ra_g, dec_g)\n\n\n # Load the JPL ephemerids data\n jpl_ephem_data = SPK.open(config.jpl_ephem_file)\n \n # Get the position of the Earth (km) and its velocity (km/s) at the given Julian date (J2000 epoch)\n # The position is given in the ecliptic coordinates, origin of the coordinate system is in the centre\n # of the Sun\n earth_pos, earth_vel = calcEarthRectangularCoordJPL(jd_dyn, jpl_ephem_data, sun_centre_origin=True)\n\n # print('Earth position:')\n # print(earth_pos)\n # print('Earth velocity:')\n # print(earth_vel)\n\n # Convert the Earth's position to rectangular equatorial coordinates (FK5)\n earth_pos_eq = rotateVector(earth_pos, np.array([1, 0, 0]), J2000_OBLIQUITY)\n\n # print('Earth position (FK5):')\n # print(earth_pos_eq)\n\n # print('Meteor ECI:')\n # print(eci_ref)\n\n # Add the position of the meteor's trajectory to the position of the Earth to calculate the \n # equatorial coordinates of the meteor (in kilometers)\n meteor_pos = earth_pos_eq + eci_ref/1000\n\n\n # print('Meteor position (FK5):')\n # print(meteor_pos)\n\n # Convert the position of the trajectory from FK5 to heliocentric ecliptic coordinates\n meteor_pos = rotateVector(meteor_pos, np.array([1, 0, 0]), -J2000_OBLIQUITY)\n\n # print('Meteor position:')\n # print(meteor_pos)\n\n\n ##########################################################################################################\n\n # Calculate components of the heliocentric velocity of the meteor (km/s)\n v_h = np.array(earth_vel) + np.array(eclipticToRectangularVelocityVect(L_g, B_g, v_g/1000))\n\n # Calculate the heliocentric velocity in km/s\n v_h_mag = vectMag(v_h)\n\n\n # Calculate the heliocentric ecliptic coordinates of the meteoroid using the method of \n # Sato and Watanabe (2014).\n L_h, B_h, met_v_h = correctedEclipticCoord(L_g, B_g, v_g/1000, earth_vel)\n\n\n # Calculate the solar longitude\n la_sun = jd2SolLonJPL(jd_dyn)\n\n\n # Calculations below done using Dave Clark's Master thesis equations\n\n # Specific orbital 
energy\n epsilon = (vectMag(v_h)**2)/2 - SUN_MU/vectMag(meteor_pos)\n\n # Semi-major axis in AU\n a = -SUN_MU/(2*epsilon*AU)\n\n # Calculate mean motion in rad/day\n n = np.sqrt(G*SUN_MASS/((np.abs(a)*AU*1000.0)**3))*86400.0\n\n\n # Calculate the orbital period in years\n # avoid floating point error if orbit is hyperbolic\n if a > 0: \n T = 2*np.pi*np.sqrt(((a*AU)**3)/SUN_MU)/(86400*SIDEREAL_YEAR)\n else:\n T = np.nan\n\n # Calculate the orbit angular momentum\n h_vect = np.cross(meteor_pos, v_h)\n \n # Calculate inclination\n incl = np.arccos(h_vect[2]/vectMag(h_vect))\n\n\n # Calculate eccentricity\n e_vect = np.cross(v_h, h_vect)/SUN_MU - vectNorm(meteor_pos)\n eccentricity = vectMag(e_vect)\n\n\n # Calculate perihelion distance (source: Jenniskens et al., 2011, CAMS overview paper)\n if eccentricity == 1:\n q = (vectMag(meteor_pos) + np.dot(e_vect, meteor_pos))/(1 + vectMag(e_vect))\n else:\n q = a*(1.0 - eccentricity)\n\n # Calculate the aphelion distance\n Q = a*(1.0 + eccentricity)\n\n\n # Normal vector to the XY reference frame\n k_vect = np.array([0, 0, 1])\n\n # Vector from the Sun pointing to the ascending node\n n_vect = np.cross(k_vect, h_vect)\n\n # Calculate node\n if vectMag(n_vect) == 0:\n node = 0\n else:\n node = np.arctan2(n_vect[1], n_vect[0])\n\n node = node%(2*np.pi)\n\n\n # Calculate argument of perihelion\n if vectMag(n_vect) != 0:\n peri = np.arccos(np.dot(n_vect, e_vect)/(vectMag(n_vect)*vectMag(e_vect)))\n\n if e_vect[2] < 0:\n peri = 2*np.pi - peri\n\n else:\n peri = np.arccos(e_vect[0]/vectMag(e_vect))\n\n peri = peri%(2*np.pi)\n\n\n\n # Calculate the longitude of perihelion\n pi = (node + peri)%(2*np.pi)\n\n # Calculate the latitude of perihelion\n b = calcLatitudeOfPerihelion(peri, node, incl)\n\n\n ### Calculate true anomaly\n true_anomaly = np.arccos(np.dot(e_vect, meteor_pos)/(vectMag(e_vect)*vectMag(meteor_pos)))\n if np.dot(meteor_pos, v_h) < 0:\n true_anomaly = 2*np.pi - true_anomaly\n\n true_anomaly = true_anomaly%(2*np.pi)\n\n ###\n\n\n # Calculate eccentric anomaly\n # not meaningful for eccentricity > 1\n if eccentricity < 1: \n eccentric_anomaly = np.arctan2(np.sqrt(1 - eccentricity**2)*np.sin(true_anomaly), eccentricity \\\n + np.cos(true_anomaly))\n\n # Calculate mean anomaly\n mean_anomaly = eccentric_anomaly - eccentricity * np.sin(eccentric_anomaly)\n mean_anomaly = mean_anomaly % (2 * np.pi)\n else:\n eccentric_anomaly = np.nan\n mean_anomaly = np.nan\n\n # Calculate the time in days since the last perihelion passage of the meteoroid\n # not meaningful for non-closed orbits\n if a > 0:\n dt_perihelion = (mean_anomaly*a**(3.0/2))/0.01720209895\n else:\n dt_perihelion = np.nan\n\n if not np.isnan(dt_perihelion):\n \n # Calculate the date and time of the last perihelion passage\n last_perihelion = jd2Date(jd_dyn - dt_perihelion, dt_obj=True)\n\n else:\n last_perihelion = None\n\n\n # Calculate Tisserand's parameter with respect to Jupiter\n Tj = 2*np.sqrt((1 - eccentricity**2)*a/5.204267)*np.cos(incl) + 5.204267/a\n\n\n\n # Assign calculated parameters\n orb.lst_ref = lst_ref\n orb.jd_dyn = jd_dyn\n orb.v_g = v_g\n orb.ra_g = ra_g\n orb.dec_g = dec_g\n\n orb.meteor_pos = meteor_pos\n orb.L_g = L_g\n orb.B_g = B_g\n\n orb.v_h_x, orb.v_h_y, orb.v_h_z = met_v_h\n orb.L_h = L_h\n orb.B_h = B_h\n\n orb.zc = zc\n orb.zg = zg\n\n orb.v_h = v_h_mag*1000\n\n orb.la_sun = la_sun\n\n orb.a = a\n orb.e = eccentricity\n orb.i = incl\n orb.peri = peri\n orb.node = node\n orb.pi = pi\n orb.b = b\n orb.q = q\n orb.Q = Q\n orb.true_anomaly = true_anomaly\n 
orb.eccentric_anomaly = eccentric_anomaly\n orb.mean_anomaly = mean_anomaly\n orb.last_perihelion = last_perihelion\n orb.n = n\n orb.T = T\n\n orb.Tj = Tj\n\n\n return orb\n\n\n\n\nif __name__ == \"__main__\":\n\n from wmpl.Utils.TrajConversions import raDec2ECI\n\n ### COMMAND LINE ARGUMENTS\n\n # Init the command line arguments parser\n arg_parser = argparse.ArgumentParser(description=\"\"\" Compute the orbit from given trajectory parameters, or recompute the orbit using the given trajectory pickle file and a few modified trajectory values.\n Usage:\n\n a) Recomputing an orbit using an existing trajectory, but modifying one one of the trajectory parameters, e.g. with the initial velocity of 20.5 km/s:\n python -m wmpl.Trajectory.Orbit trajectory.pickle -v 20.5\n\n b) Compute the orbit from scratch:\n python -m wmpl.Trajectory.Orbit -r 317.74 -d 31.72 -v 54.9 -t \"20180614-072809.3\" -a 44.43 -o -81.56 -e 105.8\n\n c) If the apparent radiant was given in J2000, use the --j2000 option.\n \"\"\",\n formatter_class=argparse.RawTextHelpFormatter)\n\n arg_parser.add_argument('pickle_file', type=str, nargs='?', help='Path to the trajectory pickle file.')\n\n arg_parser.add_argument('-r', '--ra', help='Custom right ascention of the apparent radiant (deg) in the epoch of date (use option --j2000 to use the J2000 epoch).', type=float, \\\n default=None)\n\n arg_parser.add_argument('-d', '--dec', help='Custom declination of the apparent radiant (deg) in the epoch of date (use option --j2000 to use the J2000 epoch).', type=float, \\\n default=None)\n\n arg_parser.add_argument('-v', '--vinit', help='Custom initial velocity in km/s.', type=float, \\\n default=None)\n\n arg_parser.add_argument('-w', '--vavg', help='Custom average velocity in km/s.', type=float, \\\n default=None)\n\n arg_parser.add_argument('-t', '--time', help='Reference UTC date and time for which the relative time of the meteor is t = 0. Format: YYYYMMDD-HHMMSS.uuu', \\\n type=str, default=None)\n\n arg_parser.add_argument('-a', '--lat', help='Latitude +N of the reference position on the trajectory (deg).', \\\n type=float, default=None)\n\n arg_parser.add_argument('-o', '--lon', help='Longitude +E of the reference position on the trajectory (deg).', \\\n type=float, default=None)\n\n arg_parser.add_argument('-e', '--ele', help='Height of the reference position on the trajectory (km).', \\\n type=float, default=None)\n\n arg_parser.add_argument('-j', '--j2000', \\\n help=\"Give the radiant in J2000.\", \\\n action=\"store_true\")\n\n arg_parser.add_argument('-k', '--refavg', \\\n help=\"The average position on the trajectory is used as a reference position instead of the initial position (e.g. with MILIG). The correction for Earth's rotation will be applied.\", \\\n action=\"store_true\")\n\n arg_parser.add_argument('-c', '--vrotcorr', \\\n help=\"Correct the magnitude of the velocity due to the Earth's rotation.\", \\\n action=\"store_true\")\n\n arg_parser.add_argument('-s', '--statfixed', \\\n help=\"Shoud be used if the stations were fixed during trajectory estimation (e.g. with MILIG).\", \\\n action=\"store_true\")\n\n arg_parser.add_argument('-m', '--milig', \\\n help=\"MILIG input mode, i.e. the trajectory was estimated with fixed stations and reference average position on the trajectory. 
This replaces calling both options --refavg and --statfixed.\", \\\n action=\"store_true\")\n\n\n # Parse the command line arguments\n cml_args = arg_parser.parse_args()\n\n ############################\n\n\n # Load the pickle file, if given\n if cml_args.pickle_file is not None:\n traj = loadPickle(*os.path.split(cml_args.pickle_file))\n\n else:\n traj = None\n\n\n\n parameter_missing_message = \"To compute the orbit without the existing trajectory file, {:s} must also be provided!\"\n\n if cml_args.ra is not None:\n ra = np.radians(cml_args.ra)\n elif traj is not None:\n ra = traj.orbit.ra\n else:\n print(parameter_missing_message.format('RA'))\n sys.exit()\n\n if cml_args.dec is not None:\n dec = np.radians(cml_args.dec)\n elif traj is not None:\n dec = traj.orbit.dec\n else:\n print(parameter_missing_message.format('Dec'))\n sys.exit()\n\n if cml_args.vinit is not None:\n v_init = 1000*cml_args.vinit\n elif traj is not None:\n v_init = traj.orbit.v_init\n else:\n print(parameter_missing_message.format('initial velocity'))\n sys.exit()\n\n if cml_args.vavg is not None:\n v_avg = 1000*cml_args.vavg\n elif traj is not None:\n v_avg = traj.orbit.v_avg\n elif v_init is not None:\n v_avg = v_init\n else:\n print(parameter_missing_message.format('average velocity'))\n sys.exit()\n\n if cml_args.time is not None:\n dt_ref = datetime.datetime.strptime(cml_args.time, \"%Y%m%d-%H%M%S.%f\")\n jd_ref = datetime2JD(dt_ref)\n elif traj is not None:\n jd_ref = traj.orbit.jd_ref\n else:\n print(parameter_missing_message.format('reference time'))\n sys.exit()\n\n\n # Parse reference location\n if (cml_args.lat is None) and (cml_args.lon is None) and (cml_args.ele is None):\n\n # Reuse the ECI coordinates from the given trajectory file\n if traj is not None:\n eci_ref = traj.state_vect_mini\n\n else:\n print(parameter_missing_message.format('lat, lon, ht'))\n sys.exit()\n\n\n else:\n\n # Parse individual location parameters\n if cml_args.lat is not None:\n lat_ref = np.radians(cml_args.lat)\n elif traj is not None:\n lat_ref = traj.orbit.lat_ref\n else:\n print(parameter_missing_message.format('latitude'))\n sys.exit()\n\n if cml_args.lon is not None:\n lon_ref = np.radians(cml_args.lon)\n elif traj is not None:\n lon_ref = traj.orbit.lon_ref\n else:\n print(parameter_missing_message.format('longitude'))\n sys.exit()\n\n if cml_args.ele is not None:\n ht_ref = 1000*cml_args.ele\n elif traj is not None:\n ht_ref = traj.orbit.ht_ref\n else:\n print(parameter_missing_message.format('height'))\n sys.exit()\n\n\n # Compute the ECI coordinates of the reference point on the trajectory\n eci_ref = geo2Cartesian(lat_ref, lon_ref, ht_ref, jd_ref)\n\n\n\n # Presess to epoch of date if given in J2000\n if cml_args.j2000:\n ra, dec = equatorialCoordPrecession(J2000_JD.days, jd_ref, ra, dec)\n\n # Compute the radiant vector in ECI coordinates\n radiant_eci = np.array(raDec2ECI(ra, dec))\n\n\n # Set the right flags\n reference_init = (not cml_args.refavg) and (not cml_args.milig)\n rotation_correction = cml_args.vrotcorr or cml_args.milig #or cml_args.statfixed\n stations_fixed = cml_args.statfixed or cml_args.milig\n\n\n # # Test values\n # radiant_eci = np.array(raDec2ECI(np.radians(265.16047), np.radians(-18.84373)))\n # v_init = 16424.81\n # v_avg = 15768.71\n # eci_ref = np.array([3757410.98, -2762153.20, 4463901.73])\n # jd_ref = 2457955.794670294970\n\n # Compute the orbit\n orb = calcOrbit(radiant_eci, v_init, v_avg, eci_ref, jd_ref, reference_init=reference_init, \\\n rotation_correction=rotation_correction, 
stations_fixed=stations_fixed)\n\n # Print the results\n print('Ref JD:', jd_ref)\n print('ECI ref:', *eci_ref)\n\n print(orb)\n\n"
] |
[
[
"numpy.array",
"numpy.sin",
"numpy.dot",
"numpy.isnan",
"numpy.zeros",
"numpy.arcsin",
"numpy.tan",
"numpy.degrees",
"numpy.radians",
"numpy.arctan2",
"numpy.sqrt",
"numpy.cos",
"numpy.abs",
"numpy.cross"
]
] |
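The wmpl.Trajectory.Orbit entry above builds its radiant vector via `raDec2ECI` before handing everything to `calcOrbit`. As a minimal stand-alone sketch of what that conversion step produces — assuming the standard spherical-to-Cartesian convention; the actual wmpl implementation may differ in detail:

```python
import numpy as np

def ra_dec_to_eci(ra, dec):
    # Unit vector toward (ra, dec), angles in radians -- the standard
    # spherical-to-Cartesian convention assumed here for wmpl's raDec2ECI.
    return np.array([np.cos(dec) * np.cos(ra),
                     np.cos(dec) * np.sin(ra),
                     np.sin(dec)])

# The commented-out test radiant from the script above:
radiant_eci = ra_dec_to_eci(np.radians(265.16047), np.radians(-18.84373))
print(radiant_eci, np.linalg.norm(radiant_eci))  # norm is 1 by construction
```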
vervacity/ggr-project
|
[
"7a9155c5fb573f7b877c63390f8052fcda5b6f6e"
] |
[
"ggr/analyses/filtering.py"
] |
[
"\"\"\"Contains functions for filtering\n\"\"\"\n\nimport math\nimport pandas as pd\n\n\ndef remove_media_timepoints(in_mat_file, args, out_mat_file):\n \"\"\"Takes an input matrix and removes specific timepoints\n \"\"\"\n media_timepoints = args.misc[\"media_timepoints\"]\n data = pd.read_table(in_mat_file)\n keep_columns = [column for column in data.columns\n if column.split('_')[0] not in media_timepoints]\n data_filt = data.filter(items=keep_columns)\n data_filt.to_csv(out_mat_file, sep='\\t', compression=\"gzip\")\n\n return None\n\n\ndef filter_for_ids(mat_file, keep_ids_file, gz_out_file, opposite=False, counts=False):\n \"\"\"Given a list of ids, filter the matrix file to keep\n only those ids. First column must be ids.\n \"\"\"\n data_df = pd.read_csv(mat_file, sep='\\t', header=0, index_col=0)\n keep_ids = pd.read_csv(keep_ids_file, header=None)\n if opposite:\n \tkeep_data = data_df.loc[~data_df.index.isin(keep_ids[0])]\n else:\n \tkeep_data = data_df.loc[data_df.index.isin(keep_ids[0])]\n if counts:\n keep_data = keep_data.applymap(int)\n keep_data.to_csv(gz_out_file, sep='\\t', compression='gzip')\n\n return None\n\n\ndef remove_mat_columns(mat_file, columns, out_mat_file, remove_zero_rows=True):\n \"\"\"Given a mat file and columns, \n remove these columns from the matrix and return\n \"\"\"\n assert out_mat_file.endswith(\".gz\")\n data = pd.read_table(mat_file, header=0, index_col=0)\n data = data.drop(labels=columns, axis=1)\n\n # also remove rows that are now zero because of dropped columns\n if remove_zero_rows:\n data = data.loc[~(data==0).all(axis=1)]\n\n data.to_csv(out_mat_file, compression=\"gzip\", sep=\"\\t\")\n\n return None\n\n\ndef get_ordered_subsample(in_file, out_file, out_nrow=2000):\n \"\"\"Given an input text file, grab an ordered sample\n \"\"\"\n num_lines = 0\n with open(in_file, \"r\") as fp:\n for line in fp:\n num_lines += 1\n\n skip = math.ceil(float(num_lines) / out_nrow)\n\n num_lines = 0\n with open(out_file, \"w\") as out:\n with open(in_file, \"r\") as fp:\n for line in fp:\n if num_lines % skip == 0:\n out.write(line)\n num_lines += 1\n \n return None\n\n\ndef sort_by_clusters(\n cluster_files,\n out_clusters_file,\n out_list_file):\n \"\"\"Given (cluster_file, cluster_column) in order,\n bring together and sort according to order\n \"\"\"\n # pull first file as initial\n cluster_file, cluster_cols = cluster_files[0]\n data = pd.read_table(cluster_file)\n sort_columns = cluster_cols\n #data = data[[\"id\", cluster_col]]\n \n # read in the rest\n for cluster_file, cluster_cols in cluster_files[1:]:\n cluster_data = pd.read_table(cluster_file)\n #cluster_data = cluster_data[[\"id\", cluster_col]]\n data = data.merge(cluster_data, on=\"id\")\n sort_columns += cluster_cols\n \n # sort and save out. shuffle first to spread more evenly\n data = data.sample(frac=1.0, random_state=42)\n data_sorted = data.sort_values(sort_columns, ascending=True)\n data_sorted.to_csv(out_clusters_file, sep=\"\\t\", index=False)\n data_sorted.to_csv(out_list_file, columns=[\"id\"], compression=\"gzip\", sep=\"\\t\",\n index=False, header=False)\n\n return None\n"
] |
[
[
"pandas.read_table",
"pandas.read_csv"
]
] |
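The `filter_for_ids` helper in the ggr/analyses/filtering.py entry above boils down to a single `isin` mask over the index; the `opposite` flag just inverts it. A self-contained sketch of that core step (the frame and id list here are invented for illustration):

```python
import pandas as pd

# Toy matrix: the index plays the role of the first (id) column.
mat = pd.DataFrame({"d0_rep1": [1, 2, 3], "d3_rep1": [4, 5, 6]},
                   index=["geneA", "geneB", "geneC"])
keep_ids = pd.Series(["geneA", "geneC"])      # stands in for keep_ids[0]

kept = mat.loc[mat.index.isin(keep_ids)]      # opposite=False branch
dropped = mat.loc[~mat.index.isin(keep_ids)]  # opposite=True branch
print(kept)
print(dropped)
```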
graphnj/mmdetection
|
[
"a53cc3766cf2bf54a28392212d07cff4486f6bb3"
] |
[
"mmdet/core/export/pytorch2onnx.py"
] |
[
"from functools import partial\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.runner import load_checkpoint\n\n\ndef generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config):\n \"\"\"Prepare sample input and wrap model for ONNX export.\n\n The ONNX export API only accept args, and all inputs should be\n torch.Tensor or corresponding types (such as tuple of tensor).\n So we should call this function before exporting. This function will:\n\n 1. generate corresponding inputs which are used to execute the model.\n 2. Wrap the model's forward function.\n\n For example, the MMDet models' forward function has a parameter\n ``return_loss:bool``. As we want to set it as False while export API\n supports neither bool type or kwargs. So we have to replace the forward\n like: ``model.forward = partial(model.forward, return_loss=False)``\n\n Args:\n config_path (str): the OpenMMLab config for the model we want to\n export to ONNX\n checkpoint_path (str): Path to the corresponding checkpoint\n input_config (dict): the exactly data in this dict depends on the\n framework. For MMSeg, we can just declare the input shape,\n and generate the dummy data accordingly. However, for MMDet,\n we may pass the real img path, or the NMS will return None\n as there is no legal bbox.\n\n Returns:\n tuple: (model, tensor_data) wrapped model which can be called by \\\n model(*tensor_data) and a list of inputs which are used to execute \\\n the model while exporting.\n \"\"\"\n\n model = build_model_from_cfg(config_path, checkpoint_path)\n one_img, one_meta = preprocess_example_input(input_config)\n tensor_data = [one_img]\n model.forward = partial(\n model.forward, img_metas=[[one_meta]], return_loss=False)\n\n # pytorch has some bug in pytorch1.3, we have to fix it\n # by replacing these existing op\n opset_version = 11\n # put the import within the function thus it will not cause import error\n # when not using this function\n try:\n from mmcv.onnx.symbolic import register_extra_symbolics\n except ModuleNotFoundError:\n raise NotImplementedError('please update mmcv to version>=v1.0.4')\n register_extra_symbolics(opset_version)\n\n return model, tensor_data\n\n\ndef build_model_from_cfg(config_path, checkpoint_path):\n \"\"\"Build a model from config and load the given checkpoint.\n\n Args:\n config_path (str): the OpenMMLab config for the model we want to\n export to ONNX\n checkpoint_path (str): Path to the corresponding checkpoint\n\n Returns:\n torch.nn.Module: the built model\n \"\"\"\n from mmdet.models import build_detector\n\n cfg = mmcv.Config.fromfile(config_path)\n # import modules from string list.\n if cfg.get('custom_imports', None):\n from mmcv.utils import import_modules_from_strings\n import_modules_from_strings(**cfg['custom_imports'])\n cfg.model.pretrained = None\n cfg.data.test.test_mode = True\n\n # build the model\n model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)\n load_checkpoint(model, checkpoint_path, map_location='cpu')\n model.cpu().eval()\n return model\n\n\ndef preprocess_example_input(input_config):\n \"\"\"Prepare an example input image for ``generate_inputs_and_wrap_model``.\n\n Args:\n input_config (dict): customized config describing the example input.\n\n Returns:\n tuple: (one_img, one_meta), tensor of the example input image and \\\n meta information for the example input image.\n\n Examples:\n >>> from mmdet.core.export import preprocess_example_input\n >>> input_config = {\n >>> 'input_shape': (1,3,224,224),\n >>> 
'input_path': 'demo/demo.jpg',\n >>> 'normalize_cfg': {\n >>> 'mean': (123.675, 116.28, 103.53),\n >>> 'std': (58.395, 57.12, 57.375)\n >>> }\n >>> }\n >>> one_img, one_meta = preprocess_example_input(input_config)\n >>> print(one_img.shape)\n torch.Size([1, 3, 224, 224])\n >>> print(one_meta)\n {'img_shape': (224, 224, 3),\n 'ori_shape': (224, 224, 3),\n 'pad_shape': (224, 224, 3),\n 'filename': '<demo>.png',\n 'scale_factor': 1.0,\n 'flip': False}\n \"\"\"\n input_path = input_config['input_path']\n input_shape = input_config['input_shape']\n one_img = mmcv.imread(input_path)\n if 'normalize_cfg' in input_config.keys():\n normalize_cfg = input_config['normalize_cfg']\n mean = np.array(normalize_cfg['mean'], dtype=np.float32)\n std = np.array(normalize_cfg['std'], dtype=np.float32)\n one_img = mmcv.imnormalize(one_img, mean, std)\n one_img = mmcv.imresize(one_img, input_shape[2:][::-1]).transpose(2, 0, 1)\n one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(\n True)\n (_, C, H, W) = input_shape\n one_meta = {\n 'img_shape': (H, W, C),\n 'ori_shape': (H, W, C),\n 'pad_shape': (H, W, C),\n 'filename': '<demo>.png',\n 'scale_factor': 1.0,\n 'flip': False\n }\n\n return one_img, one_meta\n"
] |
[
[
"numpy.array",
"torch.from_numpy"
]
] |
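The pytorch2onnx.py entry above works around `torch.onnx.export` accepting only tensor arguments by binding the non-tensor kwargs with `functools.partial` before tracing. A minimal sketch of that trick; `ToyDetector` is a hypothetical stand-in for an MMDet model:

```python
from functools import partial

import torch

class ToyDetector(torch.nn.Module):
    # Forward takes kwargs that the ONNX exporter cannot pass.
    def forward(self, img, img_metas=None, return_loss=True):
        if return_loss:
            raise RuntimeError("training path, not exportable")
        return img.mean(dim=(2, 3))  # pretend this is a prediction

model = ToyDetector().eval()

# Bind the non-tensor kwargs up front, as generate_inputs_and_wrap_model
# does, so the exporter only has to supply the image tensor.
model.forward = partial(model.forward, img_metas=[[{}]], return_loss=False)

dummy = torch.randn(1, 3, 224, 224, requires_grad=True)
torch.onnx.export(model, (dummy,), 'toy.onnx', opset_version=11)
```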
modi712/Computer-Vision
|
[
"a34d3d73f883beae812c50b879f4dc8ef679b3ac"
] |
[
"src/projectSift.py"
] |
[
"# Single projection original code\n\nimport argparse\nimport cv2\nimport numpy as np\nimport math\nimport os\nfrom objloader_simple import *\n\n# PARAMETERS\nTHRESHOLD = 10\t# min number of matches to be recognized\n# 105 for mark4, 65 - mark2\n# for sift: 10 for mark4\nSIZE = 3\t\t# size for the display obj\n# 3 for rat,fox, 1 for wolf, 100 for Rixa\nranthresh = 5.0\t#5.0\n#SIFT\nsig = 2\nloweRatio = 0.55\t# 0.55 criteria for selectionf of features\nbestMatchNumber = 2\t#no of matches for points 2 for lowe ratio test\n#PATHS\nref ='reference/mark2.jpg'\nmod ='models/rat.obj'\n\n#This functions loads the target surface image,\ndef main():\n\n homo = None\n l= None\n\n# camera_parameters = np.array([[800, 0, 320], [0, 800, 240], [0, 0, 1]])\n camera_parameters = np.array([[824.05762458, 0, 381.10745975],[0, 839.01299642, 134.22842609],[0, 0, 1]])\n # create ORB/SIFT keypoint detector\n# orb = cv2.ORB_create()\n sift = cv2.xfeatures2d.SIFT_create(sigma=sig)#<>sigma\n \n # create BFMatcher object based on hamming distance\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # load the reference surface that will be searched in the video stream\n dir_name = os.getcwd()\n model = cv2.imread(os.path.join(dir_name, ref), 0)\n # Compute model keypoints and its descriptors\n# kp_model, des_model = orb.detectAndCompute(model, None)\n kp_model,des_model = sift.detectAndCompute(model,None)\n kp_modelKP = kp_model\n kp_model = np.float32([k.pt for k in kp_model])\n # Load 3D model from OBJ file\n obj = OBJ(os.path.join(dir_name, mod ), swapyz=True)\n # init video capture\n cap = cv2.VideoCapture(0)\n\n while True:\n # read the current frame\n ret, frame = cap.read()\n if not ret:\n print (\"Unable to capture video\")\n return\n # find and draw the keypoints of the frame\n #orb\n# kp_frame, des_frame = orb.detectAndCompute(frame, None)\n #sift\n kp_frame,des_frame = sift.detectAndCompute(frame,None)\n kp_frameKP = kp_frame\n kp_frame = np.float32([k.pt for k in kp_frame])\n # match frame descriptors with model descriptors\n try:\n# \tmatches = bf.match(des_model, des_frame)\n \tmatches = matcher(kp_model,kp_frame,des_model,des_frame)\n except:\n \tprint(\"Too Dark\")\n \tcap.release()\n \treturn 0\n\n # sort them in the order of their distance\n # the lower the distance, the better the match\n# matches = sorted(matches, key=lambda x: x.distance)\n\n # compute Homography if enough matches are found\n if len(matches) > THRESHOLD:\n # differenciate between source points and destination points\n print( \"Enough matches found - %d/%d\" % (len(matches), THRESHOLD) )\n #orb\n# src_pts = np.float32([kp_model[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)\n# dst_pts = np.float32([kp_frame[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)\n# sift\n src_pts = np.float32([kp_model[i] for (_, i) in matches])\n dst_pts = np.float32([kp_frame[i] for (i, _) in matches])\n # compute Homography\n homo, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, ranthresh)\n \n # Draw a rectangle that marks the found model in the frame\n if args.rectangle:\n h, w = model.shape\n pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)\n # project corners into frame\n dst = cv2.perspectiveTransform(pts, homo)\n # connect them with lines\n frame = cv2.polylines(frame, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)\n \n # if a valid homography matrix was found render cube on model plane\n if (homo is not None )and (not args.model):\n try:\n # obtain 3D projection matrix from homography 
matrix and camera parameters\n (projection,l) = projection_matrix(camera_parameters, homo)\n # project cube or model\n frame = render(frame, obj, projection, model, False)\n #frame = render(frame, model, projection)\n except:\n pass\n \n # print pose of camera\n if args.pose:\n \tprint('Pose of camera')\n \tprint(l) \n # draw first 10 matches.\n# if args.matches:\n# frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:10], 0, flags=2)\n# frame = cv2.drawMatches(model, kp_modelKP, frame, kp_frameKP, matches[:10], 0, flags=2)\n\n # show result\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n else:\n print( \"Not enough matches found - %d/%d\" % (len(matches), THRESHOLD) )\n # draw first 10 matches.\n# if args.matches:\n# frame = cv2.drawMatches(model, kp_model, frame, kp_frame, matches[:10], 0, flags=2)\n# frame = cv2.drawMatches(model, kp_modelKP, frame, kp_frameKP, matches[:10], 0, flags=2)\n # show result\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n return 0\n#---END Main---#\n\ndef matcher(kp1,kp2,features1,features2):\n\tmatcher = cv2.DescriptorMatcher_create(\"BruteForce\")\n\trawMatches = matcher.knnMatch(features1, features2, bestMatchNumber)\n\t# keeping only good matches wrt to lowe ratio ## check\n\tmatches=[]\n\tfor m,n in rawMatches:\n\t\tif m.distance < n.distance*loweRatio:\n\t\t\tmatches.append((m.trainIdx,n.queryIdx))\n\treturn matches\n\n#Render a loaded obj model into the current video frame\ndef render(img, obj, projection, model, color=False):\n\n vertices = obj.vertices\n scale_matrix = np.eye(3) * SIZE\n h, w = model.shape\n\n for face in obj.faces:\n face_vertices = face[0]\n points = np.array([vertices[vertex - 1] for vertex in face_vertices])\n points = np.dot(points, scale_matrix)\n # render model in the middle of the reference surface. 
To do so,\n # model points must be displaced\n points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])\n dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)\n imgpts = np.int32(dst)\n if color is False:\n cv2.fillConvexPoly(img, imgpts, (137, 27, 211))\n else:\n color = hex_to_rgb(face[-1])\n color = color[::-1] # reverse\n cv2.fillConvexPoly(img, imgpts, color)\n\n return img\n\ndef projection_matrix(camera_parameters, homography):\n \"\"\"\n From the camera calibration matrix and the estimated homography\n compute the 3D projection matrix\n \"\"\"\n # Compute rotation along the x and y axis as well as the translation\n homography = homography * (-1)\n rot_and_transl = np.dot(np.linalg.inv(camera_parameters), homography)\n col_1 = rot_and_transl[:, 0]\n col_2 = rot_and_transl[:, 1]\n col_3 = rot_and_transl[:, 2]\n # normalise vectors\n l = math.sqrt(np.linalg.norm(col_1, 2) * np.linalg.norm(col_2, 2))\n rot_1 = col_1 / l\n rot_2 = col_2 / l\n translation = col_3 / l\n # compute the orthonormal basis\n c = rot_1 + rot_2\n p = np.cross(rot_1, rot_2)\n d = np.cross(c, p)\n rot_1 = np.dot(c / np.linalg.norm(c, 2) + d / np.linalg.norm(d, 2), 1 / math.sqrt(2))\n rot_2 = np.dot(c / np.linalg.norm(c, 2) - d / np.linalg.norm(d, 2), 1 / math.sqrt(2))\n rot_3 = np.cross(rot_1, rot_2)\n # finally, compute the 3D projection matrix from the model to the current frame\n projection = np.stack((rot_1, rot_2, rot_3, translation)).T\n return (np.dot(camera_parameters, projection),projection)\n#---projection END---#\n\n#Helper function to convert hex strings to RGB\ndef hex_to_rgb(hex_color):\n \n hex_color = hex_color.lstrip('#')\n h_len = len(hex_color)\n return tuple(int(hex_color[i:i + h_len // 3], 16) for i in range(0, h_len, h_len // 3))\n\n\n# Command line argument parsing\nparser = argparse.ArgumentParser(description='Augmented reality application')\n\nparser.add_argument('-mo','--model', help = 'do not draw model on target surface on frame', action = 'store_true')\nparser.add_argument('-r','--rectangle', help = 'draw rectangle delimiting target surface on frame', action = 'store_true')\n#parser.add_argument('-ma','--matches', help = 'draw matches between keypoints', action = 'store_true')\nparser.add_argument('-po','--pose', help = 'print camera pose for each frame', action = 'store_true')\n\nargs = parser.parse_args()\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"numpy.array",
"numpy.dot",
"numpy.linalg.norm",
"numpy.linalg.inv",
"numpy.eye",
"numpy.float32",
"numpy.stack",
"numpy.int32",
"numpy.cross"
]
] |
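The `matcher()` helper in the projectSift.py entry above is a brute-force 2-NN match followed by Lowe's ratio test (`loweRatio = 0.55`), returning `(trainIdx, queryIdx)` pairs. A numpy-only sketch of the same logic, with synthetic descriptors standing in for real SIFT output:

```python
import numpy as np

def lowe_ratio_matches(des_query, des_train, ratio=0.55):
    # For each query descriptor, find its two nearest train descriptors and
    # keep the match only if the best is much closer than the runner-up.
    matches = []
    for qi, d in enumerate(des_query):
        dists = np.linalg.norm(des_train - d, axis=1)
        nn = np.argsort(dists)[:2]
        if dists[nn[0]] < ratio * dists[nn[1]]:
            matches.append((int(nn[0]), qi))  # (trainIdx, queryIdx), as above
    return matches

rng = np.random.default_rng(0)
des_model = rng.normal(size=(20, 128))                        # fake descriptors
des_frame = des_model + 0.01 * rng.normal(size=des_model.shape)
print(len(lowe_ratio_matches(des_model, des_frame)))          # typically all 20 survive
```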
soar-telescope/sami
|
[
"8a9e2b28e3e7d753d05220abd0bac6912fa36ad1"
] |
[
"soar_simager/data_reduction/reduce.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf8 -*-\n\"\"\"\n SAMI XJoin\n\n This script simply joins the four existing extensions inside a FITS file\n created during observations with SAMI (SAM Imager). During the reduce,\n it also fits a 2nd degree polynomium to the OVERSCAN region that is\n subtracted from the corresponding image.\n\n The user also may want to add flags in order to reduce the images\n according to the following options (in order):\n\n - BIAS subtraction;\n - DARK subtraction;\n - Remove hot pixels and cosmic rays;\n - Remove overglow using a long exposure DARK image;\n - Divide by the FLAT;\n - Divide by the exposure time;\n\n The documentation for each reduce is shown in the corresponding function.\n\n Todo\n ----\n - Use multithread or multiprocessing to run this script faster.\n - Use astropy.ccdproc to reduce the data.\n\n Bruno Quint (bquint at ctio.noao.edu)\n May 2016\n\n Thanks to Andrei Tokovinin and Claudia M. de Oliveira for the ideas that\n were implemented here.\n\"\"\"\n\nimport numpy as _np\n\nfrom ccdproc import cosmicray_lacosmic as _cosmicray_lacosmic\nfrom scipy import stats\n\nfrom astropy import wcs\nfrom astropy.coordinates import SkyCoord\nfrom astropy import units as u\n\nfrom soar_simager.io import pyfits as _pyfits\nfrom soar_simager.io.logging import get_logger\nfrom soar_simager.tools import slices\n\nlogger = get_logger(__name__)\n\n\n# Piece of code from cosmics.py\n# We define the laplacian kernel to be used\n_laplkernel = _np.array([[0.0, -1.0, 0.0], [-1.0, 4.0, -1.0], [0.0, -1.0, 0.0]])\n\n# Other kernels :\n_growkernel = _np.ones((3, 3))\n\n# dilation structure for some morphological operations\n_dilstruct = _np.ones((5, 5))\n_dilstruct[0, 0] = 0\n_dilstruct[0, 4] = 0\n_dilstruct[4, 0] = 0\n_dilstruct[4, 4] = 0\n\n\nclass Reducer:\n \"\"\"\n This class holds all the methods used to join the extensions within a\n FITS file obtained with SAMI.\n\n Parameters\n ----------\n zero_file : str\n The filename of the master zero that will be used in subtraction.\n\n clean : bool\n Clean bad collumns by taking the _median value of the pixels around\n them.\n\n cosmic_rays : bool\n Clean cosmic rays using LACosmic package. 
See noted bellow for\n reference.\n\n dark_file : str\n Master Dark's filename to be used for dark subtraction.\n\n debug : bool\n Turn on debug mode with lots of printing.\n\n flat_file : str\n Master Flat filename to be used for normalization.\n\n glow_file : str\n Master file that contains the lateral glowings sometimes present in\n SAMI's data.\n\n time : bool\n Divide each pixel's values by the exposure time and update header.\n\n verbose : bool\n Turn on verbose mode (not so talktive as debug mode).\n\n Attributes\n ----------\n gain : list\n A list containing the gain that converts ADU values to eletrons for \n each simager amplifier.\n \n read_noise : list\n A list containing the read noise on each simager amplifier.\n\n See also\n --------\n LACosmic - http://www.astro.yale.edu/dokkum/lacosmic/\n \"\"\"\n\n gain = [2.6, 2.6, 2.6, 2.6]\n read_noise = [10., 10., 10., 10.]\n\n def __init__(self, clean=False, cosmic_rays=False, dark_file=None,\n debug=False, flat_file=None, glow_file=None, merge=False,\n overscan=False, norm_flat=False, time=False, verbose=False,\n zero_file=None):\n\n logger.setLevel(\"ERROR\")\n\n if verbose:\n logger.setLevel(\"INFO\")\n\n if debug:\n logger.setLevel(\"DEBUG\")\n\n self.clean = clean\n self.cosmic_rays = cosmic_rays\n self.dark_file = dark_file\n self.flat_file = flat_file\n self.glow_file = glow_file\n self._merge = merge\n self.norm_flat = norm_flat\n self.overscan = overscan\n self.time = time\n self.zero_file = zero_file\n\n return\n\n def reduce(self, hdu_list, prefix=\"\"):\n\n # If the number of extensions is just 1, then the file is already\n # processed.\n if len(hdu_list) == 1:\n return hdu_list, ''\n\n # Merge file\n data, header, prefix = self.merge(hdu_list)\n\n # Correct ZERO\n data, header, prefix = self.correct_zero(\n data, header, prefix, self.zero_file\n )\n\n # Correct DARK\n data, header, prefix = self.correct_dark(\n data, header, prefix, self.dark_file\n )\n\n # Remove cosmic rays and hot pixels\n data, header, prefix = self.remove_cosmic_rays(\n data, header, prefix, self.cosmic_rays\n )\n\n # Remove lateral glows\n data, header, prefix = self.correct_lateral_glow(\n data, header, prefix, self.glow_file\n )\n\n # Correct FLAT\n data, header, prefix = self.correct_flat(\n data, header, prefix, self.flat_file\n )\n\n # Normalize by the EXPOSURE TIME\n data, header, prefix = self.divide_by_exposuretime(\n data, header, prefix, self.time\n )\n\n # Clean known bad columns and lines\n data, header, prefix = self.clean_hot_columns_and_lines(\n data, header, prefix, self.clean\n )\n\n # Add WCS\n data, header = self.create_wcs(\n data, header\n )\n\n return data, header, prefix\n\n @staticmethod\n def create_wcs(data, header):\n \"\"\"\n Creates a first guess of the WCS using the telescope coordinates, the\n CCDSUM (binning), position angle and plate scale.\n\n Parameters\n ----------\n data : numpy.ndarray\n 2D array with the data.\n\n header : astropy.io.fits.Header\n Primary Header to be updated.\n\n Returns\n -------\n header : astropy.io.fits.Header\n Primary Header with updated WCS information.\n \"\"\"\n h = header\n\n if 'EQUINOX' not in h:\n h['EQUINOX'] = 2000.\n\n if 'EPOCH' not in h:\n h['EPOCH'] = 2000.\n\n if h['PIXSCAL1'] != h['PIXSCAL2']:\n logger.warning('Pixel scales for X and Y do not mach.')\n\n if h['OBSTYPE'] != 'OBJECT':\n return data, header\n\n binning = _np.array([int(b) for b in h['CCDSUM'].split(' ')])\n plate_scale = h['PIXSCAL1'] * u.arcsec\n p = plate_scale.to('degree').value\n w = wcs.WCS(naxis=2)\n\n 
try:\n coordinates = SkyCoord(ra=h['RA'], dec=h['DEC'],\n unit=(u.hourangle, u.deg))\n\n except ValueError:\n\n logger.error(\n '\"RA\" and \"DEC\" missing. Using \"TELRA\" and \"TELDEC\" instead.')\n\n coordinates = SkyCoord(ra=h['TELRA'], dec=h['TELDEC'],\n unit=(u.hourangle, u.deg))\n\n ra = coordinates.ra.to('degree').value\n dec = coordinates.dec.to('degree').value\n\n w.wcs.crpix = [data.shape[1] / 2, data.shape[0] / 2]\n w.wcs.cdelt = p * binning\n w.wcs.crval = [ra, dec]\n w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\"]\n\n wcs_header = w.to_header()\n\n theta = _np.deg2rad(h['DECPANGL'])\n wcs_header['cd1_1'] = p * binning[0] * _np.cos(theta)\n wcs_header['cd2_2'] = p * binning[0] * _np.cos(theta)\n wcs_header['cd1_2'] = p * binning[0] * _np.sin(theta)\n wcs_header['cd2_1'] = - p * binning[0] * _np.sin(theta)\n\n for key in wcs_header.keys():\n header[key] = wcs_header[key]\n\n return data, header\n\n @staticmethod\n def check_header(hdu_list, prefix):\n\n for i in range(5):\n\n h = hdu_list[i].header\n\n try:\n h['RADESYSa'] = h['RADECSYS']\n del h['RADECSYS']\n except KeyError:\n pass\n\n if 'EQUINOX' in h and 'unavail' in h['EQUINOX']:\n h['EQUINOX'] = 2000.\n\n if 'EPOCH' not in h:\n h['EPOCH'] = 2000.\n\n return hdu_list, prefix\n\n @staticmethod\n def clean_column(_data, x0, y0, yf, n=5):\n \"\"\"\n Substitutes a single column by the _median of the neighbours columns.\n\n Args:\n\n _data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n x0 (int) : X position of the pixel to be cleaned.\n\n y0 (int) : Start position of the column.\n\n yf (int) : Final position of the column.\n\n n (int, optional) : Number of neighbour columns (Default=5).\n\n Returns:\n\n _data (numpy.ndarray) : Processed 2D numpy array.\n\n See also:\n\n Reducer.clean_columns\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n\n if not isinstance(_data, _np.ndarray):\n raise (TypeError, 'Please, use a np.array as input')\n\n if _data.ndim is not 2:\n raise (TypeError, 'Data contains %d dimensions while it was '\n 'expected 2 dimensions.')\n\n t1 = _data[y0:yf, x0 - n:x0]\n t2 = _data[y0:yf, x0 + 1:x0 + n]\n t = _np.hstack((t1, t2))\n _data[y0:yf, x0] = _np.median(t, axis=1)\n\n return _data\n\n def clean_columns(self, data, header):\n \"\"\"\n Clean the known bad columns that exists in most of SAMI's, SOI's or\n SIFS's data. This method is meant to be overwritten via inheritance.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n return data, header\n\n @staticmethod\n def clean_line(_data, x0, xf, y, n=5):\n \"\"\"\n Substitutes a single column by the _median of the neighbours columns.\n\n Args:\n\n _data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n x0 (int) : Start position of the line.\n\n xf (int) : Final position of the line.\n\n y (int) : Y position of the pixel to be cleaned.\n\n n (int) : Number of neighbour columns. 
(Default=5)\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_columns\n Reducer.clean_lines\n \"\"\"\n if not isinstance(_data, _np.ndarray):\n raise (TypeError, 'Please, use a np.array as input')\n\n if _data.ndim is not 2:\n raise (TypeError, 'Data contains %d dimensions while it was '\n 'expected 2 dimensions.')\n\n t1 = _data[y - n:y, x0:xf]\n t2 = _data[y + 1:y + n, x0:xf]\n t = _np.vstack((t1, t2))\n _data[y, x0:xf] = _np.median(t, axis=0)\n\n return _data\n\n def clean_lines(self, data, header):\n \"\"\"\n Clean the known bad lines that exists in most of SAMI's, SOI's or\n SIFS's data. This method is meant to be overwritten via inheritance.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n return data, header\n\n def clean_hot_columns_and_lines(self, data, header, prefix, clean):\n \"\"\"\n Clean known hot columns and lines from SAMI's images.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix (str) : File prefix that is added after each reduce.\n\n clean (bool) : Should I perform the clean?\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_columns\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n if clean is True:\n\n data = self.clean_columns(data, header)\n data = self.clean_lines(data, header)\n header.add_history('Cleaned bad columns and lines.')\n prefix = 'c' + prefix\n\n return data, header, prefix\n\n @staticmethod\n def correct_dark(data, header, prefix, dark_file=None):\n \"\"\"\n Subtract the dark file from data and add HISTORY to header.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix : str\n File prefix that is added after each reduce.\n\n dark_file: str | None\n Master Dark filename. If None is given, nothing is done.\n \"\"\"\n\n if not isinstance(prefix, str):\n raise (TypeError, 'Expected string but found %s instead.' %\n prefix.__class__)\n\n if dark_file is not None:\n\n dark = _pyfits.open(dark_file)[0]\n dark.data = dark.data / float(dark.header['EXPTIME'])\n\n data = data - dark.data * header['EXPTIME']\n header['DARKFILE'] = dark_file\n prefix = 'd' + prefix\n\n return data, header, prefix\n\n @staticmethod\n def correct_flat(data, header, prefix, flat_file):\n \"\"\"\n Divide the image by the master flat file and add HISTORY to header.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix (str) : File prefix that is added after each reduce.\n\n flat_file (str or None) : Master flat filename. If None is given,\n nothing is done.\n \"\"\"\n if not isinstance(prefix, str):\n raise (TypeError, 'Expected string but found %s instead.' 
%\n prefix.__class__)\n\n if flat_file is not None:\n flat = _pyfits.open(flat_file)[0]\n\n data /= flat.data\n header['FLATFILE'] = flat_file\n prefix = 'f' + prefix\n\n return data, header, prefix\n\n def correct_lateral_glow(self, data, header, prefix, glow_file):\n \"\"\"\n Remove lateral glows by scaling the glows in the `glow_file` based\n on `data` and subtracting it.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix : str\n Filename prefix to flag images that were clean.\n\n glow_file : str\n Path to a long dark file that contains the lateral glow.\n \"\"\"\n\n if glow_file is not None:\n\n # Create four different regions.\n regions = [\n [_np.median(data[539:589, 6:56]), # Top Left\n _np.median(data[539:589, 975:1019])], # Top Right\n [_np.median(data[449:506, 6:56]), # Bottom Left\n _np.median(data[449:506, 975:1019])] # Bottom Right\n ]\n\n min_std_region = _np.argmin(regions) % 2\n\n # The upper reg has background lower or equal to the lower reg\n midpt1 = regions[0][min_std_region]\n midpt2 = regions[1][min_std_region]\n diff = midpt2 - midpt1\n\n dark = _pyfits.getdata(glow_file)\n dark = self.clean_columns(dark)\n dark = self.clean_lines(dark)\n\n dark_regions = [\n [_np.median(dark[539:589, 6:56]), # Top Left\n _np.median(dark[539:589, 975:1019])], # Top Right\n [_np.median(dark[449:506, 6:56]), # Bottom Left\n _np.median(dark[449:506, 975:1019])] # Bottom Right\n ]\n\n dark_midpt1 = dark_regions[0][min_std_region]\n dark_midpt2 = dark_regions[1][min_std_region]\n\n dark_diff = dark_midpt2 - dark_midpt1\n dark -= dark_midpt1\n\n k = diff / dark_diff\n temp_dark = dark * k\n data -= midpt1\n data -= temp_dark\n\n header.add_history('Lateral glow removed using %s file' % glow_file)\n prefix = 'g' + prefix\n\n return data, header, prefix\n\n @staticmethod\n def correct_zero(data, header, prefix, zero_file):\n \"\"\"\n Subtract zero from data.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix (str) : File prefix that is added after each reduce.\n\n zero_file (str | None) : Master Bias filename. 
If None is given,\n nothing is done.\n\n \"\"\"\n from os.path import abspath\n\n if zero_file is not None:\n\n zero = _pyfits.open(abspath(zero_file))[0]\n data = data - zero.data\n header['BIASFILE'] = zero_file\n prefix = 'z' + prefix\n\n return data, header, prefix\n\n @staticmethod\n def divide_by_exposuretime(data, header, prefix, time):\n \"\"\"\n Divide the image by the exposure time and add HISTORY to header.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix : str\n File prefix that is added after each reduce.\n\n time: bool\n Divide image by exposure time?\n \"\"\"\n if time is True:\n\n h = header\n\n try:\n\n h['UNITS'] = 'adu / s'\n t = float(h['EXPTIME'])\n d = data / t\n\n header = h\n data = d\n\n except AttributeError:\n header = h\n\n except KeyError:\n pass\n\n prefix = 't' + prefix\n\n return data, header, prefix\n\n def get_header(self, hdu_source):\n \"\"\"\n Return the header of the primary HDU extension of a FITS file.\n\n Args:\n\n hdu_source (str or astropy.io.fits.HDUList) : HDUList or name of the\n file which contains a HDUList.\n \"\"\"\n from os.path import exists\n\n if isinstance(hdu_source, str):\n\n if not exists(hdu_source):\n raise (IOError, '%s file not found.' % hdu_source)\n\n hdu_source = _pyfits.open(hdu_source)\n\n h0 = hdu_source[0].header\n h1 = hdu_source[1].header\n\n h0.append('UNITS')\n h0.set('UNITS', value='ADU', comment='Pixel intensity units.')\n\n # Save the CCD binning in the main header\n h0['CCDSUM'] = h1['CCDSUM']\n h0['DETSEC'] = h1['DETSEC']\n\n # Save the area that corresponds to each amplifier\n bin_size = _np.array(h0['CCDSUM'].split(' '), dtype=int)\n\n dx, dy = slices.iraf2python(h0['DETSEC'])\n dx, dy = dx // bin_size[0], dy // bin_size[1]\n\n h0['AMP_SEC1'] = slices.python2iraf(\n dx[0], dx[1], dy[0], dy[1])\n\n h0['AMP_SEC2'] = slices.python2iraf(\n dx[0] + dx[1], dx[1] + dx[1], dy[0], dy[1])\n\n h0['AMP_SEC3'] = slices.python2iraf(\n dx[0], dx[1], dy[0] + dy[1], dy[1] + dy[1])\n\n h0['AMP_SEC4'] = slices.python2iraf(\n dx[0] + dx[1], dx[1] + dx[1], dy[0] + dy[1], dy[1] + dy[1])\n\n return h0\n\n def get_prefix(self):\n \"\"\"\n Return a prefix to be added to the file deppending on the data\n reduction steps.\n\n Returns\n -------\n prefix : (str)\n The prefix that can be used.\n m = merged amplifiers.\n z = zero subtracted.\n f = flat corrected.\n \"\"\"\n\n prefix = 'm_'\n\n if self.zero_file:\n prefix = 'z' + prefix\n\n if self.dark_file:\n prefix = 'd' + prefix\n\n if self.flat_file:\n prefix = 'f' + prefix\n\n return prefix\n\n def merge(self, hdul):\n \"\"\"\n Open a FITS image and try to join its extensions in a single array.\n\n Args:\n\n hdul (astropy.io.fits.HDUList) : an HDUList that contains one\n PrimaryHDU and four ImageHDU\n\n \"\"\"\n w, h = slices.iraf2python(hdul[1].header['DETSIZE'])\n\n if len(hdul) is 1:\n logger.warning('%s file contains a single extension. 
' % hdul +\n 'Not doing anything')\n return hdul[0].data\n\n # Correct for binning\n bin_size = _np.array(hdul[1].header['CCDSUM'].split(' '),\n dtype=int)\n bw, bh = w[1] // bin_size[0], h[1] // bin_size[1]\n\n # Create empty full frame\n new_data = _np.empty((bh, bw), dtype=float)\n\n # Process each extension\n for i in range(1, 5):\n tx, ty = slices.iraf2python(hdul[i].header['TRIMSEC'])\n bx, by = slices.iraf2python(hdul[i].header['BIASSEC'])\n\n data = hdul[i].data\n trim = data[ty[0]:ty[1], tx[0]:tx[1]]\n bias = data[by[0]:by[1], bx[0]:bx[1]]\n\n # Collapse the bias columns to a single column.\n bias = _np.median(bias, axis=1)\n\n # Fit and remove OVERSCAN\n x = _np.arange(bias.size) + 1\n bias_fit_pars = _np.polyfit(x, bias, 2) # Last par = inf\n bias_fit = _np.polyval(bias_fit_pars, x)\n bias_fit = bias_fit.reshape((bias_fit.size, 1))\n bias_fit = _np.repeat(bias_fit, trim.shape[1], axis=1)\n\n trim = trim - bias_fit\n dx, dy = slices.iraf2python(hdul[i].header['DETSEC'])\n dx, dy = dx // bin_size[0], dy // bin_size[1]\n new_data[dy[0]:dy[1], dx[0]:dx[1]] = trim\n\n header = self.get_header(hdul)\n\n return new_data, header, \"m_\"\n\n @staticmethod\n def remove_cosmic_rays(data, header, prefix, cosmic_rays):\n \"\"\"\n Use LACosmic to remove cosmic rays.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n prefix : str\n Filename prefix to flag images that were clean.\n\n cosmic_rays : bool\n Flag to indicate if cosmic rays removal should be performed.\n \"\"\"\n if cosmic_rays:\n\n d = data\n d, _ = _cosmicray_lacosmic(\n d, gain=2.6, readnoise=10.0, sigclip=2.5, sigfrac=0.3,\n objlim=5.0)\n d /= 2.6\n\n h = header\n h.add_history(\n 'Cosmic rays and hot pixels removed using LACosmic')\n\n data = d\n header = h\n\n return data, header, prefix\n\n @staticmethod\n def remove_wcs(header):\n\n return header\n\n\nclass SamiReducer(Reducer):\n\n gain = [2.1, 2.0537, 2.1, 2.0823]\n read_noise = [10., 10., 10., 10.]\n \n def reduce(self, hdu_list, prefix=\"\"):\n\n # If the number of extensions is just 1, then the file is already\n # processed.\n if len(hdu_list) == 1:\n return hdu_list, ''\n\n # Merge file\n data, header, prefix = self.merge(hdu_list)\n\n # Removing bad column and line\n data, header, prefix = self.remove_central_bad_columns(\n data, header, prefix,\n )\n\n # Correct ZERO\n data, header, prefix = self.correct_zero(\n data, header, prefix, self.zero_file\n )\n\n # Correct DARK\n data, header, prefix = self.correct_dark(\n data, header, prefix, self.dark_file\n )\n\n # Remove cosmic rays and hot pixels\n data, header, prefix = self.remove_cosmic_rays(\n data, header, prefix, self.cosmic_rays\n )\n\n # Remove lateral glows\n data, header, prefix = self.correct_lateral_glow(\n data, header, prefix, self.glow_file\n )\n\n # Correct FLAT\n data, header, prefix = self.correct_flat(\n data, header, prefix, self.flat_file\n )\n\n # Normalize by the EXPOSURE TIME\n data, header, prefix = self.divide_by_exposuretime(\n data, header, prefix, self.time\n )\n\n # Clean known bad columns and lines\n data, header, prefix = self.clean_hot_columns_and_lines(\n data, header, prefix, self.clean\n )\n\n # Add WCS\n data, header = self.create_wcs(\n data, header\n )\n\n return data, header, prefix\n\n def clean_columns(self, data, header):\n \"\"\"\n Clean the known bad columns that exists in most of SAMI's, SOI's or\n SIFS's data. 
This method is meant to be overwritten via inheritance.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n binning = header['CCDSUM'].split(' ')[0]\n binning = int(binning.strip())\n\n if binning == 4:\n bad_columns = [\n [167, 0, 513],\n [213, 513, 1023],\n [304, 0, 513],\n [309, 1, 512],\n [386, 0, 513],\n [476, 0, 513],\n [602, 0, 513],\n [671, 0, 513],\n [673, 475, 513],\n [678, 0, 513],\n [741, 0, 513],\n [810, 0, 513],\n [919, 0, 513],\n [212, 513, 1023],\n [680, 513, 1023],\n [725, 513, 1023],\n [848, 513, 1023],\n [948, 0, 512],\n [949, 0, 512]\n ]\n else:\n []\n\n for column in bad_columns:\n x0 = column[0]\n y0 = column[1]\n yf = column[2]\n data = self.clean_column(data, x0, y0, yf)\n\n return data\n\n def clean_lines(self, data, header):\n \"\"\"\n Clean the known bad lines that exists in most of SAMI's, SOI's or\n SIFS's data. This method is meant to be overwritten via inheritance.\n\n Args:\n\n data (numpy.ndarray) : A 2D numpy array that contains the data.\n\n header (astropy.io.fits.Header) : A header that will be updated.\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n binning = header['CCDSUM'].split(' ')[0]\n binning = int(binning.strip())\n\n if binning == 4:\n bad_lines = [\n [166, 206, 282],\n [212, 258, 689],\n [214, 239, 688],\n [304, 345, 291],\n [386, 422, 454],\n [398, 422, 38],\n [477, 516, 490],\n [387, 429, 455],\n [574, 603, 494],\n [574, 603, 493],\n [640, 672, 388],\n [604, 671, 388],\n [698, 746, 198],\n [706, 634, 634],\n [772, 812, 354],\n [900, 938, 426],\n [904, 920, 396]\n ]\n\n else:\n bad_lines = []\n\n for line in bad_lines:\n x0 = line[0]\n xf = line[1]\n y = line[2]\n data = self.clean_line(data, x0, xf, y)\n\n return data\n\n @staticmethod\n def remove_central_bad_columns(data, header, prefix):\n \"\"\"\n Remove central bad columns at the interface of the four extensions.\n\n Parameter\n ---------\n data : numpy.ndarray\n 2D Array containing the data.\n \"\"\"\n n_rows, n_columns = data.shape\n\n # Copy the central bad columns to a temp array\n temp_column = data[:, n_columns // 2 - 1:n_columns // 2 + 1]\n\n # Shift the whole image by two columns\n data[:, n_columns // 2 - 1:-2] = data[:, n_columns // 2 + 1:]\n\n # Copy the bad array in the end (right) of the image).\n data[:, -2:] = temp_column\n\n return data, header, prefix\n\n\nclass SifsReducer(SamiReducer):\n pass\n\n\nclass SoiReducer(Reducer):\n \"\"\"\n SoiReducer\n\n This class holds all the methods used to join the extensions within a\n FITS file obtained with SOI.\n\n Parameters\n ----------\n zero_file : str\n The filename of the master zero that will be used in subtraction.\n\n clean : bool\n Clean bad collumns by taking the _median value of the pixels around\n them.\n\n cosmic_rays : bool\n Clean cosmic rays using LACosmic package. 
See noted bellow for\n reference.\n\n dark_file : str\n Master Dark's filename to be used for dark subtraction.\n\n debug : bool\n Turn on debug mode with lots of printing.\n\n flat_file : str\n Master Flat filename to be used for normalization.\n\n glow_file : str\n Master file that contains the lateral glowings sometimes present in\n SAMI's data.\n\n time : bool\n Divide each pixel's values by the exposure time and update header.\n\n verbose : bool\n Turn on verbose mode (not so talktive as debug mode).\n\n See also\n --------\n LACosmic - http://www.astro.yale.edu/dokkum/lacosmic/\n \"\"\"\n\n @staticmethod\n def add_gap(data, header, interpolation_factor=10):\n \"\"\"\n SOI has two detectors which are separated by 7.8 arcsec (or 102\n unbinned pixels). This method reads an merged array and adds the gap\n based on the detector's binning.\n\n Parameters\n ----------\n data : numpy.ndarray\n 2D array with the data merged.\n\n header : astropy.io.fits.Header\n a header that contains the binning information on the 'CCDSUM'\n key.\n \"\"\"\n if header['OBSTYPE'] == 'OBJECT':\n\n binning = header['CCDSUM']\n binning = int(binning.split()[0])\n\n gap_size = 7.8 # arcseconds\n pixel_scale = 0.0767 # arcsecond / pixel\n gap_pixel = int(round(gap_size / pixel_scale / binning, 0))\n\n nrow, ncol = data.shape\n\n data = _np.append(data, _np.zeros((nrow, gap_pixel)), axis=1)\n data[:, ncol // 2 + gap_pixel:] = data[:, ncol // 2:- gap_pixel]\n data[:, ncol // 2:ncol // 2 + gap_pixel] = 0\n\n return data, header\n\n def clean_columns(self, _data, _header):\n \"\"\"\n Clean the known bad columns that exists in most of SAMI's data.\n\n Parameters\n ----------\n _data : numpy.ndarray\n A 2D numpy array that contains the data.\n\n _header : astropy.io.fits.Header\n a header that contains the binning information on the 'CCDSUM'\n key.\n\n See also\n --------\n SoiMerger.clean_column\n SoiMerger.clean_line\n SoiMerger.clean_lines\n \"\"\"\n if not isinstance(_data, _np.ndarray):\n raise (TypeError, 'Please, use a np.array as input')\n if _data.ndim is not 2:\n raise (TypeError, 'Data contains %d dimensions while it was '\n 'expected 2 dimensions.')\n\n b = int(_header['CCDSUM'].strip().split(' ')[0])\n\n if b == 1:\n bad_columns = []\n elif b == 2:\n bad_columns = [\n [855, 0, 2047],\n ]\n elif b == 4:\n bad_columns = [\n [427, 0, 1023]\n ]\n else:\n logger.warning(\n 'Skipping clean_columns for binning {} x {}'.format(b, b))\n bad_columns = []\n\n for column in bad_columns:\n x0 = column[0]\n y0 = column[1]\n yf = column[2]\n _data = self.clean_column(_data, x0, y0, yf)\n\n return _data\n\n def clean_lines(self, hdu_list):\n \"\"\"\n Clean the known bad lines that exists in most of SAMI's, SOI's or\n SIFS's data. This method is meant to be overwritten via inheritance.\n\n Args:\n\n hdu_list (astropy.io.fits.HDUList)\n\n See also:\n\n Reducer.clean_column\n Reducer.clean_line\n Reducer.clean_lines\n \"\"\"\n if not isinstance(hdu_list, _pyfits.HDUList):\n raise TypeError('Please, use a HDUList as input')\n\n if len(hdu_list) != 5:\n raise ValueError(\n \"HDUList is expected to have 1 + 4 elements. 
Found {}\".format(\n len(hdu_list)))\n\n for i in range(1, len(hdu_list)):\n\n _data = hdu_list[i].data\n _hdr = hdu_list[i].header\n\n bad_lines = [\n # [166, 206, 282],\n # [212, 258, 689],\n # [214, 239, 688],\n # [304, 345, 291],\n # [386, 422, 454],\n # [398, 422, 38],\n # [477, 516, 490],\n # [387, 429, 455],\n # [574, 603, 494],\n # [574, 603, 493],\n # [640, 672, 388],\n # [604, 671, 388],\n # [698, 746, 198],\n # [706, 634, 634],\n # [772, 812, 354],\n # [900, 938, 426],\n # [904, 920, 396]\n ]\n\n for line in bad_lines:\n x0 = line[0]\n xf = line[1]\n y = line[2]\n _data = self.clean_line(_data, x0, xf, y)\n\n hdu_list[i].data = _data\n\n return hdu_list\n\n\ndef _normalize_data(data):\n \"\"\"\n This method is intended to normalize flat data before it is applied to the\n images that are being reduced. A total of 1000 random points are used to\n estimate the _median level that will be used for normalization.\n\n Args:\n\n data (numpy.ndarray) : Data that will be normalized\n\n Returns:\n norm_data (numpy.ndarray) : Normalized data.\n \"\"\"\n sample = _np.random.randint(0, high=data.size - 1, size=1000)\n mode = stats.mode(data.ravel()[sample])[0]\n\n return data / mode\n"
] |
[
[
"numpy.repeat",
"numpy.array",
"numpy.sin",
"numpy.empty",
"numpy.argmin",
"numpy.zeros",
"numpy.median",
"numpy.ones",
"numpy.polyval",
"numpy.random.randint",
"numpy.polyfit",
"numpy.cos",
"numpy.arange",
"numpy.deg2rad",
"numpy.hstack",
"numpy.vstack"
]
] |
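The overscan handling inside `Reducer.merge` above collapses the BIASSEC strip to a single column with a median, fits a 2nd-degree polynomial down the rows, and subtracts the fitted profile from the trimmed data. A stand-alone sketch on synthetic data (numpy broadcasting replaces the explicit `reshape`/`np.repeat` of the original, with the same effect):

```python
import numpy as np

rng = np.random.default_rng(1)
rows = np.arange(512)
ramp = 300.0 + 0.05 * rows + 1e-4 * rows**2          # slowly varying bias level

# Fake trimmed science region (~700 ADU of signal on top of the bias ramp)
# and a 32-column overscan strip that sees only the ramp plus read noise.
trim = ramp[:, None] + rng.normal(loc=700.0, scale=5.0, size=(512, 256))
bias_strip = ramp[:, None] + rng.normal(scale=2.0, size=(512, 32))

bias = np.median(bias_strip, axis=1)                 # collapse to one column
x = np.arange(bias.size) + 1                         # same 1-based x as merge()
bias_fit = np.polyval(np.polyfit(x, bias, 2), x)     # 2nd-degree fit
trim = trim - bias_fit[:, None]                      # broadcast across columns

print(trim.mean())                                   # ~700: the ramp is gone
```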
haophancs/TREQS
|
[
"49e354ce2a08cf963ec139d99936020e0f80ced8"
] |
[
"LeafNATS/eval_scripts/eval_class_v1.py"
] |
[
"import numpy as np\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\n\ndef evaluation(args):\n '''\n We use f-score, accuracy, MSE to evaluation the performance of different models.\n Here, the best model is selected based on the averaged f-score.\n '''\n score_test = 0.0\n score_validate = 0.0\n mdx_test = 1\n mdx_validate = 1\n memo = []\n for epoch in range(1, args.n_epoch+1):\n print('='*50)\n print('Epoch: {}'.format(epoch))\n score_dict = {}\n\n mem_score = {'validate': [], 'test': []}\n\n pred_data = np.loadtxt('../nats_results/validate_pred_{}.txt'.format(epoch))\n true_data = np.loadtxt('../nats_results/validate_true_{}.txt'.format(epoch))\n\n (p1, r1, f1, _) = precision_recall_fscore_support(true_data, pred_data, average='macro')\n accu = accuracy_score(true_data, pred_data)\n mse = mean_squared_error(true_data, pred_data)\n\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n mem_score['validate']= [p1, r1, f1, accu, mse]\n\n pred_data = np.loadtxt('../nats_results/test_pred_{}.txt'.format(epoch))\n true_data = np.loadtxt('../nats_results/test_true_{}.txt'.format(epoch))\n\n if accu > score_validate:\n score_validate = accu\n mdx_validate = epoch\n\n (p1, r1, f1, _) = precision_recall_fscore_support(true_data, pred_data, average='macro')\n accu = accuracy_score(true_data, pred_data)\n mse = mean_squared_error(true_data, pred_data)\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n mem_score['test'] = [p1, r1, f1, accu, mse]\n\n if accu > score_test:\n score_test = accu\n mdx_test = epoch\n\n memo.append(mem_score)\n\n print('='*50)\n print('Best epoch {}'.format(mdx_validate))\n print('='*50)\n print('Val')\n [p1, r1, f1, accu, mse] = memo[mdx_validate-1]['validate']\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n print('Test')\n [p1, r1, f1, accu, mse] = memo[mdx_validate-1]['test']\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n print('='*50)\n print('Max epoch {}'.format(mdx_test))\n print('='*50)\n print('Val')\n [p1, r1, f1, accu, mse] = memo[mdx_test-1]['validate']\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))\n print('Test')\n [p1, r1, f1, accu, mse] = memo[mdx_test-1]['test']\n print('f_score={}, Accuracy={}, MSE={}'.format(\n np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))"
] |
[
[
"numpy.round",
"sklearn.metrics.mean_squared_error",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.precision_recall_fscore_support"
]
] |
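The eval_class_v1.py entry above scores each epoch with macro-averaged precision/recall/F1, accuracy, and MSE. (Note that its docstring says the best model is chosen by averaged f-score, while the comparisons in the code actually use accuracy.) The metric calls themselves, run on a toy label vector:

```python
import numpy as np
from sklearn.metrics import (accuracy_score, mean_squared_error,
                             precision_recall_fscore_support)

true_data = np.array([0, 1, 2, 2, 1, 0])
pred_data = np.array([0, 1, 2, 1, 1, 0])

(p1, r1, f1, _) = precision_recall_fscore_support(true_data, pred_data,
                                                  average='macro')
accu = accuracy_score(true_data, pred_data)
mse = mean_squared_error(true_data, pred_data)

print('f_score={}, Accuracy={}, MSE={}'.format(
    np.round(f1, 4), np.round(accu, 4), np.round(mse, 4)))
```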
nsfzyzz/dispersion-score
|
[
"ac0c633fe3af091e83d2d198809d98545a0a311a"
] |
[
"eval/output_ds_synthetic.py"
] |
[
"\"\"\"This script is used to measure output dispersion score of synthetic datasets\n\"\"\"\nimport os\nimport sys\nimport numpy as np\nimport torch\nimport random\nimport tqdm\nimport time\nfrom pathlib import Path\nfrom os.path import join\nfrom model.model import EncoderDecoder\nsys.path.append(join(os.path.dirname(os.path.abspath(__file__)), \"../\"))\nfrom dataset.toy_dataset.toydataset import ToyDataset\nfrom auxiliary.my_utils import plant_seeds\nfrom auxiliary.metric_parser import parser\nfrom model.pseudo_network import Generator\nfrom eval.metric import ChamferDistanceL2, compute_ptcloud_dismatrix_batch, cluster_eval\nfrom eval.eval_utils import get_logger, CountFrequency, dic_to_array, mean_std\nimport auxiliary.ChamferDistancePytorch.chamfer3D.dist_chamfer_3D as dist_chamfer_3D\n\nopt = parser()\n###Mkdir and logger\nopt.device = torch.device(\"cuda\")\nres_path = join(opt.dir_name, opt.res_folder)\nPath(res_path).mkdir(parents=True, exist_ok=True)\nproc_logger = get_logger(\"process\", res_path, \"process.log\")\nres_logger = get_logger(\"results\", res_path, \"score.log\")\nopt.logger = proc_logger\nprint(opt.trained_exp_dir)\n\n\nnviews_dic = {\"train\":opt.nviews_train, \"test\":opt.nviews_test}\nnum_seed = max(len(opt.seed_list), 1)\nscore_collect = {}\neval_label_list = set()\n\nfor seed_idx in range(num_seed):\n if opt.seed_list:\n opt.seed = opt.seed_list[seed_idx]\n score_collect.update({str(opt.seed):{}})\n plant_seeds(opt.seed)\n\n ##Loading Data and Network\n if opt.split == 'pred':\n eval_loss = ChamferDistanceL2().to(opt.device)\n distChamfer = dist_chamfer_3D.chamfer_3DDist()\n\n if opt.network=='atlasnet':\n network = EncoderDecoder(opt)\n opt.logger.info(f\"Reloading Network Weights from {opt.reload_model_path}...\")\n network.load_state_dict(torch.load(opt.reload_model_path)['model_state_dict'])\n network.to(opt.device)\n \n if opt.split == \"train\":\n dataset = ToyDataset(data_base_dir=opt.data_base_dir, \n json_file=opt.train_json_file,\n num_points=opt.number_points, \n train=True, \n normalization=opt.normalization, \n logger=opt.logger) \n elif opt.split == \"test\" or opt.split == \"pred\":\n dataset = ToyDataset(data_base_dir=opt.data_base_dir, \n json_file=opt.test_json_file,\n num_points=opt.number_points, \n train=False, \n normalization=opt.normalization, \n logger=opt.logger) \n else:\n raise NotImplementedError()\n\n loader = torch.utils.data.DataLoader(dataset, \n batch_size=opt.pred_batch_size, \n shuffle=False, num_workers=8)\n if opt.rsample == 1:\n sample_num = len(dataset)\n opt.nsample = len(dataset)\n else:\n if opt.rsample != -1:\n opt.nsample = int(opt.rsample * len(dataset))\n subset_index = random.sample(range(len(dataset)), opt.nsample)\n dataset = torch.utils.data.Subset(dataset, subset_index)\n sample_num = len(subset_index)\n data = None\n pred_loss = 0.0\n with torch.set_grad_enabled(False): \n for batch in tqdm.tqdm(loader, desc=f\"loading {opt.split} {opt.type} data\"):\n if opt.split == 'pred':\n input_img = batch['image'].to(opt.device)\n pred_points = network(input_img, train=False)\n pred_points = pred_points.transpose(2, 3).contiguous()\n B = pred_points.shape[0]\n pred_points = pred_points.view(B, -1, 3)\n gt_points = batch['points'].to(opt.device)\n assert gt_points.shape[0] == B, f'gt {gt_points.shape[0]}, while pred {B}'\n if data is None:\n data = pred_points\n else:\n data = torch.cat((data, pred_points), dim=0)\n pred_loss += eval_loss(gt_points, pred_points).item()\n dist1, dist2, idx1, idx2 = 
distChamfer(gt_points, pred_points)\n opt.type = 'points'\n\n pred_loss /= len(loader)\n proc_logger.info(f\"Pred Chamfer Loss: {pred_loss:4f}\")\n start_time = time.time()\n\n if opt.type == 'points':\n data = data.to(opt.device)\n metric = ChamferDistanceL2().to(opt.device)\n distance_matrix = compute_ptcloud_dismatrix_batch(data, data, metric, \n opt.pred_batch_size, opt.device, proc_logger)\n else:\n raise NotImplementedError()\n\n elasp_time = (time.time() - start_time) / 60\n\n distance_matrix = distance_matrix.cpu().numpy()\n\n score_collect[str(opt.seed)].update({\"dm\": distance_matrix})\n score_collect[str(opt.seed)].update({\"pred_chamfer\": pred_loss})\n \n n_evals = len(opt.perf_pc_list)\n for index in range(n_evals):\n c_method, e_method, n_cluster, perf_pc = opt.c_method[index], opt.e_method[index], opt.cluster_k[index], opt.perf_pc_list[index]\n\n score, part_label = cluster_eval(c_method=c_method, e_method=e_method, distance_matrix=distance_matrix, \n seed=opt.seed, n_cluster=n_cluster, pc=perf_pc)\n\n label_stat_verbose = \"\"\n freq = CountFrequency(part_label)\n for key, value in freq.items(): \n label_stat_verbose += \"% d :% d | \"%(key, value)\n\n proc_logger.info(f\"{opt.type} mode: {opt.mode}, split: {opt.split} \" + \n f\"nviews: train {opt.nviews_train}, test {opt.nviews_test}, sample num:{sample_num} \" + \n f\"seed{opt.seed}, metric{opt.metric} perf{perf_pc}% \" + \n f\"samp{distance_matrix.shape[0]}, Pred Chamfer: {pred_loss:.4f}, score: {score:.4f} DM\" + \n f\"{distance_matrix.shape[0]}, compute time {elasp_time:2f} min\")\n\n eval_label = f\"{c_method}_{e_method}_k{n_cluster}p{perf_pc}\"\n score_collect[str(opt.seed)].update({eval_label: {}})\n eval_label_list.add(eval_label)\n score_collect[str(opt.seed)][eval_label].update({\"score\": score})\n score_collect[str(opt.seed)][eval_label].update({\"label\": np.array(part_label)}) # cluster label\n score_collect[str(opt.seed)][eval_label].update({\"perf_percent\": perf_pc})\n score_collect[str(opt.seed)][eval_label].update({\"label_stats\": dic_to_array(freq)})\n \neval_label_list = list(eval_label_list)\neval_label_list.sort()\nss_list = {}\nfor eval_label in eval_label_list:\n ss_list.update({eval_label:[]})\n\npred_list = []\n\nfor seed in score_collect:\n pred_list.append(score_collect[seed]['pred_chamfer'])\n for eval_label in eval_label_list:\n ss_list[eval_label].append(score_collect[seed][eval_label][\"score\"])\n\nfor eval_label in eval_label_list:\n avg_score_lst = [score/sample_num for score in ss_list[eval_label]]\n ss_mean, ss_std = mean_std(ss_list[eval_label])\n avg_ss_mean, avg_ss_std = mean_std(avg_score_lst)\n score_collect.update({f'{eval_label}': np.array([ss_mean, ss_std])})\n score_collect.update({f'avg_{eval_label}': np.array([avg_ss_mean, avg_ss_std])})\n\npred_loss_mean, pred_loss_std = mean_std(pred_list)\n\nscore_collect.update({'split': opt.split})\nscore_collect.update({'type': opt.type})\nscore_collect.update({'mode': opt.mode})\nscore_collect.update({'sample_num': sample_num})\nscore_collect.update({'chamfer_stats': np.array([pred_loss_mean, pred_loss_std])})\nscore_collect.update({'trainnv': np.array([opt.nviews_train])})\nscore_collect.update({'testnv': np.array([opt.nviews_test])})\n\nfor eval_label in eval_label_list:\n ss_mean, ss_std = score_collect[f'{eval_label}'][0], score_collect[f'{eval_label}'][1]\n avg_ss_mean, avg_ss_std = score_collect[f'avg_{eval_label}'][0], score_collect[f'avg_{eval_label}'][1]\n res_logger.info(f\"{opt.network} {opt.type} mode: {opt.mode}, 
split: {opt.split}, \" + \n f\"nviews: train {opt.nviews_train}, test {opt.nviews_test}, sample num: {sample_num} \" + \n f\"seed_list {opt.seed_list}, metric {opt.metric} perf: {perf_pc} % {opt.metric} {opt.trained_exp_dir} {eval_label} \" + \n f\"Sum of Score: (mean: {ss_mean:.4f}|std: {ss_std:.4f}) \"+ \n f\"Average Score: (mean: {avg_ss_mean:.4f}|std: {avg_ss_std:.4f}) \"+ \n f\"Pred Chamfer: (mean:{pred_loss_mean:.4f}|std: {pred_loss_std:.4f}) \" +\n f\"DM compute time {elasp_time:.2f} min\")\n \nnp.savez_compressed(os.path.join(res_path, \nf\"{opt.network}_{opt.mode}_{opt.split}_{opt.type}_{sample_num}_{opt.trained_exp_dir.split('/')[-1]}.npz\"), **score_collect)\n \nres_logger.info(f\"###############END OF {opt.type} {opt.network} {opt.trained_exp_dir} PIPELINE#################\")\n\n\n\n"
] |
[
[
"torch.device",
"numpy.array",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.load",
"torch.utils.data.Subset",
"torch.set_grad_enabled"
]
] |
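The dispersion-score script above builds a pairwise Chamfer distance matrix over predicted point clouds (`compute_ptcloud_dismatrix_batch`) and hands it to `cluster_eval`. A naive, un-batched sketch of that matrix construction; `chamfer_l2` here is a simple stand-in for the project's CUDA-backed `ChamferDistanceL2` metric:

```python
import torch

def chamfer_l2(a, b):
    # Symmetric squared-distance Chamfer between (N, 3) and (M, 3) clouds.
    d = torch.cdist(a, b) ** 2
    return d.min(dim=1).values.mean() + d.min(dim=0).values.mean()

clouds = [torch.randn(128, 3) for _ in range(4)]  # fake predicted clouds

n = len(clouds)
dm = torch.zeros(n, n)
for i in range(n):
    for j in range(n):
        dm[i, j] = chamfer_l2(clouds[i], clouds[j])

print(dm)  # symmetric, zeros on the diagonal
```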
elifesciences-publications/nelpy
|
[
"68c1ffff5eee5de60fc365c4f5df3c7200f5c319",
"68c1ffff5eee5de60fc365c4f5df3c7200f5c319"
] |
[
"nelpy/decoding.py",
"nelpy/contrib/_ringlike.py"
] |
[
"\"\"\"Bayesian encoding and decoding\"\"\"\n\n__all__ = ['decode1D',\n 'decode2D',\n 'k_fold_cross_validation',\n 'cumulative_dist_decoding_error_using_xval',\n 'cumulative_dist_decoding_error',\n 'get_mode_pth_from_array',\n 'get_mean_pth_from_array']\n\nimport numpy as np\nfrom . import auxiliary\n\ndef get_mode_pth_from_array(posterior, tuningcurve=None):\n \"\"\"If tuningcurve is provided, then we map it back to the external coordinates / units.\n Otherwise, we stay in the bin space.\"\"\"\n n_xbins = posterior.shape[0]\n\n if tuningcurve is None:\n xmin = 0\n xmax = n_xbins\n else:\n # TODO: this only works for TuningCurve1D currently\n if isinstance(tuningcurve, auxiliary.TuningCurve1D):\n xmin = tuningcurve.bins[0]\n xmax = tuningcurve.bins[-1]\n else:\n raise TypeError(\"tuningcurve type not yet supported!\")\n\n _, bins = np.histogram([], bins=n_xbins, range=(xmin,xmax))\n xbins = (bins + xmax/n_xbins)[:-1]\n\n mode_pth = np.argmax(posterior, axis=0)*xmax/n_xbins\n mode_pth = np.where(np.isnan(posterior.sum(axis=0)), np.nan, mode_pth)\n\n return mode_pth\n\ndef get_mean_pth_from_array(posterior, tuningcurve=None):\n \"\"\"If tuningcurve is provided, then we map it back to the external coordinates / units.\n Otherwise, we stay in the bin space.\"\"\"\n n_xbins = posterior.shape[0]\n\n if tuningcurve is None:\n xmin = 0\n xmax = 1\n else:\n # TODO: this only works for TuningCurve1D currently\n if isinstance(tuningcurve, auxiliary.TuningCurve1D):\n xmin = tuningcurve.bins[0]\n xmax = tuningcurve.bins[-1]\n else:\n raise TypeError(\"tuningcurve type not yet supported!\")\n\n _, bins = np.histogram([], bins=n_xbins, range=(xmin,xmax))\n xbins = (bins + xmax/n_xbins)[:-1]\n\n mean_pth = (xbins * posterior.T).sum(axis=1)\n\n return mean_pth\n\ndef decode1D(bst, ratemap, xmin=0, xmax=100, w=1, nospk_prior=None, _skip_empty_bins=True):\n \"\"\"Decodes binned spike trains using a ratemap with shape (n_units, n_ext)\n\n TODO: complete docstring\n TODO: what if we have higher dimensional external correlates? This\n function assumes a 1D correlate. Even if we linearize a 2D\n environment, for example, then mean_pth decoding no longer works as\n expected, so this function should probably be refactored.\n\n Parameters\n ----------\n bst :\n ratemap: array_like\n Firing rate map with shape (n_units, n_ext), where n_ext is the\n number of external correlates, e.g., position bins. The rate map\n is in spks/second.\n xmin : float\n xmax : float\n w : int\n nospk_prior : array_like\n Prior distribution over external correlates with shape (n_ext,)\n that will be used if no spikes are observed in a decoding window\n Default is np.nan.\n If nospk_prior is any scalar, then a uniform prior is assumed.\n\n _skip_empty_bins is only used to return the posterior regardless of\n whether any spikes were observed, so that we can understand the spatial\n distribution in the absence of spikes, or at low firing rates.\n\n Returns\n -------\n posteriors : array\n Posterior distribution with shape (n_ext, n_posterior_bins),\n where n_posterior bins <= bst.n_bins, but depends on w and the\n event lengths.\n cum_posterior_lengths : array\n\n mode_pth :\n\n mean_pth :\n\n Examples\n --------\n\n \"\"\"\n\n if w is None:\n w=1\n assert float(w).is_integer(), \"w must be a positive integer!\"\n assert w > 0, \"w must be a positive integer!\"\n\n n_units, t_bins = bst.data.shape\n _, n_xbins = ratemap.shape\n\n # if we pass a TuningCurve1D object, extract the ratemap and re-order\n # units if necessary\n if isinstance(ratemap, auxiliary.TuningCurve1D):\n xmin = ratemap.bins[0]\n xmax = ratemap.bins[-1]\n bin_centers = ratemap.bin_centers\n # re-order units if necessary\n ratemap = ratemap.reorder_units_by_ids(bst.unit_ids)\n ratemap = ratemap.ratemap\n else:\n xmin = 0\n xmax = n_xbins\n bin_centers = np.arange(n_xbins)\n\n if nospk_prior is None:\n nospk_prior = np.full(n_xbins, np.nan)\n elif isinstance(nospk_priors, numbers.Number):\n nospk_prior = np.full(n_xbins, 1.0)\n\n assert nospk_prior.shape[0] == n_xbins, \"prior must have length {}\".format(n_xbins)\n assert nospk_prior.size == n_xbins, \"prior must be a 1D array with length {}\".format(n_xbins)\n\n lfx = np.log(ratemap)\n\n eterm = -ratemap.sum(axis=0)*bst.ds*w\n\n # if we decode using multiple bins at a time (w>1) then we have to decode each epoch separately:\n\n # first, we determine the number of bins we will decode. This requires us to scan over the epochs\n n_bins = 0\n cumlengths = np.cumsum(bst.lengths)\n posterior_lengths = np.zeros(bst.n_epochs, dtype=np.int)\n prev_idx = 0\n for ii, to_idx in enumerate(cumlengths):\n datalen = to_idx - prev_idx\n prev_idx = to_idx\n posterior_lengths[ii] = np.max((1,datalen - w + 1))\n\n n_bins = posterior_lengths.sum()\n posterior = np.zeros((n_xbins, n_bins))\n\n # next, we decode each epoch separately, one bin at a time\n cum_posterior_lengths = np.insert(np.cumsum(posterior_lengths),0,0)\n prev_idx = 0\n for ii, to_idx in enumerate(cumlengths):\n data = bst.data[:,prev_idx:to_idx]\n prev_idx = to_idx\n datacum = np.cumsum(data, axis=1) # ii'th data segment, with column of zeros prepended\n datacum = np.hstack((np.zeros((n_units,1)), datacum))\n re = w # right edge ptr\n # TODO: check if datalen < w and act appropriately\n if posterior_lengths[ii] > 1: # more than one full window fits into data length\n for tt in range(posterior_lengths[ii]):\n obs = datacum[:, re] - datacum[:, re-w] # spikes in window of size w\n re+=1\n post_idx = cum_posterior_lengths[ii] + tt\n if obs.sum() == 0 and _skip_empty_bins:\n # no spikes to decode in window!\n posterior[:,post_idx] = nospk_prior\n else:\n posterior[:,post_idx] = (np.tile(np.array(obs, ndmin=2).T, n_xbins) * lfx).sum(axis=0) + eterm\n else: # only one window can fit in, and perhaps only partially. We just take all the data we can get,\n # and ignore the scaling problem where the window size is now possibly less than bst.ds*w\n post_idx = cum_posterior_lengths[ii]\n obs = datacum[:, -1] # spikes in window of size at most w\n if obs.sum() == 0 and _skip_empty_bins:\n # no spikes to decode in window!\n posterior[:,post_idx] = nospk_prior\n else:\n posterior[:,post_idx] = (np.tile(np.array(obs, ndmin=2).T, n_xbins) * lfx).sum(axis=0) + eterm\n\n # normalize posterior:\n posterior = np.exp(posterior) / np.tile(np.exp(posterior).sum(axis=0),(n_xbins,1))\n\n # TODO: what was my rationale behid the following? Why not use bin centers?\n # _, bins = np.histogram([], bins=n_xbins, range=(xmin,xmax))\n # xbins = (bins + xmax/n_xbins)[:-1]\n\n mode_pth = np.argmax(posterior, axis=0)*xmax/n_xbins\n mode_pth = np.where(np.isnan(posterior.sum(axis=0)), np.nan, mode_pth)\n mean_pth = (bin_centers * posterior.T).sum(axis=1)\n return posterior, cum_posterior_lengths, mode_pth, mean_pth\n\ndef decode2D(bst, ratemap, xmin=0, xmax=100, ymin=0, ymax=100, w=1, nospk_prior=None, _skip_empty_bins=True):\n \"\"\"Decodes binned spike trains using a ratemap with shape (n_units, ext_nx, ext_ny)\n\n TODO: complete docstring\n TODO: what if we have higher dimensional external correlates? This\n function assumes a 2D correlate. Even if we linearize a 2D\n environment, for example, then mean_pth decoding no longer works as\n expected, so this function should probably be refactored.\n\n Parameters\n ----------\n bst :\n ratemap: array_like\n Firing rate map with shape (n_units, ext_nx, ext_ny), where n_ext is the\n number of external correlates, e.g., position bins. The rate map\n is in spks/second.\n xmin : float\n xmax : float\n w : int\n nospk_prior : array_like\n Prior distribution over external correlates with shape (n_ext,)\n that will be used if no spikes are observed in a decoding window\n Default is np.nan.\n If nospk_prior is any scalar, then a uniform prior is assumed.\n\n _skip_empty_bins is only used to return the posterior regardless of\n whether any spikes were observed, so that we can understand the spatial\n distribution in the absence of spikes, or at low firing rates.\n\n Returns\n -------\n posteriors : array\n Posterior distribution with shape (ext_nx, ext_ny, n_posterior_bins),\n where n_posterior bins <= bst.n_tbins, but depends on w and the\n event lengths.\n cum_posterior_lengths : array\n\n mode_pth :\n\n mean_pth :\n\n Examples\n --------\n\n \"\"\"\n\n def tile_obs(obs, nx, ny):\n n_units = len(obs)\n out = np.zeros((n_units, nx, ny))\n for unit in range(n_units):\n out[unit,:,:] = obs[unit]\n return out\n\n if w is None:\n w=1\n assert float(w).is_integer(), \"w must be a positive integer!\"\n assert w > 0, \"w must be a positive integer!\"\n\n n_units, t_bins = bst.data.shape\n\n xbins = None\n ybins = None\n\n # if we pass a TuningCurve2D object, extract the ratemap and re-order\n # units if necessary\n if isinstance(ratemap, auxiliary.TuningCurve2D):\n xbins = ratemap.xbins\n ybins = ratemap.ybins\n xbin_centers = ratemap.xbin_centers\n ybin_centers = ratemap.ybin_centers\n # re-order units if necessary\n ratemap = ratemap.reorder_units_by_ids(bst.unit_ids)\n ratemap = ratemap.ratemap\n\n _, n_xbins, n_ybins = ratemap.shape\n\n if nospk_prior is None:\n nospk_prior = np.full((n_xbins, n_ybins), np.nan)\n elif isinstance(nospk_priors, numbers.Number):\n nospk_prior = np.full((n_xbins, n_ybins), 1.0)\n\n assert nospk_prior.shape == (n_xbins, n_ybins), \"prior must have shape ({}, {})\".format(n_xbins, n_ybins)\n\n lfx = np.log(ratemap)\n\n eterm = -ratemap.sum(axis=0)*bst.ds*w\n\n # if we decode using multiple bins at a time (w>1) then we have to decode each epoch separately:\n\n # first, we determine the number of bins we will decode. This requires us to scan over the epochs\n n_tbins = 0\n cumlengths = np.cumsum(bst.lengths)\n posterior_lengths = np.zeros(bst.n_epochs, dtype=np.int)\n prev_idx = 0\n for ii, to_idx in enumerate(cumlengths):\n datalen = to_idx - prev_idx\n prev_idx = to_idx\n posterior_lengths[ii] = np.max((1,datalen - w + 1))\n\n n_tbins = posterior_lengths.sum()\n\n ########################################################################\n posterior = np.zeros((n_xbins, n_ybins, n_tbins))\n\n # next, we decode each epoch separately, one bin at a time\n cum_posterior_lengths = np.insert(np.cumsum(posterior_lengths),0,0)\n prev_idx = 0\n for ii, to_idx in enumerate(cumlengths):\n data = bst.data[:,prev_idx:to_idx]\n prev_idx = to_idx\n datacum = np.cumsum(data, axis=1) # ii'th data segment, with column of zeros prepended\n datacum = np.hstack((np.zeros((n_units,1)), datacum))\n re = w # right edge ptr\n # TODO: check if datalen < w and act appropriately\n if posterior_lengths[ii] > 1: # more than one full window fits into data length\n for tt in range(posterior_lengths[ii]):\n obs = datacum[:, re] - datacum[:, re-w] # spikes in window of size w\n re+=1\n post_idx = cum_posterior_lengths[ii] + tt\n if obs.sum() == 0 and _skip_empty_bins:\n # no spikes to decode in window!\n posterior[:,:,post_idx] = nospk_prior\n else:\n posterior[:,:,post_idx] = (tile_obs(obs, n_xbins, n_ybins) * lfx).sum(axis=0) + eterm\n else: # only one window can fit in, and perhaps only partially. We just take all the data we can get,\n # and ignore the scaling problem where the window size is now possibly less than bst.ds*w\n post_idx = cum_posterior_lengths[ii]\n obs = datacum[:, -1] # spikes in window of size at most w\n if obs.sum() == 0 and _skip_empty_bins:\n # no spikes to decode in window!\n posterior[:,:,post_idx] = nospk_prior\n else:\n posterior[:,:,post_idx] = (tile_obs(obs, n_xbins, n_ybins) * lfx).sum(axis=0) + eterm\n\n # normalize posterior:\n # see http://timvieira.github.io/blog/post/2014/02/11/exp-normalize-trick/\n for tt in range(n_tbins):\n posterior[:,:,tt] = posterior[:,:,tt] - posterior[:,:,tt].max()\n posterior[:,:,tt] = np.exp(posterior[:,:,tt])\n posterior[:,:,tt] = posterior[:,:,tt] / posterior[:,:,tt].sum()\n\n # if xbins is None:\n # _, bins = np.histogram([], bins=n_xbins, range=(xmin,xmax))\n # xbins = (bins + xmax/n_xbins)[:-1]\n # if ybins is None:\n # _, bins = np.histogram([], bins=n_ybins, range=(ymin,ymax))\n # ybins = (bins + ymax/n_ybins)[:-1]\n\n mode_pth = np.zeros((2, n_tbins))\n for tt in range(n_tbins):\n if np.any(np.isnan(posterior[:,:,tt])):\n mode_pth[0,tt] = np.nan\n mode_pth[0,tt] = np.nan\n else:\n x_, y_ = np.unravel_index(np.argmax(posterior[:,:,tt]), (n_xbins, n_ybins))\n mode_pth[0,tt] = xbins[x_]\n mode_pth[1,tt] = ybins[y_]\n\n expected_x = (xbin_centers * posterior.sum(axis=1).T).sum(axis=1)\n expected_y = (ybin_centers * posterior.sum(axis=0).T).sum(axis=1)\n mean_pth = np.vstack((expected_x, expected_y))\n\n posterior = np.transpose(posterior, axes=[1,0,2])\n\n return posterior, cum_posterior_lengths, mode_pth, mean_pth\n\ndef k_fold_cross_validation(X, k=None, randomize=False):\n \"\"\"\n Generates K (training, validation) pairs from the items in X.\n\n Each pair is a partition of X, where validation is an iterable\n of length len(X)/K. So each training iterable is of length\n (K-1)*len(X)/K.\n\n Parameters\n ----------\n X : list or int\n list of items, or list of indices, or integer number of indices\n k : int, or str, optional\n k > 1 number of folds for k-fold cross validation; k='loo' or\n 'LOO' for leave-one-out cross-validation (equivalent to\n k==n_samples). Default is 5.\n randomize : bool\n If true, a copy of X is shuffled before partitioning, otherwise\n its order is preserved in training and validation.\n\n Returns\n -------\n (training, validation)\n\n Example\n -------\n >>> X = [i for i in range(97)]\n >>> for training, validation in k_fold_cross_validation(X, k=5):\n >>> print(training, validation)\n >>> for x in X: assert (x in training) ^ (x in validation), x\n\n \"\"\"\n # deal with default values:\n if isinstance(X, int):\n X = range(X)\n n_samples = len(X)\n if k is None:\n k=5\n elif k=='loo' or k=='LOO':\n k=n_samples\n\n if randomize:\n from random import shuffle\n X=list(X)\n shuffle(X)\n for _k_ in range(k):\n training = [x for i, x in enumerate(X) if i % k != _k_]\n validation = [x for i, x in enumerate(X) if i % k == _k_]\n yield training, validation\n\ndef cumulative_dist_decoding_error_using_xval(bst, extern,*, decodefunc=decode1D, tuningcurve=None, k=5, transfunc=None, n_extern=100, extmin=0, extmax=100, sigma=3, n_bins=None):\n \"\"\"Cumulative distribution of decoding errors during epochs in\n BinnedSpikeTrainArray, evaluated using a k-fold cross-validation\n procedure.\n\n Parameters\n ----------\n bst: BinnedSpikeTrainArray\n BinnedSpikeTrainArray containing all the epochs to be decoded.\n Should typically have the same type of epochs as the ratemap\n (e.g., online epochs), but this is not a requirement.\n tuningcurve : TuningCurve1D\n extern : query-able object of external correlates (e.g. pos AnalogSignalArray)\n ratemap : array_like\n The ratemap (in Hz) with shape (n_units, n_ext) where n_ext are\n the external correlates, e.g., position bins.\n k : int, optional\n Number of fold for k-fold cross-validation. Default is k=5.\n n_bins : int\n Number of decoding error bins, ranging from tuningcurve.extmin\n to tuningcurve.extmax.\n\n Returns\n -------\n\n (error, cum_prob)\n (see Fig 3.(b) of \"Analysis of Hippocampal Memory Replay Using\n Neural Population Decoding\", Fabian Kloosterman, 2012)\n\n NOTE: should we allow for an optional tuning curve to be specified,\n or should we always recompute it ourselves?\n \"\"\"\n\n def _trans_func(extern, at):\n \"\"\"Default transform function to map extern into numerical bins\"\"\"\n\n _, ext = extern.asarray(at=at)\n\n return ext\n\n if transfunc is None:\n transfunc = _trans_func\n\n if n_bins is None:\n n_bins = 200\n\n max_error = extmax - extmin\n\n # indices of training and validation epochs / events\n\n hist = np.zeros(n_bins)\n for training, validation in k_fold_cross_validation(bst.n_epochs, k=k):\n # estimate place fields using bst[training]\n tc = auxiliary.TuningCurve1D(bst=bst[training], extern=extern, n_extern=n_extern, extmin=extmin, extmax=extmax, sigma=sigma)\n # decode position using bst[validation]\n posterior, _, mode_pth, mean_pth = decodefunc(bst[validation], tc)\n # calculate validation error (for current fold) by comapring\n # decoded pos v target pos\n target = transfunc(extern, at=bst[validation].bin_centers)\n\n histnew, bins = np.histogram(np.abs(target - mean_pth), bins=n_bins, range=(0, max_error))\n hist = hist + histnew\n\n # build cumulative error distribution\n cumhist = np.cumsum(hist)\n cumhist = cumhist / cumhist[-1]\n bincenters = (bins + (bins[1] - bins[0])/2)[:-1]\n\n # modify to start at (0,0):\n cumhist = np.insert(cumhist, 0, 0)\n bincenters = np.insert(bincenters, 0, 0)\n\n # modify to end at (max_error,1):\n cumhist = np.append(cumhist, 1)\n bincenters = np.append(bincenters, max_error)\n\n return cumhist, bincenters\n\ndef cumulative_dist_decoding_error(bst, *, tuningcurve, extern,\n decodefunc=decode1D, transfunc=None,\n n_bins=None):\n \"\"\"Cumulative distribution of decoding errors during epochs in\n BinnedSpikeTrainArray using a fixed TuningCurve.\n\n Parameters\n ----------\n bst: BinnedSpikeTrainArray\n BinnedSpikeTrainArray containing all the epochs to be decoded.\n Should typically have the same type of epochs as the ratemap\n (e.g., online epochs), but this is not a requirement.\n tuningcurve : TuningCurve1D\n extern : query-able object of external correlates (e.g. pos AnalogSignalArray)\n n_bins : int\n Number of decoding error bins, ranging from tuningcurve.extmin\n to tuningcurve.extmax.\n\n Returns\n -------\n\n (cumhist, bincenters)\n (see Fig 3.(b) of \"Analysis of Hippocampal Memory Replay Using\n Neural Population Decoding\", Fabian Kloosterman, 2012)\n\n \"\"\"\n\n def _trans_func(extern, at):\n \"\"\"Default transform function to map extern into numerical bins\"\"\"\n\n _, ext = extern.asarray(at=at)\n\n return ext\n\n if transfunc is None:\n transfunc = _trans_func\n if n_bins is None:\n n_bins = 200\n\n # indices of training and validation epochs / events\n\n max_error = tuningcurve.bins[-1] - tuningcurve.bins[0]\n\n posterior, _, mode_pth, mean_pth = decodefunc(bst=bst, ratemap=tuningcurve)\n target = transfunc(extern, at=bst.bin_centers)\n hist, bins = np.histogram(\n np.abs(target - mean_pth),\n bins=n_bins,\n range=(0, max_error))\n\n # build cumulative error distribution\n cumhist = np.cumsum(hist)\n cumhist = cumhist / cumhist[-1]\n bincenters = (bins + (bins[1] - bins[0])/2)[:-1]\n\n # modify to start at (0,0):\n cumhist = np.insert(cumhist, 0, 0)\n bincenters = np.insert(bincenters, 0, 0)\n\n # modify to end at (max_error,1):\n cumhist = np.append(cumhist, 1)\n bincenters = np.append(bincenters, max_error)\n\n return cumhist, bincenters\n\ndef rmse(predictions, targets):\n \"\"\"Calculate the root mean squared error of an array of predictions.\n\n Parameters\n ----------\n predictions : array_like\n Array of predicted values.\n targets : array_like\n Array of target values.\n\n Returns\n -------\n rmse: float\n Root mean squared error of the predictions wrt the targets.\n \"\"\"\n predictions = np.asanyarray(predictions)\n targets = np.asanyarray(targets)\n rmse = np.sqrt(np.nanmean((predictions - targets) ** 2))\n return rmse",
"\"\"\"Ring-like AnalogSignal/PositionArray\n\nsubsample, simplify, interpolation, smoothing, asarray, downsample, ... all have\nto be handled in a special way when we have a ring-like environment.\n\"\"\"\n\nimport copy\nimport numpy as np\nfrom scipy import interpolate\nfrom collections import namedtuple\n\nfrom ..core import _analogsignalarray, _epocharray\nfrom ..auxiliary import _position\nfrom .. import utils\n\nclass RinglikeTrajectory(_analogsignalarray.AnalogSignalArray):\n\n __attributes__ = ['_track_length', '_is_wrapped'] # RinglikeTrajectory-specific attributes\n __attributes__.extend(_analogsignalarray.AnalogSignalArray.__attributes__)\n def __init__(self, ydata=[], *, timestamps=None, fs=None, step=None,\n merge_sample_gap=0, support=None, in_memory=True, labels=None,\n track_length=None, empty=False):\n\n # if an empty object is requested, return it:\n if empty:\n super().__init__(empty=True)\n for attr in self.__attributes__:\n exec(\"self.\" + attr + \" = None\")\n self._support = _epocharray.EpochArray(empty=True)\n return\n\n # cast an AnalogSignalArray to a RinglikeTrajectory:\n if isinstance(ydata, _analogsignalarray.AnalogSignalArray):\n assert ydata.n_signals == 1, \\\n \"only 1D AnalogSignalArrays can be cast to RinglikeTrajectories!\"\n self.__dict__ = copy.deepcopy(ydata.__dict__)\n self._track_length = None\n self._is_wrapped = None\n self.__renew__()\n else:\n kwargs = {\"ydata\": ydata,\n \"timestamps\": timestamps,\n \"fs\": fs,\n \"step\": step,\n \"merge_sample_gap\": merge_sample_gap,\n \"support\": support,\n \"in_memory\": in_memory,\n \"labels\": labels}\n\n # initialize super:\n super().__init__(**kwargs)\n\n self._track_length = track_length\n self._is_wrapped = None # intialize to unknown (None) state\n\n def __repr__(self):\n address_str = \" at \" + str(hex(id(self)))\n if self.isempty:\n return \"<empty RinglikeTrajectory\" + address_str + \">\"\n if self.n_epochs > 1:\n epstr = \": {} segments\".format(self.n_epochs)\n else:\n epstr = \"\"\n dstr = \" for a total of {}\".format(utils.PrettyDuration(self.support.duration))\n if self.is_1d:\n return \"<1D RinglikeTrajectory%s%s>%s\" % (address_str, epstr, dstr)\n raise TypeError (\"RinglikeTrajectories must be 1D at this time!\")\n\n @property\n def is_1d(self):\n try:\n return self.n_signals == 1\n except IndexError:\n return False\n\n @property\n def is_wrapped(self):\n if self._is_wrapped is None:\n if self.max() > self._track_length:\n self._is_wrapped = False\n else:\n self._is_wrapped = True\n return self._is_wrapped\n\n @property\n def track_length(self):\n \"\"\"Length of the ringlike environment.\"\"\"\n if not self._track_length:\n raise ValueError(\"Please initialize/set track_length first!\")\n return self._track_length\n\n @track_length.setter\n def track_length(self, val):\n \"\"\"Set the length of the ringlike environment\"\"\"\n # TODO: do data integrity cheking / validation\n self._track_length = val\n\n def _unwrap(self, arr):\n \"\"\"Unwrap trajectory to winding distance.\"\"\"\n lin = copy.deepcopy(arr.squeeze())\n for ii in range(1, len(lin)):\n if lin[ii] - lin[ii-1] >= self.track_length/2:\n lin[ii:] = lin[ii:] - self.track_length\n elif lin[ii] - lin[ii-1] < - self.track_length/2:\n lin[ii:] = lin[ii:] + self.track_length\n return np.atleast_2d(lin)\n\n def _wrap(self, arr):\n \"\"\"Wrap trajectory around ring.\"\"\"\n return arr % self.track_length\n\n def wrap(self):\n \"\"\"Wrap trajectory around ring.\"\"\"\n self._ydata = np.atleast_2d(self._wrap(self._ydata.squeeze()))\n self._is_wrapped = True\n # self._interp = None\n\n def unwrap(self):\n \"\"\"Unwrap trajectory to winding distance.\"\"\"\n self._ydata = np.atleast_2d(self._unwrap(self._ydata.squeeze()))\n self._is_wrapped = False\n # self._interp = None\n\n def _wraptimes(self):\n \"\"\"Return timestamps when trajectory wraps around.\"\"\"\n is_wrapped = self.is_wrapped\n if not is_wrapped:\n self.wrap()\n lin = copy.deepcopy(self.ydata.squeeze())\n wraptimes = []\n for ii in range(1, len(lin)):\n if lin[ii] - lin[ii-1] >= self.track_length/2:\n lin[ii:] = lin[ii:] - self.track_length\n wraptimes.append(self.time[ii])\n elif lin[ii] - lin[ii-1] < - self.track_length/2:\n lin[ii:] = lin[ii:] + self.track_length\n wraptimes.append(self.time[ii])\n if not is_wrapped:\n self.unwrap()\n return np.asarray(wraptimes)\n\n def _wrapepochs(self):\n \"\"\"Return an epoch (trial-like) for each time the trajectory wraps around.\"\"\"\n\n trial_bds = self._wraptimes()\n trial_bds = np.insert(trial_bds, 0, self.support.start)\n np.append(trial_bds, self.support.stop)\n trial_epochs = _epocharray.EpochArray(np.vstack((trial_bds[:-1], trial_bds[1:])).T)\n\n return trial_epochs\n\n def shift(self, amount, *, inplace=False):\n \"\"\"\"\"\"\n is_wrapped = self.is_wrapped\n if inplace:\n out = self\n else:\n out = copy.deepcopy(self)\n out.unwrap()\n out = out + amount\n if is_wrapped:\n out.wrap()\n return out\n\n def smooth(self, *, fs=None, sigma=None, bw=None, inplace=False):\n \"\"\"Smooths the regularly sampled RinglikeTrajectory with a Gaussian kernel.\n\n Smoothing is applied in time, and the same smoothing is applied to each\n signal in the RinglikeTrajectory.\n\n Smoothing is applied within each epoch.\n\n Parameters\n ----------\n fs : float, optional\n Sampling rate (in Hz) of RinglikeTrajectory. If not provided, it will\n be obtained from asa.fs\n sigma : float, optional\n Standard deviation of Gaussian kernel, in seconds. Default is 0.05 (50 ms)\n bw : float, optional\n Bandwidth outside of which the filter value will be zero. Default is 4.0\n inplace : bool\n If True the data will be replaced with the smoothed data.\n Default is False.\n\n Returns\n -------\n out : RinglikeTrajectory\n A RinglikeTrajectory with smoothed data is returned.\n \"\"\"\n\n is_wrapped = self.is_wrapped\n\n kwargs = {'inplace' : inplace,\n 'fs' : fs,\n 'sigma' : sigma,\n 'bw' : bw}\n\n if is_wrapped:\n self.unwrap()\n out = utils.gaussian_filter(self, **kwargs)\n out.__renew__()\n if is_wrapped:\n out.wrap()\n if not inplace:\n self.wrap()\n return out\n\n def _get_interp1d(self,* , kind='linear', copy=True, bounds_error=False,\n fill_value=np.nan, assume_sorted=None):\n \"\"\"returns a scipy interp1d object, extended to have values at all epoch\n boundaries!\n \"\"\"\n\n if assume_sorted is None:\n assume_sorted = utils.is_sorted(self.time)\n\n if self.n_signals > 1:\n axis = 1\n else:\n axis = -1\n\n time = self.time\n yvals = self._unwrap(self._ydata_rowsig)\n lengths = self.lengths\n empty_epoch_ids = np.argwhere(lengths==0).squeeze().tolist()\n first_timestamps_per_epoch_idx = np.insert(np.cumsum(lengths[:-1]),0,0)\n first_timestamps_per_epoch_idx[empty_epoch_ids] = 0\n last_timestamps_per_epoch_idx = np.cumsum(lengths)-1\n last_timestamps_per_epoch_idx[empty_epoch_ids] = 0\n first_timestamps_per_epoch = self.time[first_timestamps_per_epoch_idx]\n last_timestamps_per_epoch = self.time[last_timestamps_per_epoch_idx]\n\n boundary_times = []\n boundary_vals = []\n for ii, (start, stop) in enumerate(self.support.time):\n if lengths[ii] == 0:\n continue\n if first_timestamps_per_epoch[ii] > start:\n boundary_times.append(start)\n boundary_vals.append(yvals[:,first_timestamps_per_epoch_idx[ii]])\n # print('adding {} at time {}'.format(yvals[:,first_timestamps_per_epoch_idx[ii]], start))\n if last_timestamps_per_epoch[ii] < stop:\n boundary_times.append(stop)\n boundary_vals.append(yvals[:,last_timestamps_per_epoch_idx[ii]])\n\n if boundary_times:\n insert_locs = np.searchsorted(time, boundary_times)\n time = np.insert(time, insert_locs, boundary_times)\n yvals = np.insert(yvals, insert_locs, np.array(boundary_vals).T, axis=1)\n\n time, unique_idx = np.unique(time, return_index=True)\n yvals = yvals[:,unique_idx]\n\n f = interpolate.interp1d(x=time,\n y=yvals,\n kind=kind,\n axis=axis,\n copy=copy,\n bounds_error=bounds_error,\n fill_value=fill_value,\n assume_sorted=assume_sorted)\n return f\n\n def asarray(self,*, where=None, at=None, kind='linear', copy=True,\n bounds_error=False, fill_value=np.nan, assume_sorted=None,\n recalculate=False, store_interp=True, n_points=None,\n split_by_epoch=False):\n \"\"\"returns a ydata_like array at requested points.\n\n Parameters\n ----------\n where : array_like or tuple, optional\n array corresponding to np where condition\n e.g., where=(ydata[1,:]>5) or tuple where=(speed>5,tspeed)\n at : array_like, optional\n Array of oints to evaluate array at. If none given, use\n self.time together with 'where' if applicable.\n n_points: int, optional\n Number of points to interplate at. These points will be\n distributed uniformly from self.support.start to stop.\n split_by_epoch: bool\n If True, separate arrays by epochs and return in a list.\n Returns\n -------\n out : (array, array)\n namedtuple tuple (xvals, yvals) of arrays, where xvals is an\n array of time points for which (interpolated) ydata are\n returned.\n \"\"\"\n\n # TODO: implement splitting by epoch\n\n if split_by_epoch:\n raise NotImplementedError(\"split_by_epoch not yet implemented...\")\n\n XYArray = namedtuple('XYArray', ['xvals', 'yvals'])\n\n if at is None and where is None and split_by_epoch is False and n_points is None:\n xyarray = XYArray(self.time, self._ydata_rowsig.squeeze())\n return xyarray\n\n if where is not None:\n assert at is None and n_points is None, \"'where', 'at', and 'n_points' cannot be used at the same time\"\n if isinstance(where, tuple):\n y = np.array(where[1]).squeeze()\n x = where[0]\n assert len(x) == len(y), \"'where' condition and array must have same number of elements\"\n at = y[x]\n else:\n x = np.asanyarray(where).squeeze()\n assert len(x) == len(self.time), \"'where' condition must have same number of elements as self.time\"\n at = self.time[x]\n elif at is not None:\n assert n_points is None, \"'at' and 'n_points' cannot be used at the same time\"\n else:\n at = np.linspace(self.support.start, self.support.stop, n_points)\n\n # if we made it this far, either at or where has been specified, and at is now well defined.\n\n kwargs = {'kind':kind,\n 'copy':copy,\n 'bounds_error':bounds_error,\n 'fill_value':fill_value,\n 'assume_sorted':assume_sorted}\n\n # retrieve an existing, or construct a new interpolation object\n if recalculate:\n interpobj = self._get_interp1d(**kwargs)\n else:\n try:\n interpobj = self._interp\n if interpobj is None:\n interpobj = self._get_interp1d(**kwargs)\n except AttributeError: # does not exist yet\n interpobj = self._get_interp1d(**kwargs)\n\n # store interpolation object, if desired\n if store_interp:\n self._interp = interpobj\n\n # do the actual interpolation\n try:\n if self.is_wrapped:\n out = self._wrap(interpobj(at))\n else:\n out = interpobj(at)\n except SystemError:\n interpobj = self._get_interp1d(**kwargs)\n if store_interp:\n self._interp = interpobj\n if self.is_wrapped:\n out = self._wrap(interpobj(at))\n else:\n out = interpobj(at)\n\n # TODO: set all values outside of self.support to fill_value\n\n xyarray = XYArray(xvals=np.asanyarray(at), yvals=np.asanyarray(out).squeeze())\n return xyarray\n"
] |
[
[
"numpy.max",
"numpy.histogram",
"numpy.append",
"numpy.full",
"numpy.isnan",
"numpy.array",
"numpy.zeros",
"numpy.log",
"numpy.exp",
"numpy.asanyarray",
"numpy.nanmean",
"numpy.transpose",
"numpy.arange",
"numpy.abs",
"numpy.cumsum",
"numpy.argmax",
"numpy.insert",
"numpy.vstack"
],
[
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.asarray",
"numpy.asanyarray",
"numpy.vstack",
"numpy.append",
"numpy.cumsum",
"numpy.searchsorted",
"numpy.linspace",
"numpy.argwhere",
"numpy.insert",
"numpy.unique",
"numpy.atleast_2d"
]
] |
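The `nelpy/decoding.py` row above implements Bayesian decoding of binned spike trains: for each window, `decode1D`/`decode2D` accumulate the Poisson log-likelihood `sum_i n_i * log(rate_i(x)) - ds*w * sum_i rate_i(x)` (the `lfx` and `eterm` terms in the source) and then exponentiate and normalize over position bins. A minimal self-contained sketch of that single-window computation, using made-up counts and rates rather than nelpy's `bst`/`TuningCurve1D` objects, and the exp-normalize trick that `decode2D` cites:

```python
import numpy as np

n_units, n_xbins, ds, w = 3, 5, 0.02, 1                  # toy sizes; ds*w = window length (s)
ratemap = np.random.rand(n_units, n_xbins) * 10 + 1e-3   # firing rates in Hz, kept positive
obs = np.array([2, 0, 1])                                # spike counts per unit in one window

lfx = np.log(ratemap)                  # log rates, shape (n_units, n_xbins)
eterm = -ratemap.sum(axis=0) * ds * w  # -ds*w * sum_i rate_i(x), shape (n_xbins,)
loglik = (obs[:, None] * lfx).sum(axis=0) + eterm

posterior = np.exp(loglik - loglik.max())  # exp-normalize for numerical stability
posterior /= posterior.sum()
print(posterior)  # a distribution over position bins, sums to 1
```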
griff4692/fairseq
|
[
"3a1b078e93d6b359282868d8369eb97ed9fdb2e5"
] |
[
"fairseq/trainer.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\"\"\"\nTrain a network across multiple GPUs.\n\"\"\"\n\nimport contextlib\nimport logging\nimport sys\nimport time\nfrom argparse import Namespace\nfrom itertools import chain\nfrom typing import Any, Dict, List\n\nimport torch\nfrom fairseq import checkpoint_utils, models, optim, utils\nfrom fairseq.dataclass.configs import FairseqConfig\nfrom fairseq.dataclass.utils import convert_namespace_to_omegaconf\nfrom fairseq.distributed import utils as distributed_utils\nfrom fairseq.file_io import PathManager\nfrom fairseq.logging import meters, metrics\nfrom fairseq.nan_detector import NanDetector\nfrom fairseq.optim import lr_scheduler\n\nfrom omegaconf import OmegaConf\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Trainer(object):\n \"\"\"Main class for data parallel training.\n\n This class supports synchronous distributed data parallel training,\n where multiple workers each have a full model replica and gradients\n are accumulated across workers before each update. We use\n :class:`~torch.nn.parallel.DistributedDataParallel` to handle\n communication of the gradients across workers.\n \"\"\"\n\n def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None):\n\n if isinstance(cfg, Namespace):\n logger.warning(\n \"argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf\"\n )\n cfg = convert_namespace_to_omegaconf(cfg)\n\n self.cfg = cfg\n self.task = task\n\n # catalog shared parameters\n shared_params = _catalog_shared_params(model)\n self.tpu = cfg.common.tpu\n self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu\n if self.cuda:\n self.device = torch.device(\"cuda\")\n elif self.tpu:\n self.device = utils.get_tpu_device()\n else:\n self.device = torch.device(\"cpu\")\n\n if self.cfg.distributed_training.ddp_backend == \"fully_sharded\":\n if self.cfg.common.bf16:\n raise ValueError(\n \"FullyShardedDataParallel is not compatible with --bf16 or \"\n \"--memory-efficient-bf16\"\n )\n if self.cfg.distributed_training.zero_sharding != \"none\":\n raise ValueError(\n \"FullyShardedDataParallel is not compatible with --zero-sharding \"\n \"option (it's already built in)\"\n )\n else:\n if self.cfg.distributed_training.cpu_offload:\n raise ValueError(\"--cpu-offload requires --ddp-backend=fully_sharded\")\n\n # copy model and criterion to current device/dtype\n self._criterion = criterion\n self._model = model\n if cfg.distributed_training.ddp_backend != \"fully_sharded\":\n if cfg.common.fp16:\n self._criterion = self._criterion.half()\n self._model = self._model.half()\n elif cfg.common.bf16:\n self._criterion = self._criterion.to(dtype=torch.bfloat16)\n self._model = self._model.to(dtype=torch.bfloat16)\n if (\n not cfg.distributed_training.pipeline_model_parallel\n # the DistributedFairseqModel wrapper will handle moving to device,\n # so only handle cases which don't use the wrapper\n and not self.use_distributed_wrapper\n ):\n self._criterion = self._criterion.to(device=self.device)\n self._model = self._model.to(device=self.device)\n self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel\n self.last_device = None\n if self.cuda and self.pipeline_model_parallel:\n self.last_device = torch.device(\n cfg.distributed_training.pipeline_devices[-1]\n )\n\n # check that shared parameters are preserved after device transfer\n for shared_param in shared_params:\n ref = _get_module_by_path(self._model, shared_param[0])\n for path in shared_param[1:]:\n logger.info(\n \"detected shared parameter: {} <- {}\".format(shared_param[0], path)\n )\n _set_module_by_path(self._model, path, ref)\n\n self._dummy_batch = None # indicates we don't have a dummy batch at first\n self._lr_scheduler = None\n self._num_updates = 0\n self._num_xla_compiles = 0 # for TPUs\n self._optim_history = None\n self._optimizer = None\n self._warn_once = set()\n self._wrapped_criterion = None\n self._wrapped_model = None\n\n # TODO(myleott): support tpu\n if self.cuda and self.data_parallel_world_size > 1:\n self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)\n else:\n self._grad_norm_buf = None\n\n self.quantizer = quantizer\n if self.quantizer is not None:\n self.quantizer.set_trainer(self)\n\n # get detailed cuda environment\n if self.cuda:\n self.cuda_env = utils.CudaEnvironment()\n if self.data_parallel_world_size > 1:\n self.cuda_env_arr = distributed_utils.all_gather_list(\n self.cuda_env, group=distributed_utils.get_global_group()\n )\n else:\n self.cuda_env_arr = [self.cuda_env]\n if self.data_parallel_rank == 0:\n utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr)\n else:\n self.cuda_env = None\n self.cuda_env_arr = None\n\n metrics.log_start_time(\"wall\", priority=790, round=0)\n\n self._start_time = time.time()\n self._previous_training_time = 0\n self._cumulative_training_time = None\n\n def reinitialize(self):\n \"\"\"Reinitialize the Trainer, typically after model params change.\"\"\"\n self._lr_scheduler = None\n self._optimizer = None\n self._wrapped_criterion = None\n self._wrapped_model = None\n\n @property\n def data_parallel_world_size(self):\n if self.cfg.distributed_training.distributed_world_size == 1:\n return 1\n return distributed_utils.get_data_parallel_world_size()\n\n @property\n def data_parallel_process_group(self):\n return distributed_utils.get_data_parallel_group()\n\n @property\n def data_parallel_rank(self):\n if self.cfg.distributed_training.distributed_world_size == 1:\n return 0\n return distributed_utils.get_data_parallel_rank()\n\n @property\n def is_data_parallel_master(self):\n # NOTE: this returns true for all model parallel replicas with data\n # parallel rank 0\n return self.data_parallel_rank == 0\n\n @property\n def use_distributed_wrapper(self) -> bool:\n return (\n self.data_parallel_world_size > 1\n and not self.cfg.optimization.use_bmuf\n ) or (\n self.cfg.distributed_training.ddp_backend == \"fully_sharded\"\n and self.cfg.distributed_training.cpu_offload\n )\n\n @property\n def should_save_checkpoint_on_current_rank(self) -> bool:\n \"\"\"Indicates whether to save checkpoints on the current DDP rank.\"\"\"\n if self.cfg.distributed_training.ddp_backend == \"fully_sharded\":\n return True\n else:\n return self.is_data_parallel_master\n\n @property\n def checkpoint_suffix(self) -> str:\n \"\"\"Suffix to add to the checkpoint file name.\"\"\"\n if self.cfg.distributed_training.ddp_backend == \"fully_sharded\":\n return self.cfg.checkpoint.checkpoint_suffix + \"-shard{0}\".format(self.data_parallel_rank)\n else:\n return self.cfg.checkpoint.checkpoint_suffix or \"\"\n\n @property\n def criterion(self):\n if self._wrapped_criterion is None:\n if (\n utils.has_parameters(self._criterion)\n and self.use_distributed_wrapper\n ):\n self._wrapped_criterion = models.DistributedFairseqModel(\n self.cfg.distributed_training,\n self._criterion,\n process_group=self.data_parallel_process_group,\n device=self.device,\n )\n else:\n self._wrapped_criterion = self._criterion\n return self._wrapped_criterion\n\n @property\n def model(self):\n if self._wrapped_model is None:\n if self.use_distributed_wrapper:\n self._wrapped_model = models.DistributedFairseqModel(\n self.cfg.distributed_training,\n self._model,\n process_group=self.data_parallel_process_group,\n device=self.device,\n )\n else:\n self._wrapped_model = self._model\n return self._wrapped_model\n\n @property\n def optimizer(self):\n if self._optimizer is None:\n self._build_optimizer()\n return self._optimizer\n\n @property\n def lr_scheduler(self):\n if self._lr_scheduler is None:\n self._build_optimizer() # this will initialize self._lr_scheduler\n return self._lr_scheduler\n\n def _build_optimizer(self):\n params = list(\n filter(\n lambda p: p.requires_grad,\n chain(self.model.parameters(), self.criterion.parameters()),\n )\n )\n\n if (\n self.cfg.distributed_training.ddp_backend == \"fully_sharded\"\n and self.cfg.common.fp16\n ):\n # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper,\n # mostly for the grad scaling. But if we don't have the\n # --memory-efficient-fp16 flag set, then we're effectively doing\n # regular --fp16 and can allow the use of optimizers that would\n # otherwise be unsupported by MemoryEfficientFP16Optimizer.\n allow_unsupported = not self.cfg.common.memory_efficient_fp16\n self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(\n self.cfg, params, allow_unsupported=allow_unsupported\n )\n elif self.cfg.common.fp16 or self.cfg.common.bf16:\n if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:\n logger.info(\n \"NOTE: your device does NOT support faster training with --fp16, \"\n \"please switch to FP32 which is likely to be faster\"\n )\n if (\n self.cfg.common.memory_efficient_fp16\n or self.cfg.common.memory_efficient_bf16\n ):\n self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(\n self.cfg, params\n )\n else:\n self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params)\n else:\n if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:\n logger.info(\"NOTE: your device may support faster training with --fp16\")\n self._optimizer = optim.build_optimizer(self.cfg.optimizer, params)\n\n if self.cfg.distributed_training.ddp_backend == \"fully_sharded\":\n assert not self.cfg.optimization.use_bmuf, \\\n \"--ddp-backend=fully_sharded is not compatible with BMUF\"\n assert self._optimizer.supports_flat_params, (\n \"--ddp-backend=fully_sharded is only compatible with pointwise \"\n \"optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). \"\n \"However, the sharding will result in slightly different results when \"\n \"using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)\"\n )\n\n if self.cfg.optimization.use_bmuf:\n self._optimizer = optim.FairseqBMUF(\n self.cfg.bmuf,\n self._optimizer,\n )\n\n if self.cfg.distributed_training.zero_sharding == \"os\":\n if (\n self.cfg.common.fp16\n and not self.cfg.common.memory_efficient_fp16\n and not self.cfg.common.memory_efficient_bf16\n ) and not self.cfg.common.fp16_no_flatten_grads:\n raise ValueError(\n \"ZeRO is incomptabile with fp16 and flattened grads. \"\n \"Please use --fp16-no-flatten-grads\"\n )\n else:\n optim.shard_(self._optimizer, self.data_parallel_process_group)\n\n # We should initialize the learning rate scheduler immediately after\n # building the optimizer, so that the initial learning rate is set.\n self._lr_scheduler = lr_scheduler.build_lr_scheduler(\n self.cfg.lr_scheduler,\n self.optimizer,\n )\n self._lr_scheduler.step_update(0)\n\n def consolidate_optimizer(self):\n \"\"\"For OSS, we need to consolidate the state dict.\"\"\"\n if hasattr(self.optimizer.optimizer, \"consolidate_state_dict\"):\n self.optimizer.optimizer.consolidate_state_dict()\n\n def state_dict(self):\n state_dict = {\n \"args\": None, # legacy\n \"cfg\": (\n OmegaConf.to_container(self.cfg)\n if OmegaConf.is_config(self.cfg) else self.cfg\n ),\n \"model\": self.model.state_dict(),\n \"criterion\": (\n self.criterion.state_dict()\n if utils.has_parameters(self.criterion) else None\n ),\n \"optimizer_history\": (self._optim_history or [])\n + [\n {\n \"criterion_name\": self.get_criterion().__class__.__name__,\n \"optimizer_name\": self.optimizer.__class__.__name__,\n \"lr_scheduler_state\": self.lr_scheduler.state_dict(),\n \"num_updates\": self.get_num_updates(),\n }\n ],\n \"task_state\": self.task.state_dict() if self.task is not None else {},\n \"extra_state\": {\n \"metrics\": metrics.state_dict(),\n \"previous_training_time\": self.cumulative_training_time(),\n }\n }\n if not self.cfg.checkpoint.no_save_optimizer_state:\n state_dict[\"last_optimizer_state\"] = self.optimizer.state_dict()\n return state_dict\n\n def save_checkpoint(self, filename, extra_state):\n \"\"\"Save all training state in a checkpoint file.\"\"\"\n logger.info(f\"Saving checkpoint to {filename}\")\n # call state_dict on all ranks in case it needs internal communication\n state_dict = utils.move_to_cpu(self.state_dict())\n state_dict[\"extra_state\"].update(extra_state)\n if self.should_save_checkpoint_on_current_rank:\n checkpoint_utils.torch_persistent_save(\n state_dict,\n filename,\n async_write=self.cfg.checkpoint.write_checkpoints_asynchronously,\n )\n logger.info(f\"Finished saving checkpoint to {filename}\")\n\n def load_checkpoint(\n self,\n filename,\n reset_optimizer=False,\n reset_lr_scheduler=False,\n optimizer_overrides=None,\n reset_meters=False,\n ):\n \"\"\"\n Load all training state from a checkpoint file.\n rank = 0 will load the checkpoint, and then broadcast it to all\n other ranks.\n \"\"\"\n extra_state, self._optim_history, last_optim_state = None, [], None\n\n logger.info(f\"Preparing to load checkpoint {filename}\")\n is_distributed = self.data_parallel_world_size > 1\n bexists = PathManager.isfile(filename)\n if bexists:\n load_on_all_ranks = (\n self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks\n # TPUs don't support broadcast yet, so load checkpoints\n # on every worker for now\n or self.tpu\n # FSDP requires loading checkpoint shards on all ranks\n or self.cfg.distributed_training.ddp_backend == \"fully_sharded\"\n )\n\n if load_on_all_ranks or self.data_parallel_rank == 0:\n state = checkpoint_utils.load_checkpoint_to_cpu(\n filename, load_on_all_ranks=load_on_all_ranks\n )\n last_optim_state = state.get(\"last_optimizer_state\", None)\n\n # If doing zero_sharding, do not broadcast global optimizer\n # state. Later we will broadcast sharded states to each rank\n # to avoid memory from exploding.\n if (\n not load_on_all_ranks\n and self.cfg.distributed_training.zero_sharding == \"os\"\n and \"last_optimizer_state\" in state\n and is_distributed\n ):\n state[\"last_optimizer_state\"] = \"SHARDED\"\n else:\n last_optim_state = None\n state = None\n\n if is_distributed and not load_on_all_ranks:\n state = distributed_utils.broadcast_object(\n state,\n src_rank=0,\n group=self.data_parallel_process_group,\n dist_device=self.device,\n )\n if self.data_parallel_rank > 0:\n last_optim_state = state.get(\"last_optimizer_state\", None)\n\n # load model parameters\n try:\n self.model.load_state_dict(\n state[\"model\"], strict=True, model_cfg=self.cfg.model\n )\n # save memory for later steps\n del state[\"model\"]\n if utils.has_parameters(self.get_criterion()):\n self.get_criterion().load_state_dict(\n state[\"criterion\"], strict=True\n )\n del state[\"criterion\"]\n\n except Exception:\n raise Exception(\n \"Cannot load model parameters from checkpoint {}; \"\n \"please ensure that the architectures match.\".format(filename)\n )\n extra_state = state[\"extra_state\"]\n self._optim_history = state[\"optimizer_history\"]\n\n if last_optim_state is not None and not reset_optimizer:\n # rebuild optimizer after loading model, since params may have changed\n self._build_optimizer()\n\n # only reload optimizer and lr_scheduler if they match\n last_optim = self._optim_history[-1]\n assert (\n last_optim[\"criterion_name\"] == self.get_criterion().__class__.__name__\n ), f\"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}\"\n assert (\n last_optim[\"optimizer_name\"] == self.optimizer.__class__.__name__\n ), f\"Optimizer does not match; please reset the optimizer (--reset-optimizer). {last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}\"\n\n if not reset_lr_scheduler:\n self.lr_scheduler.load_state_dict(last_optim[\"lr_scheduler_state\"])\n\n if not load_on_all_ranks and is_distributed:\n last_optim_state = self.optimizer.broadcast_global_state_dict(\n last_optim_state\n )\n self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)\n\n self.set_num_updates(last_optim[\"num_updates\"])\n\n if extra_state is not None:\n itr_state = extra_state[\"train_iterator\"]\n epoch = itr_state[\"epoch\"]\n\n if \"previous_training_time\" in extra_state:\n self._previous_training_time = extra_state[\"previous_training_time\"]\n self._start_time = time.time()\n\n self.lr_step(epoch)\n\n if itr_state.get(\"version\", 1) >= 2 and itr_state[\"iterations_in_epoch\"] == 0:\n # reset meters at start of epoch\n reset_meters = True\n\n if \"metrics\" in extra_state and not reset_meters:\n metrics.load_state_dict(extra_state[\"metrics\"])\n\n # reset TimeMeters, since their start times don't make sense anymore\n for meter in metrics.get_meters(\"default\"):\n if isinstance(meter, meters.TimeMeter):\n meter.reset()\n\n logger.info(\n \"Loaded checkpoint {} (epoch {} @ {} updates)\".format(\n filename, epoch, self.get_num_updates()\n )\n )\n\n else:\n logger.info(\"No existing checkpoint found {}\".format(filename))\n\n return extra_state\n\n def get_train_iterator(\n self,\n epoch,\n combine=True,\n load_dataset=True,\n data_selector=None,\n shard_batch_itr=True,\n disable_iterator_cache=False,\n ):\n \"\"\"Return an EpochBatchIterator over the training set for a given epoch.\"\"\"\n if load_dataset:\n logger.info(\"loading train data for epoch {}\".format(epoch))\n self.task.load_dataset(\n self.cfg.dataset.train_subset,\n epoch=epoch,\n combine=combine,\n data_selector=data_selector,\n tpu=self.tpu,\n )\n batch_iterator = self.task.get_batch_iterator(\n dataset=self.task.dataset(self.cfg.dataset.train_subset),\n max_tokens=self.cfg.dataset.max_tokens,\n max_sentences=self.cfg.dataset.batch_size,\n max_positions=utils.resolve_max_positions(\n self.task.max_positions(),\n self.model.max_positions(),\n self.cfg.dataset.max_tokens,\n ),\n ignore_invalid_inputs=True,\n required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,\n seed=self.cfg.common.seed,\n num_shards=self.data_parallel_world_size if shard_batch_itr else 1,\n shard_id=self.data_parallel_rank if shard_batch_itr else 0,\n num_workers=self.cfg.dataset.num_workers,\n epoch=epoch,\n data_buffer_size=self.cfg.dataset.data_buffer_size,\n disable_iterator_cache=disable_iterator_cache,\n )\n self.reset_dummy_batch(batch_iterator.first_batch)\n return batch_iterator\n\n def get_valid_iterator(\n self,\n subset,\n disable_iterator_cache=False,\n ):\n \"\"\"Return an EpochBatchIterator over given validation subset for a given epoch.\"\"\"\n batch_iterator = self.task.get_batch_iterator(\n dataset=self.task.dataset(subset),\n max_tokens=self.cfg.dataset.max_tokens_valid,\n max_sentences=self.cfg.dataset.batch_size_valid,\n max_positions=utils.resolve_max_positions(\n self.task.max_positions(),\n self.model.max_positions(),\n ),\n ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,\n required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,\n seed=self.cfg.common.seed,\n num_shards=self.data_parallel_world_size,\n shard_id=self.data_parallel_rank,\n num_workers=self.cfg.dataset.num_workers,\n # always pass a fixed \"epoch\" to keep validation data consistent\n # across training epochs\n epoch=1,\n data_buffer_size=self.cfg.dataset.data_buffer_size,\n disable_iterator_cache=disable_iterator_cache,\n )\n self.reset_dummy_batch(batch_iterator.first_batch)\n return batch_iterator\n\n def begin_epoch(self, epoch):\n \"\"\"Called at the beginning of each epoch.\"\"\"\n logger.info(\"begin training epoch {}\".format(epoch))\n\n self.lr_step_begin_epoch(epoch)\n\n if self.quantizer is not None:\n self.quantizer.begin_epoch(epoch)\n\n # task specific setup per epoch\n self.task.begin_epoch(epoch, self.get_model())\n\n if self.tpu:\n import torch_xla.core.xla_model as xm\n\n xm.rendezvous(\"begin_epoch\") # wait for all workers\n xm.mark_step()\n\n def begin_valid_epoch(self, epoch):\n \"\"\"Called at the beginning of each validation epoch.\"\"\"\n\n # task specific setup per validation epoch\n self.task.begin_valid_epoch(epoch, self.get_model())\n\n def reset_dummy_batch(self, batch):\n self._dummy_batch = batch\n\n @metrics.aggregate(\"train\")\n def train_step(self, samples, raise_oom=False):\n \"\"\"Do forward, backward and parameter update.\"\"\"\n self._set_seed()\n self.model.train()\n self.criterion.train()\n self.zero_grad()\n\n metrics.log_start_time(\"train_wall\", priority=800, round=0)\n\n # forward and backward pass\n logging_outputs, sample_size, ooms = [], 0, 0\n for i, sample in enumerate(samples): # delayed update loop\n sample, is_dummy_batch = self._prepare_sample(sample)\n\n def maybe_no_sync():\n \"\"\"\n Whenever *samples* contains more than one mini-batch, we\n want to accumulate gradients locally and only call\n all-reduce in the last backwards pass.\n \"\"\"\n if (\n self.data_parallel_world_size > 1\n and hasattr(self.model, \"no_sync\")\n and i < len(samples) - 1\n ):\n return self.model.no_sync()\n else:\n return contextlib.ExitStack() # dummy contextmanager\n\n try:\n with maybe_no_sync():\n # forward and backward\n loss, sample_size_i, logging_output = self.task.train_step(\n sample=sample,\n model=self.model,\n criterion=self.criterion,\n optimizer=self.optimizer,\n update_num=self.get_num_updates(),\n ignore_grad=is_dummy_batch,\n )\n del loss\n\n logging_outputs.append(logging_output)\n sample_size += sample_size_i\n\n # emptying the CUDA cache after the first step can\n # reduce the chance of OOM\n if self.cuda and self.get_num_updates() == 0:\n torch.cuda.empty_cache()\n except RuntimeError as e:\n if \"out of memory\" in str(e):\n self._log_oom(e)\n if raise_oom:\n raise e\n logger.warning(\n \"attempting to recover from OOM in forward/backward pass\"\n )\n ooms += 1\n self.zero_grad()\n if self.cuda:\n torch.cuda.empty_cache()\n if self.cfg.distributed_training.distributed_world_size == 1:\n return None\n else:\n raise e\n\n if self.tpu and i < len(samples) - 1:\n # tpu-comment: every XLA operation before marking step is\n # appended to the IR graph, and processing too many batches\n # before marking step can lead to OOM errors.\n # To handle gradient accumulation use case, we explicitly\n # mark step here for every forward pass without a backward pass\n self._xla_markstep_and_send_to_cpu()\n\n if is_dummy_batch:\n if torch.is_tensor(sample_size):\n sample_size.zero_()\n else:\n sample_size *= 0.0\n\n if torch.is_tensor(sample_size):\n sample_size = sample_size.float()\n else:\n sample_size = float(sample_size)\n\n # gather logging outputs from all replicas\n if self._sync_stats():\n train_time = self._local_cumulative_training_time()\n logging_outputs, (\n sample_size,\n ooms,\n total_train_time,\n ) = self._aggregate_logging_outputs(\n logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch\n )\n self._cumulative_training_time = (\n total_train_time / self.data_parallel_world_size\n )\n\n overflow = False\n try:\n with torch.autograd.profiler.record_function(\"reduce-grads\"):\n # reduce gradients across workers\n self.optimizer.all_reduce_grads(self.model)\n if utils.has_parameters(self.criterion):\n self.optimizer.all_reduce_grads(self.criterion)\n\n with torch.autograd.profiler.record_function(\"multiply-grads\"):\n # multiply gradients by (data_parallel_size / sample_size) since\n # DDP normalizes by the number of data parallel workers for\n # improved fp16 precision.\n # Thus we get (sum_of_gradients / sample_size) at the end.\n # In case of fp16, this step also undoes loss scaling.\n # (Debugging note: Some optimizers perform this scaling on the\n # fly, so inspecting model.parameters() or optimizer.params may\n # still show the original, unscaled gradients.)\n numer = (\n self.data_parallel_world_size\n if not self.cfg.optimization.use_bmuf or self._sync_stats()\n else 1\n )\n self.optimizer.multiply_grads(numer / (sample_size or 1.0))\n # Note: (sample_size or 1.0) handles the case of a zero gradient, in a\n # way that avoids CPU/device transfers in case sample_size is a GPU or\n # TPU object. The assumption is that the gradient itself is also 0.\n\n with torch.autograd.profiler.record_function(\"clip-grads\"):\n # clip grads\n grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm)\n\n # check that grad norms are consistent across workers\n # on tpu check tensor is slow\n if not self.tpu:\n if (\n not self.cfg.optimization.use_bmuf\n and self.cfg.distributed_training.ddp_backend != \"slow_mo\"\n ):\n self._check_grad_norms(grad_norm)\n if not torch.isfinite(grad_norm).all():\n # check local gradnorm single GPU case, trigger NanDetector\n raise FloatingPointError(\"gradients are Nan/Inf\")\n\n with torch.autograd.profiler.record_function(\"optimizer\"):\n # take an optimization step\n self.task.optimizer_step(\n self.optimizer, model=self.model, update_num=self.get_num_updates()\n )\n\n except FloatingPointError:\n # re-run the forward and backward pass with hooks attached to print\n # out where it fails\n self.zero_grad()\n with NanDetector(self.get_model()):\n for _, sample in enumerate(samples):\n sample, _ = self._prepare_sample(sample)\n self.task.train_step(\n sample,\n self.model,\n self.criterion,\n self.optimizer,\n self.get_num_updates(),\n ignore_grad=False,\n )\n raise\n except OverflowError as e:\n overflow = True\n logger.info(f\"NOTE: gradient overflow detected, ignoring gradient, {str(e)}\")\n grad_norm = torch.tensor(0.0).cuda()\n self.zero_grad()\n except RuntimeError as e:\n if \"out of memory\" in str(e):\n self._log_oom(e)\n logger.error(\"OOM during optimization, irrecoverable\")\n raise e\n\n # Some distributed wrappers (e.g., SlowMo) need access to the optimizer\n # after the step\n if hasattr(self.model, \"perform_additional_optimizer_actions\"):\n if hasattr(self.optimizer, \"fp32_params\"):\n self.model.perform_additional_optimizer_actions(\n self.optimizer.optimizer, self.optimizer.fp32_params\n )\n else:\n self.model.perform_additional_optimizer_actions(\n self.optimizer.optimizer\n )\n\n logging_output = None\n if not overflow or self.cfg.distributed_training.ddp_backend == \"slow_mo\":\n self.set_num_updates(self.get_num_updates() + 1)\n\n if self.tpu:\n import torch_xla.core.xla_model as xm\n\n # mark step on TPUs\n self._xla_markstep_and_send_to_cpu()\n\n # only log stats every log_interval steps\n # this causes wps to be misreported when log_interval > 1\n logging_output = {}\n if self.get_num_updates() % self.cfg.common.log_interval == 0:\n # log memory usage\n mem_info = xm.get_memory_info(self.device)\n gb_free = mem_info[\"kb_free\"] / 1024 / 1024\n gb_total = mem_info[\"kb_total\"] / 1024 / 1024\n metrics.log_scalar(\n \"gb_free\", gb_free, priority=1500, round=1, weight=0\n )\n metrics.log_scalar(\n \"gb_total\", gb_total, priority=1600, round=1, weight=0\n )\n logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)\n logging_output = self._reduce_and_log_stats(\n logging_outputs, sample_size, grad_norm\n )\n\n # log whenever there's an XLA compilation, since these\n # slow down training and may indicate opportunities for\n # optimization\n self._check_xla_compilation()\n else:\n if self.cuda and self.cuda_env is not None:\n # log minimum free memory over the iteration\n gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024\n torch.cuda.reset_peak_memory_stats()\n gb_free = self.cuda_env.total_memory_in_GB - gb_used\n metrics.log_scalar(\n \"gb_free\", gb_free, priority=1500, round=1, weight=0\n )\n\n # log stats\n logging_output = self._reduce_and_log_stats(\n logging_outputs, sample_size, grad_norm\n )\n\n # clear CUDA cache to reduce memory fragmentation\n if (\n self.cuda\n and self.cfg.common.empty_cache_freq > 0\n and (\n (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1)\n % self.cfg.common.empty_cache_freq\n )\n == 0\n ):\n torch.cuda.empty_cache()\n\n if self.cfg.common.fp16:\n metrics.log_scalar(\n \"loss_scale\",\n self.optimizer.scaler.loss_scale,\n priority=700,\n round=4,\n weight=0,\n )\n\n metrics.log_stop_time(\"train_wall\")\n return logging_output\n\n @metrics.aggregate(\"valid\")\n def valid_step(self, sample, raise_oom=False):\n \"\"\"Do forward pass in evaluation mode.\"\"\"\n if self.tpu:\n import torch_xla.core.xla_model as xm\n xm.rendezvous(\"valid_step\") # wait for all workers\n\n with torch.no_grad():\n self.model.eval()\n self.criterion.eval()\n\n sample, is_dummy_batch = self._prepare_sample(sample)\n\n try:\n _loss, sample_size, logging_output = self.task.valid_step(\n sample, self.model, self.criterion\n )\n except RuntimeError as e:\n if \"out of memory\" in str(e):\n self._log_oom(e)\n if not raise_oom:\n logger.warning(\n \"ran out of memory in validation step, retrying batch\"\n )\n for p in self.model.parameters():\n if p.grad is not None:\n p.grad = None # free some memory\n if self.cuda:\n torch.cuda.empty_cache()\n return self.valid_step(sample, raise_oom=True)\n raise e\n\n logging_outputs = [logging_output]\n if is_dummy_batch:\n if torch.is_tensor(sample_size):\n sample_size.zero_()\n else:\n sample_size *= 0.0\n\n # gather logging outputs from all replicas\n if self.data_parallel_world_size > 1:\n logging_outputs, (sample_size,) = self._aggregate_logging_outputs(\n logging_outputs,\n sample_size,\n ignore=is_dummy_batch,\n )\n\n # log validation stats\n if self.tpu:\n logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs)\n logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)\n\n return logging_output\n\n def zero_grad(self):\n self.optimizer.zero_grad()\n\n def lr_step_begin_epoch(self, epoch):\n \"\"\"Adjust the learning rate at the beginning of the epoch.\"\"\"\n self.lr_scheduler.step_begin_epoch(epoch)\n # prefer updating the LR based on the number of steps\n return self.lr_step_update()\n\n def lr_step(self, epoch, val_loss=None):\n \"\"\"Adjust the learning rate at the end of the epoch.\"\"\"\n self.lr_scheduler.step(epoch, val_loss)\n # prefer updating the LR based on the number of steps\n return self.lr_step_update()\n\n def lr_step_update(self):\n \"\"\"Update the learning rate after each update.\"\"\"\n new_lr = self.lr_scheduler.step_update(self.get_num_updates())\n if isinstance(new_lr, dict):\n for k, v in new_lr.items():\n metrics.log_scalar(f\"lr_{k}\", v, weight=0, priority=300)\n new_lr = new_lr.get(\"default\", next(iter(new_lr.values())))\n else:\n metrics.log_scalar(\"lr\", new_lr, weight=0, priority=300)\n return new_lr\n\n def get_lr(self):\n \"\"\"Get the current learning rate.\"\"\"\n return self.optimizer.get_lr()\n\n def get_model(self):\n \"\"\"Get the (non-wrapped) model instance.\"\"\"\n return self._model\n\n def get_criterion(self):\n \"\"\"Get the (non-wrapped) criterion instance.\"\"\"\n return self._criterion\n\n def get_meter(self, name):\n \"\"\"[deprecated] Get a specific meter by name.\"\"\"\n from fairseq import meters\n\n if \"get_meter\" not in self._warn_once:\n self._warn_once.add(\"get_meter\")\n utils.deprecation_warning(\n \"Trainer.get_meter is deprecated. Please use fairseq.metrics instead.\"\n )\n\n train_meters = metrics.get_meters(\"train\")\n if train_meters is None:\n train_meters = {}\n\n if name == \"train_loss\" and \"loss\" in train_meters:\n return train_meters[\"loss\"]\n elif name == \"train_nll_loss\":\n # support for legacy train.py, which assumed this meter is\n # always initialized\n m = train_meters.get(\"nll_loss\", None)\n return m or meters.AverageMeter()\n elif name == \"wall\":\n # support for legacy train.py, which assumed this meter is\n # always initialized\n m = metrics.get_meter(\"default\", \"wall\")\n return m or meters.TimeMeter()\n elif name == \"wps\":\n m = metrics.get_meter(\"train\", \"wps\")\n return m or meters.TimeMeter()\n elif name in {\"valid_loss\", \"valid_nll_loss\"}:\n # support for legacy train.py, which assumed these meters\n # are always initialized\n k = name[len(\"valid_\") :]\n m = metrics.get_meter(\"valid\", k)\n return m or meters.AverageMeter()\n elif name == \"oom\":\n return meters.AverageMeter()\n elif name in train_meters:\n return train_meters[name]\n return None\n\n def get_num_updates(self):\n \"\"\"Get the number of parameters updates.\"\"\"\n return self._num_updates\n\n def set_num_updates(self, num_updates):\n \"\"\"Set the number of parameters updates.\"\"\"\n self._num_updates = num_updates\n self.lr_step_update()\n if self.quantizer:\n self.quantizer.step_update(self._num_updates)\n metrics.log_scalar(\"num_updates\", self._num_updates, weight=0, priority=200)\n\n def clip_grad_norm(self, clip_norm):\n\n def agg_norm_fn(total_norm):\n total_norm = total_norm.cuda().float() ** 2\n total_norm = distributed_utils.all_reduce(\n total_norm, group=self.data_parallel_process_group\n )\n return total_norm ** 0.5\n\n should_agg_norm = (\n self.cfg.distributed_training.ddp_backend == \"fully_sharded\"\n and (\n self.data_parallel_process_group is not None\n or torch.distributed.is_initialized()\n )\n )\n return self.optimizer.clip_grad_norm(\n clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None\n )\n\n def cumulative_training_time(self):\n if self._cumulative_training_time is None:\n # single GPU\n return self._local_cumulative_training_time()\n else:\n return self._cumulative_training_time\n\n def _local_cumulative_training_time(self):\n \"\"\"Aggregate training time in seconds.\"\"\"\n return time.time() - self._start_time + self._previous_training_time\n\n def _prepare_sample(self, sample, is_dummy=False):\n if sample == \"DUMMY\":\n raise Exception(\n \"Trying to use an uninitialized 'dummy' batch. This usually indicates \"\n \"that the total number of batches is smaller than the number of \"\n \"participating GPUs. Try reducing the batch size or using fewer GPUs.\"\n )\n\n if sample is None or len(sample) == 0:\n assert (\n self._dummy_batch is not None and len(self._dummy_batch) > 0\n ), \"Invalid dummy batch: {}\".format(self._dummy_batch)\n sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True)\n return sample, True\n\n if self.cuda:\n if self.pipeline_model_parallel:\n if \"target\" in sample:\n sample[\"target\"] = utils.move_to_cuda(\n sample[\"target\"], device=self.last_device\n )\n else:\n sample = utils.move_to_cuda(sample)\n elif self.tpu and is_dummy:\n # the dummy batch may not be on the appropriate device\n sample = utils.move_to_cuda(sample, device=self.device)\n\n def apply_half(t):\n if t.dtype is torch.float32:\n return t.half()\n return t\n\n def apply_bfloat16(t):\n if t.dtype is torch.float32:\n return t.to(dtype=torch.bfloat16)\n return t\n\n if self.cfg.common.fp16:\n sample = utils.apply_to_sample(apply_half, sample)\n\n if self.cfg.common.bf16:\n sample = utils.apply_to_sample(apply_bfloat16, sample)\n\n if self._dummy_batch == \"DUMMY\":\n self._dummy_batch = sample\n\n return sample, False\n\n def _set_seed(self):\n # Set seed based on args.seed and the update number so that we get\n # reproducible results when resuming from checkpoints\n seed = self.cfg.common.seed + self.get_num_updates()\n utils.set_torch_seed(seed)\n\n def _sync_stats(self):\n # Return True if it's using multiple GPUs and DDP or multiple GPUs with\n # BMUF and it's a bmuf sync with warmup iterations completed before.\n if self.data_parallel_world_size == 1:\n return False\n elif self.cfg.optimization.use_bmuf:\n return (\n self.get_num_updates() + 1\n ) % self.cfg.bmuf.global_sync_iter == 0 and (\n self.get_num_updates() + 1\n ) > self.cfg.bmuf.warmup_iterations\n else:\n return True\n\n def _log_oom(self, exc):\n msg = \"OOM: Ran out of memory with exception: {}\".format(exc)\n logger.warning(msg)\n if torch.cuda.is_available() and hasattr(torch.cuda, \"memory_summary\"):\n for device_idx in range(torch.cuda.device_count()):\n logger.warning(torch.cuda.memory_summary(device=device_idx))\n sys.stderr.flush()\n\n def _aggregate_logging_outputs(\n self,\n logging_outputs: List[Dict[str, Any]],\n *extra_stats_to_sum,\n ignore=False,\n ):\n if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):\n return self._fast_stat_sync_sum(\n logging_outputs, *extra_stats_to_sum, ignore=ignore\n )\n else:\n return self._all_gather_list_sync(\n logging_outputs, *extra_stats_to_sum, ignore=ignore\n )\n\n def _all_gather_list_sync(\n self,\n logging_outputs: List[Dict[str, Any]],\n *extra_stats_to_sum,\n ignore=False,\n ):\n \"\"\"\n Sync logging outputs across workers. 
all_gather_list_sync is\n suitable when logging outputs are complex types.\n \"\"\"\n if self.tpu:\n raise NotImplementedError\n if ignore:\n logging_outputs = []\n results = list(\n zip(\n *distributed_utils.all_gather_list(\n [logging_outputs] + list(extra_stats_to_sum),\n max_size=getattr(self.cfg.common, \"all_gather_list_size\", 16384),\n group=self.data_parallel_process_group,\n )\n )\n )\n logging_outputs, extra_stats_to_sum = results[0], results[1:]\n logging_outputs = list(chain.from_iterable(logging_outputs))\n extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]\n return logging_outputs, extra_stats_to_sum\n\n def _fast_stat_sync_sum(\n self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False,\n ):\n \"\"\"\n Sync logging outputs across workers. fast_stat_sync_sum is\n faster than all_gather_list_sync, but is only suitable when\n logging outputs are scalars and can be summed. Note that\n *logging_outputs* cannot contain any nested dicts/lists.\n \"\"\"\n data = {}\n for i, stat in enumerate(extra_stats_to_sum):\n data[\"extra_stats_\" + str(i)] = stat\n if len(logging_outputs) > 0:\n log_keys = list(logging_outputs[0].keys())\n for k in log_keys:\n if not ignore:\n v = sum(log[k] for log in logging_outputs if k in log)\n else:\n v = logging_outputs[0][k]\n v = torch.zeros_like(v) if torch.is_tensor(v) else 0\n data[\"logging_outputs_\" + k] = v\n else:\n log_keys = None\n\n data = distributed_utils.all_reduce_dict(\n data, device=self.device, group=self.data_parallel_process_group\n )\n\n extra_stats_to_sum = [\n data[\"extra_stats_\" + str(i)] for i in range(len(extra_stats_to_sum))\n ]\n if log_keys is not None:\n logging_outputs = [{k: data[\"logging_outputs_\" + k] for k in log_keys}]\n else:\n logging_outputs = []\n return logging_outputs, extra_stats_to_sum\n\n def _check_grad_norms(self, grad_norm):\n \"\"\"Check that grad norms are consistent across workers.\"\"\"\n if self._grad_norm_buf is not None:\n self._grad_norm_buf.zero_()\n self._grad_norm_buf[self.data_parallel_rank] = grad_norm\n distributed_utils.all_reduce(\n self._grad_norm_buf, group=self.data_parallel_process_group\n )\n\n def is_consistent(tensor):\n max_abs_diff = torch.max(torch.abs(tensor - tensor[0]))\n return (\n torch.isfinite(tensor).all()\n and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()\n )\n\n if not is_consistent(self._grad_norm_buf):\n pretty_detail = \"\\n\".join(\n \"rank {:3d} = {:.8f}\".format(r, n)\n for r, n in enumerate(self._grad_norm_buf.tolist())\n )\n error_detail = \"grad_norm across the workers:\\n{}\\n\".format(\n pretty_detail\n )\n # use FloatingPointError to trigger NanDetector\n raise FloatingPointError(\n \"Fatal error: gradients are inconsistent between workers. \"\n \"Try --ddp-backend=legacy_ddp. 
\"\n \"Or are you mixing up different generation of GPUs in training?\"\n + \"\\n\"\n + \"-\" * 80\n + \"\\n{}\\n\".format(error_detail)\n + \"-\" * 80\n )\n\n def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):\n if grad_norm is not None and (\n not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm)\n ):\n metrics.log_speed(\"ups\", 1.0, priority=100, round=2)\n metrics.log_scalar(\"gnorm\", grad_norm, priority=400, round=3)\n if self.cfg.optimization.clip_norm > 0:\n metrics.log_scalar(\n \"clip\",\n torch.where(\n grad_norm > self.cfg.optimization.clip_norm,\n grad_norm.new_tensor(100),\n grad_norm.new_tensor(0),\n ),\n priority=500,\n round=1,\n )\n\n with metrics.aggregate() as agg:\n if logging_outputs is not None:\n self.task.reduce_metrics(logging_outputs, self.get_criterion())\n del logging_outputs\n\n # extra warning for criterions that don't properly log a loss value\n if \"loss\" not in agg:\n if \"loss\" not in self._warn_once:\n self._warn_once.add(\"loss\")\n logger.warning(\n \"Criterion.reduce_metrics did not log a 'loss' value, \"\n \"which may break some functionality\"\n )\n metrics.log_scalar(\"loss\", -1)\n\n # support legacy interface\n if self.tpu:\n logging_output = {}\n else:\n logging_output = agg.get_smoothed_values()\n logging_output[\"sample_size\"] = sample_size\n for key_to_delete in [\"ppl\", \"wps\", \"wpb\", \"bsz\"]:\n if key_to_delete in logging_output:\n del logging_output[key_to_delete]\n return logging_output\n\n def _check_xla_compilation(self):\n import torch_xla.debug.metrics as met\n\n compile_stats = met.metric_data(\"CompileTime\")\n if compile_stats is None:\n return\n num_xla_compiles = compile_stats[0]\n if num_xla_compiles > self._num_xla_compiles:\n logger.warning(\n \"XLA compilation detected on device #{}; too many of these can lead \"\n \"to slow training, but we expect a few in the beginning\".format(\n self.cfg.distributed_training.distributed_rank\n )\n )\n self._num_xla_compiles = num_xla_compiles\n\n def _xla_markstep_and_send_to_cpu(self, data=None):\n import torch_xla.core.xla_model as xm\n xm.mark_step()\n if data is not None:\n from fairseq.utils import xla_device_to_cpu\n return xla_device_to_cpu(data)\n\n\ndef _catalog_shared_params(module, memo=None, prefix=\"\"):\n if memo is None:\n first_call = True\n memo = {}\n else:\n first_call = False\n for name, param in module._parameters.items():\n param_prefix = prefix + (\".\" if prefix else \"\") + name\n if param not in memo:\n memo[param] = []\n memo[param].append(param_prefix)\n for name, m in module._modules.items():\n if m is None:\n continue\n submodule_prefix = prefix + (\".\" if prefix else \"\") + name\n _catalog_shared_params(m, memo, submodule_prefix)\n if first_call:\n return [x for x in memo.values() if len(x) > 1]\n\n\ndef _get_module_by_path(module, path):\n path = path.split(\".\")\n for name in path:\n module = getattr(module, name)\n return module\n\n\ndef _set_module_by_path(module, path, value):\n path = path.split(\".\")\n for name in path[:-1]:\n module = getattr(module, name)\n setattr(module, path[-1], value)\n"
] |
[
[
"torch.device",
"torch.cuda.reset_peak_memory_stats",
"torch.is_tensor",
"torch.no_grad",
"torch.cuda.memory_summary",
"torch.cuda.max_memory_allocated",
"torch.isfinite",
"torch.cuda.device_count",
"torch.abs",
"torch.distributed.is_initialized",
"torch.cuda.empty_cache",
"torch.cuda.is_available",
"torch.tensor",
"torch.cuda.get_device_capability",
"torch.zeros_like",
"torch.autograd.profiler.record_function",
"torch.cuda.DoubleTensor"
]
] |
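The trainer code in the row above multiplies gradients by `data_parallel_size / sample_size` after the all-reduce, because DDP averages gradients over workers while the trainer wants the sum of gradients divided by the total sample size. A minimal sketch of that normalization; `world_size` and `sample_size` are assumed values, not taken from the row:

```python
import torch

world_size = 4        # assumed number of data-parallel workers
sample_size = 128     # assumed total number of targets across workers

# Stand-in for a gradient that DDP has already averaged over workers,
# i.e. (sum_of_gradients / world_size).
grad_after_ddp = torch.randn(10)

numer = world_size    # as in the trainer when BMUF is off or stats are synced
normalized = grad_after_ddp * (numer / (sample_size or 1.0))
# `(sample_size or 1.0)` guards against a zero sample size without forcing a
# device-to-CPU sync, mirroring the note in the trainer code above.
print(normalized.shape)
```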
xixiobba/MVP-Net
|
[
"07bf00390080670b5d9a643b99f633419322a1ec"
] |
[
"lib/modeling/rpn_heads.py"
] |
[
"from torch import nn\nfrom torch.nn import init\nimport torch.nn.functional as F\n\nfrom core.config import cfg\nfrom modeling.generate_anchors import generate_anchors\nfrom modeling.generate_proposals import GenerateProposalsOp\nfrom modeling.generate_proposal_labels import GenerateProposalLabelsOp\nimport modeling.FPN as FPN\nimport utils.net as net_utils\nfrom model.utils.loss import focal_loss\n\n\n# ---------------------------------------------------------------------------- #\n# RPN and Faster R-CNN outputs and losses\n# ---------------------------------------------------------------------------- #\n\ndef generic_rpn_outputs(dim_in, spatial_scale_in):\n \"\"\"Add RPN outputs (objectness classification and bounding box regression)\n to an RPN model. Abstracts away the use of FPN.\n \"\"\"\n if cfg.FPN.FPN_ON:\n # Delegate to the FPN module\n return FPN.fpn_rpn_outputs(dim_in, spatial_scale_in)\n else:\n # Not using FPN, add RPN to a single scale\n return single_scale_rpn_outputs(dim_in, spatial_scale_in)\n\n\ndef generic_rpn_losses(*inputs, **kwargs):\n \"\"\"Add RPN losses. Abstracts away the use of FPN.\"\"\"\n if cfg.FPN.FPN_ON:\n return FPN.fpn_rpn_losses(*inputs, **kwargs)\n else:\n return single_scale_rpn_losses(*inputs, **kwargs)\n\n\nclass single_scale_rpn_outputs(nn.Module):\n \"\"\"Add RPN outputs to a single scale model (i.e., no FPN).\"\"\"\n def __init__(self, dim_in, spatial_scale):\n super().__init__()\n self.dim_in = dim_in\n self.dim_out = dim_in if cfg.RPN.OUT_DIM_AS_IN_DIM else cfg.RPN.OUT_DIM\n anchors = generate_anchors(\n stride=1. / spatial_scale,\n sizes=cfg.RPN.SIZES,\n aspect_ratios=cfg.RPN.ASPECT_RATIOS)\n num_anchors = anchors.shape[0]\n\n # RPN hidden representation\n self.RPN_conv = nn.Conv2d(self.dim_in, self.dim_out, 3, 1, 1)\n # Proposal classification scores\n self.n_score_out = num_anchors * 2 if cfg.RPN.CLS_ACTIVATION == 'softmax' \\\n else num_anchors\n self.RPN_cls_score = nn.Conv2d(self.dim_out, self.n_score_out, 1, 1, 0)\n # Proposal bbox regression deltas\n self.RPN_bbox_pred = nn.Conv2d(self.dim_out, num_anchors * 4, 1, 1, 0)\n\n self.RPN_GenerateProposals = GenerateProposalsOp(anchors, spatial_scale)\n self.RPN_GenerateProposalLabels = GenerateProposalLabelsOp()\n\n self._init_weights()\n\n def _init_weights(self):\n init.normal_(self.RPN_conv.weight, std=0.01)\n init.constant_(self.RPN_conv.bias, 0)\n init.normal_(self.RPN_cls_score.weight, std=0.01)\n init.constant_(self.RPN_cls_score.bias, 0)\n init.normal_(self.RPN_bbox_pred.weight, std=0.01)\n init.constant_(self.RPN_bbox_pred.bias, 0)\n\n def detectron_weight_mapping(self):\n detectron_weight_mapping = {\n 'RPN_conv.weight': 'conv_rpn_w',\n 'RPN_conv.bias': 'conv_rpn_b',\n 'RPN_cls_score.weight': 'rpn_cls_logits_w',\n 'RPN_cls_score.bias': 'rpn_cls_logits_b',\n 'RPN_bbox_pred.weight': 'rpn_bbox_pred_w',\n 'RPN_bbox_pred.bias': 'rpn_bbox_pred_b'\n }\n orphan_in_detectron = []\n return detectron_weight_mapping, orphan_in_detectron\n\n def forward(self, x, im_info, roidb=None):\n \"\"\"\n x: feature maps from the backbone network. 
(Variable)\n im_info: (CPU Variable)\n roidb: (list of ndarray)\n \"\"\"\n rpn_conv = F.relu(self.RPN_conv(x), inplace=True)\n\n rpn_cls_logits = self.RPN_cls_score(rpn_conv)\n\n rpn_bbox_pred = self.RPN_bbox_pred(rpn_conv)\n\n return_dict = {\n 'rpn_cls_logits': rpn_cls_logits, 'rpn_bbox_pred': rpn_bbox_pred}\n\n if not self.training or cfg.MODEL.FASTER_RCNN:\n # Proposals are needed during:\n # 1) inference (== not model.train) for RPN only and Faster R-CNN\n # OR\n # 2) training for Faster R-CNN\n # Otherwise (== training for RPN only), proposals are not needed\n if cfg.RPN.CLS_ACTIVATION == 'softmax':\n B, C, H, W = rpn_cls_logits.size()\n rpn_cls_prob = F.softmax(\n rpn_cls_logits.view(B, 2, C // 2, H, W), dim=1)\n rpn_cls_prob = rpn_cls_prob[:, 1].squeeze(dim=1)\n else:\n rpn_cls_prob = F.sigmoid(rpn_cls_logits)\n\n rpn_rois, rpn_rois_prob = self.RPN_GenerateProposals(\n rpn_cls_prob, rpn_bbox_pred, im_info)\n\n return_dict['rpn_rois'] = rpn_rois\n return_dict['rpn_roi_probs'] = rpn_rois_prob\n\n if cfg.MODEL.FASTER_RCNN :\n if self.training:\n # Add op that generates training labels for in-network RPN proposals\n blobs_out = self.RPN_GenerateProposalLabels(rpn_rois, roidb, im_info)\n return_dict.update(blobs_out)\n else:\n # Alias rois to rpn_rois for inference\n return_dict['rois'] = return_dict['rpn_rois']\n\n return return_dict\n\n\ndef single_scale_rpn_losses(\n rpn_cls_logits, rpn_bbox_pred,\n rpn_labels_int32_wide, rpn_bbox_targets_wide,\n rpn_bbox_inside_weights_wide, rpn_bbox_outside_weights_wide):\n \"\"\"Add losses for a single scale RPN model (i.e., no FPN).\"\"\"\n h, w = rpn_cls_logits.shape[2:]\n rpn_labels_int32 = rpn_labels_int32_wide[:, :, :h, :w] # -1 means ignore\n h, w = rpn_bbox_pred.shape[2:]\n rpn_bbox_targets = rpn_bbox_targets_wide[:, :, :h, :w]\n rpn_bbox_inside_weights = rpn_bbox_inside_weights_wide[:, :, :h, :w]\n rpn_bbox_outside_weights = rpn_bbox_outside_weights_wide[:, :, :h, :w]\n\n #fg_num = (rpn_labels_int32_wide==1).data.sum()\n #bg_num = (rpn_labels_int32_wide==0).data.sum()\n #print(\"RCNN training fg/bg: %d/%d\"%(fg_num, bg_num))\n\n if cfg.RPN.CLS_ACTIVATION == 'softmax':\n B, C, H, W = rpn_cls_logits.size()\n rpn_cls_logits = rpn_cls_logits.view(\n B, 2, C // 2, H, W).permute(0, 2, 3, 4, 1).contiguous().view(-1, 2)\n rpn_labels_int32 = rpn_labels_int32.contiguous().view(-1).long()\n # the loss is averaged over non-ignored targets\n if cfg.TRAIN.FOCAL_LOSS:\n loss_rpn_cls = focal_loss(rpn_cls_logits, rpn_labels_int32, softmax=False, size_average=False)\n else:\n \tloss_rpn_cls = F.cross_entropy(\n rpn_cls_logits, rpn_labels_int32, ignore_index=-1)\n else:\n weight = (rpn_labels_int32 >= 0).float()\n if cfg.TRAIN.FOCAL_LOSS:\n loss_rpn_cls = focal_loss(\n rpn_cls_logits.view(-1, 1), rpn_labels_int32.contiguous().view(-1, 1).float(), weight.view(-1, 1).float(), softmax=True, size_average=False)\n else:\n loss_rpn_cls = F.binary_cross_entropy_with_logits(\n rpn_cls_logits, rpn_labels_int32.float(), weight, size_average=False)\n loss_rpn_cls /= weight.sum()\n\n loss_rpn_bbox = net_utils.smooth_l1_loss(\n rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights,\n beta=1/9)\n\n return loss_rpn_cls, loss_rpn_bbox\n"
] |
[
[
"torch.nn.functional.sigmoid",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.functional.cross_entropy",
"torch.nn.init.normal_"
]
] |
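In `single_scale_rpn_outputs` above, softmax objectness scores are computed by viewing the `2*A`-channel logits as a `(B, 2, A, H, W)` tensor so that the softmax runs over the background/foreground pair of each anchor. A self-contained sketch with made-up tensor sizes:

```python
import torch
import torch.nn.functional as F

B, A, H, W = 2, 3, 8, 8                       # hypothetical sizes
rpn_cls_logits = torch.randn(B, 2 * A, H, W)  # 2 logits (bg/fg) per anchor
prob = F.softmax(rpn_cls_logits.view(B, 2, A, H, W), dim=1)
fg_prob = prob[:, 1]                          # foreground probability per anchor
assert fg_prob.shape == (B, A, H, W)
```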
sebaslherrera/holbertonschool-machine_learning
|
[
"a4c09230688700aee199f4099de32261104918be"
] |
[
"math/0x00-linear_algebra/9-let_the_butcher_slice_it.py"
] |
[
"#!/usr/bin/env python3\nimport numpy as np\nmatrix = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12],\n [13, 14, 15, 16, 17, 18], [19, 20, 21, 22, 23, 24]])\nmat1 = matrix[1:3]\nmat2 = matrix[:, 2:4]\nmat3 = matrix[1:, 3:]\nprint(\"The middle two rows of the matrix are:\\n{}\".format(mat1))\nprint(\"The middle two columns of the matrix are:\\n{}\".format(mat2))\nprint(\"The bottom-right, square, 3x3 matrix is:\\n{}\".format(mat3))\n"
] |
[
[
"numpy.array"
]
] |
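The slicing exercise above relies on NumPy's half-open ranges: `matrix[1:3]` takes rows 1 and 2, `matrix[:, 2:4]` takes columns 2 and 3 of every row, and `matrix[1:, 3:]` takes the bottom-right 3x3 block. A quick check on an equivalent 4x6 matrix:

```python
import numpy as np

m = np.arange(1, 25).reshape(4, 6)  # same values as the script's matrix
assert m[1:3].shape == (2, 6)       # middle two rows
assert m[:, 2:4].shape == (4, 2)    # middle two columns
assert m[1:, 3:].shape == (3, 3)    # bottom-right 3x3 block
```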
KuangHaofei/pytorch-deepFEPE
|
[
"012651c93f948cfd793cf8bba9670ab69abc0e04"
] |
[
"deepFEPE/utils/plot_tools.py"
] |
[
"import argparse\nimport time\nimport csv\nimport yaml\nimport os\nimport logging\nfrom pathlib import Path\n\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom tensorboardX import SummaryWriter\nimport cv2\nimport matplotlib.pyplot as plt\n\n\nclass plot_results(object):\n def __init__(self, frame_list=[100], mode='base'):\n # frame_list = [0, 100, 200, 300]\n # frame_list = [100, 700, 1200]\n # frame_list = [100]\n self.frame_list = frame_list\n print(f\"mode = {mode}\")\n self.get_image_names(mode=mode)\n pass\n\n def get_image_names(self, mode='base'):\n frame_list = self.frame_list\n plot_folder = \"plots/\"\n image_name = None\n if mode == 'base':\n prefix = [\"Si-Df-k\", \"Sp-Df-fp-end-k\"]\n plot_name = \"mask_conf_\" # 'corr_all_'\n # image_name = [f\"{plot_folder}{plot_name}{prefix}{i:06}_{(i+1):06}.png\" for i in frame_list]\n elif mode == 'good' or mode == 'bad':\n prefix = [f\"Si-Df-fp-k_{mode}\", f\"Sp-Df-fp-end-k_{mode}\"]\n plot_name = \"mask_conf_\" # \"mask_conf_\" # 'corr_all_'\n elif mode == 'freeze':\n print(f\"freeze!\")\n iter_list = [0, 400, 1000]\n prefix_base = \"Sp-Df-f-end-k-freezeDf\"\n plot_name = 'corr_all_random_' # 'corr_all_', \"mask_conf_\" \"epi_dist_all_\" \"corr_all_random_\"\n print(f\"plot_name: {plot_name}\")\n # prefix = [f'{prefix_base}_{iter/1000}k_' for iter in iter_list] # 'Sp-Df-fp-end-k'\n prefix = [f'{prefix_base}_s{frame_list[0]}_{iter/1000}k' for iter in iter_list] # 'Sp-Df-fp-end-k'\n image_name = [f\"{plot_folder}{plot_name}{p}.png\" for p in prefix]\n # prefix = f'Sp-Df-f-end-k-freezeDf_s{j}_{iter/1000}k'\n # image_name = [\n # f\"{plot_folder}{plot_name}{pre}{i:06}_{(i+1):06}.png\"\n # for i in frame_list\n # for pre in prefix\n # ]\n if image_name is None:\n image_name = [\n f\"{plot_folder}{plot_name}{pre}_{i}.png\"\n for i in frame_list\n for pre in prefix\n ]\n self.prefix = prefix\n self.image_name = image_name\n self.image_data = []\n self.plot_name = plot_name\n print(image_name) \n\n def __len__(self):\n return len(self.image_name)\n\n def read_images(self):\n image_data = []\n image_name = self.image_name\n for i, file in enumerate(image_name):\n img = cv2.imread(file)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n image_data.append(img)\n print(f\"read {i}: {file}\")\n # plt.imshow(img)\n # plt.show()\n self.image_data = image_data\n pass\n\n def plot_images(\n self, row=2, col=2, col_labels=[\"Baseline - Si-Df-fp\", \"Ours - Sp-Df-fp-end\"],\n save=True,\n figsize=(48,12),\n ext='pdf'\n ):\n ## create subgraph for combinations\n # row, col = 2, 2\n img_num = row * col\n assert self.__len__() >= img_num\n image_data = self.image_data\n\n f, axarr = plt.subplots(row, col, figsize=figsize)\n # f, axarr = plt.subplots(row, col, figsize=(48, 12))\n\n axarr = axarr.reshape(-1, col)\n for i in range(img_num):\n print(f\"axarr: {axarr.shape}, i= {i}\")\n axarr[int(i / col), int(i % col)].imshow(image_data[i])\n axarr[int(i / col), int(i % col)].axis(\"off\")\n # axarr[i/2,i%2].imshow(imaget(_datas[1])\n # axarr[1,0].imshow(image_datas[2])\n # axarr[1,1].imshow(image_datas[3])\n\n for ax, col_name in zip(axarr[0], col_labels):\n ax.set_title(col_name, fontsize=figsize[0])\n\n f.tight_layout()\n # f.suptitle(f'{self.prefix}', fontsize=12)\n savefile = f\"{self.plot_name}_{str('_').join(self.prefix)}_{str('_').join([str(f) for f in self.frame_list])}\"\n if save:\n if ext == 'pdf':\n file = f\"plots/{savefile}.pdf\"\n plt.savefig(file, bbox_inches=\"tight\")\n else:\n file = f\"plots/{savefile}.png\"\n plt.savefig(file, dpi=300, 
bbox_inches=\"tight\")\n logging.info(f\"save image: {savefile}\")\n print(f\"save image: {file}\")\n else:\n print(f\"not saved!!\")\n # logging.info(f\"save image: {file}\")\n plt.show()\n\nif __name__ == \"__main__\":\n plot_helper = plot_class()\n plot_helper.read_images()\n # plot_helper.plot_images(row=3,col=2)\n plot_helper.plot_images(row=1,col=2)\n\n\n# class plot_class(object):\n# def __init__(self):\n# # frame_list = [0, 100, 200, 300]\n# frame_list = [100, 700, 1200]\n# # frame_list = [100]\n# prefix = ['Si-Df-k', 'Sp-Df-fp-end-k']\n# plot_folder = 'plots/'\n# plot_name = 'mask_conf_' # 'corr_all_'\n# # image_name = [f\"{plot_folder}{plot_name}{prefix}{i:06}_{(i+1):06}.png\" for i in frame_list]\n# image_name = [f\"{plot_folder}{plot_name}{pre}{i:06}_{(i+1):06}.png\" for i in frame_list for pre in prefix ]\n# self.frame_list = frame_list\n# self.prefix = prefix\n# self.image_name = image_name\n# self.image_data = []\n# print(image_name) \n# pass\n# def __len__(self):\n# return len(self.image_name)\n \n# def read_images(self):\n# image_data = []\n# image_name = self.image_name\n# for i, file in enumerate(image_name):\n# img = cv2.imread(file)\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n# image_data.append(img)\n# print(f\"read {i}: {file}\")\n# # plt.imshow(img)\n# # plt.show()\n# self.image_data = image_data\n# pass\n \n# def plot_images(self, row=2, col=2, col_labels=['Baseline - Si-Df-fp', 'Ours - Sp-Df-fp-end']):\n# ## create subgraph for combinations\n# # row, col = 2, 2\n# img_num = row*col\n# assert self.__len__() >= img_num\n# image_data = self.image_data\n \n# f, axarr = plt.subplots(row, col, figsize=(48, 12))\n# # f, axarr = plt.subplots(row, col, figsize=(48, 12))\n \n# axarr = axarr.reshape(-1, col)\n# for i in range(img_num):\n# print(f'axarr: {axarr.shape}, i= {i}')\n# axarr[int(i/col),int(i%col)].imshow(image_data[i])\n# axarr[int(i/col),int(i%col)].axis('off')\n# # axarr[i/2,i%2].imshow(imaget(_datas[1])\n# # axarr[1,0].imshow(image_datas[2])\n# # axarr[1,1].imshow(image_datas[3])\n\n \n# for ax, col_name in zip(axarr[0], col_labels):\n# ax.set_title(col_name)\n \n# f.tight_layout()\n# # f.suptitle(f'{self.prefix}', fontsize=12)\n# savefile = f\"{str('_').join(self.prefix)}_{str('_').join([str(f) for f in self.frame_list])}\"\n# file = f\"plots/{savefile}.png\"\n# # logging.info(f\"save image: {file}\")\n# print(f\"save image: {file}\")\n# plt.show() \n\n\n\n# def plot_imgs(imgs, titles=None, cmap='brg', ylabel='', normalize=False, ax=None, dpi=100):\n# n = len(imgs)\n# if not isinstance(cmap, list):\n# cmap = [cmap]*n\n# if ax is None:\n# fig, ax = plt.subplots(1, n, figsize=(6*n, 6), dpi=dpi)\n# if n == 1:\n# ax = [ax]\n# else:\n# if not isinstance(ax, list):\n# ax = [ax]\n# assert len(ax) == len(imgs)\n# for i in range(n):\n# if imgs[i].shape[-1] == 3:\n# imgs[i] = imgs[i][..., ::-1] # BGR to RGB\n# ax[i].imshow(imgs[i], cmap=plt.get_cmap(cmap[i]),\n# vmin=None if normalize else 0,\n# vmax=None if normalize else 1)\n# if titles:\n# ax[i].set_title(titles[i])\n# ax[i].get_yaxis().set_ticks([])\n# ax[i].get_xaxis().set_ticks([])\n# for spine in ax[i].spines.values(): # remove frame\n# spine.set_visible(False)\n# ax[0].set_ylabel(ylabel)\n# plt.tight_layout()\n\n\n# # from utils.draw import img_overlap\n# def img_overlap(img_r, img_g, img_gray): # img_b repeat\n# img = np.concatenate((img_gray, img_gray, img_gray), axis=0)\n# img[0, :, :] += img_r[0, :, :]\n# img[1, :, :] += img_g[0, :, :]\n# img[img > 1] = 1\n# img[img < 0] = 0\n# return img\n\n# def 
draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3):\n# '''\n\n# :param img:\n# image:\n# numpy [H, W]\n# :param corners:\n# Points\n# numpy [N, 2]\n# :param color:\n# :param radius:\n# :param s:\n# :return:\n# overlaying image\n# numpy [H, W]\n# '''\n# img = np.repeat(cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis], 3, -1)\n# for c in np.stack(corners).T:\n# # cv2.circle(img, tuple(s * np.flip(c, 0)), radius, color, thickness=-1)\n# cv2.circle(img, tuple((s * c[:2]).astype(int)), radius, color, thickness=-1)\n# return img\n\n# # def draw_keypoints(img, corners, color=(0, 255, 0), radius=3, s=3):\n# # '''\n\n# # :param img:\n# # np (H, W)\n# # :param corners:\n# # np (3, N)\n# # :param color:\n# # :param radius:\n# # :param s:\n# # :return:\n# # '''\n# # img = np.repeat(cv2.resize(img, None, fx=s, fy=s)[..., np.newaxis], 3, -1)\n# # for c in np.stack(corners).T:\n# # # cv2.circle(img, tuple(s * np.flip(c, 0)), radius, color, thickness=-1)\n# # cv2.circle(img, tuple((s*c[:2]).astype(int)), radius, color, thickness=-1)\n# # return img\n\n# def draw_matches(rgb1, rgb2, match_pairs, filename='matches.png', show=False):\n# '''\n\n# :param rgb1:\n# image1\n# numpy (H, W)\n# :param rgb2:\n# image2\n# numpy (H, W)\n# :param match_pairs:\n# numpy (keypoiny1 x, keypoint1 y, keypoint2 x, keypoint 2 y)\n# :return:\n# None\n# '''\n# from matplotlib import pyplot as plt\n\n# h1, w1 = rgb1.shape[:2]\n# h2, w2 = rgb2.shape[:2]\n# canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype=rgb1.dtype)\n# canvas[:h1, :w1] = rgb1[:,:,np.newaxis]\n# canvas[:h2, w1:] = rgb2[:,:,np.newaxis]\n# # fig = plt.figure(frameon=False)\n# fig = plt.imshow(canvas)\n\n# xs = match_pairs[:, [0, 2]]\n# xs[:, 1] += w1\n# ys = match_pairs[:, [1, 3]]\n\n# alpha = 1\n# sf = 5\n# lw = 0.5\n# # markersize = 1\n# markersize = 2\n\n# plt.plot(\n# xs.T, ys.T,\n# alpha=alpha,\n# linestyle=\"-\",\n# linewidth=lw,\n# aa=False,\n# marker='o',\n# markersize=markersize,\n# fillstyle='none',\n# color=[0.0, 0.8, 0.0],\n# );\n# plt.tight_layout()\n# plt.savefig(filename, dpi=300, bbox_inches='tight')\n# print('#Matches = {}'.format(len(match_pairs)))\n# if show:\n# plt.show()\n\n# # from utils.draw import draw_matches_cv\n# def draw_matches_cv(data):\n# keypoints1 = [cv2.KeyPoint(p[1], p[0], 1) for p in data['keypoints1']]\n# keypoints2 = [cv2.KeyPoint(p[1], p[0], 1) for p in data['keypoints2']]\n# inliers = data['inliers'].astype(bool)\n# matches = np.array(data['matches'])[inliers].tolist()\n# def to3dim(img):\n# if img.ndim == 2:\n# img = img[:, :, np.newaxis]\n# return img\n# img1 = to3dim(data['image1'])\n# img2 = to3dim(data['image2'])\n# img1 = np.concatenate([img1, img1, img1], axis=2)\n# img2 = np.concatenate([img2, img2, img2], axis=2)\n# return cv2.drawMatches(img1, keypoints1, img2, keypoints2, matches,\n# None, matchColor=(0,255,0), singlePointColor=(0, 0, 255))\n\n\n# def drawBox(points, img, offset=np.array([0,0]), color=(0,255,0)):\n# # print(\"origin\", points)\n# offset = offset[::-1]\n# points = points + offset\n# points = points.astype(int)\n# for i in range(len(points)):\n# img = img + cv2.line(np.zeros_like(img),tuple(points[-1+i]), tuple(points[i]), color,5)\n# return img\n"
] |
[
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.subplots"
]
] |
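`plot_results.plot_images` above lays images out on a subplot grid by mapping a flat index `i` to cell `(i // col, i % col)` after reshaping the axes array; note that the `__main__` block instantiates `plot_class()` while the class defined in the file is `plot_results`, so the script as stored would raise a NameError. A stripped-down sketch of the grid pattern using synthetic images (file names and sizes are assumptions):

```python
import numpy as np
import matplotlib.pyplot as plt

images = [np.random.rand(32, 32) for _ in range(4)]  # stand-ins for loaded files
row, col = 2, 2
fig, axarr = plt.subplots(row, col, figsize=(6, 6))
axarr = axarr.reshape(-1, col)
for i, img in enumerate(images):
    axarr[i // col, i % col].imshow(img)   # flat index -> (row, col) cell
    axarr[i // col, i % col].axis("off")
fig.tight_layout()
plt.savefig("grid.png", bbox_inches="tight")
```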
XiangLiK/cv_course
|
[
"da7c2318fd4128bbdab96db26ddbb2524f37d0a0",
"da7c2318fd4128bbdab96db26ddbb2524f37d0a0"
] |
[
"chapter_03/example-3_1.py",
"chapter_07e/face_detection/config_farm/acc_model.py"
] |
[
"#-*-coding:utf-8-*-\r\n# date:2020-03-28\r\n# Author: xiang li\r\n\r\nimport torch # 加载torch库\r\nimport numpy as np # 加载Numpy库\r\nif __name__ == \"__main__\":\r\n print(torch.__version__)# 查看 torch 版本\r\n print('-----------------------')\r\n y = torch.rand(2,3)# 随机矩阵\r\n print(y)\r\n print(y.size())\r\n print('-----------------------')\r\n print(torch.zeros(2,2))#全0矩阵\r\n print('-----------------------')\r\n print(torch.ones(2,2))#全1矩阵\r\n print('-----------------------')\r\n print(torch.eye(3,3))# 单位矩阵\r\n print('-----------------------')\r\n print(torch.rand_like(input = y, dtype = torch.double))# 输出和input矩阵相同size的随机矩阵\r\n",
"import torch\r\nimport torch.nn as nn\r\nimport torchvision\r\nimport time\r\nimport numpy as np\r\nimport sys\r\n\r\n\r\n\r\ndef get_model_op(model_,print_flag = False):\r\n print('/********************* modules *******************/')\r\n op_dict = {}\r\n idx = 0\r\n for m in model_.modules():\r\n idx += 1\r\n if isinstance(m, nn.Conv2d):\r\n if 'Conv2d' not in op_dict.keys():\r\n op_dict['Conv2d'] = 1\r\n else:\r\n op_dict['Conv2d'] += 1\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n pass\r\n elif isinstance(m, nn.BatchNorm2d):\r\n if 'BatchNorm2d' not in op_dict.keys():\r\n op_dict['BatchNorm2d'] = 1\r\n else:\r\n op_dict['BatchNorm2d'] += 1\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n pass\r\n elif isinstance(m, nn.Linear):\r\n if 'Linear' not in op_dict.keys():\r\n op_dict['Linear'] = 1\r\n else:\r\n op_dict['Linear'] += 1\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n pass\r\n elif isinstance(m, nn.Sequential):\r\n if print_flag:\r\n print('*******************{}) {}'.format(idx,m))\r\n for n in m:\r\n if print_flag:\r\n print('{}) {}'.format(idx,n))\r\n if 'Conv2d' not in op_dict.keys():\r\n op_dict['Conv2d'] = 1\r\n else:\r\n op_dict['Conv2d'] += 1\r\n if 'BatchNorm2d' not in op_dict.keys():\r\n op_dict['BatchNorm2d'] = 1\r\n else:\r\n op_dict['BatchNorm2d'] += 1\r\n if 'Linear' not in op_dict.keys():\r\n op_dict['Linear'] = 1\r\n else:\r\n op_dict['Linear'] += 1\r\n if 'ReLU6' not in op_dict.keys():\r\n op_dict['ReLU6'] = 1\r\n else:\r\n op_dict['ReLU6'] += 1\r\n pass\r\n elif isinstance(m, nn.ReLU6):\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n if 'ReLU6' not in op_dict.keys():\r\n op_dict['ReLU6'] = 1\r\n else:\r\n op_dict['ReLU6'] += 1\r\n pass\r\n elif isinstance(m, nn.Module):\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n for n in m.modules():\r\n if isinstance(n, nn.Conv2d):\r\n if print_flag:\r\n print('{}) {}'.format(idx,n))\r\n if 'Conv2d' not in op_dict.keys():\r\n op_dict['Conv2d'] = 1\r\n else:\r\n op_dict['Conv2d'] += 1\r\n if 'BatchNorm2d' not in op_dict.keys():\r\n op_dict['BatchNorm2d'] = 1\r\n else:\r\n op_dict['BatchNorm2d'] += 1\r\n if 'Linear' not in op_dict.keys():\r\n op_dict['Linear'] = 1\r\n else:\r\n op_dict['Linear'] += 1\r\n if 'ReLU6' not in op_dict.keys():\r\n op_dict['ReLU6'] = 1\r\n else:\r\n op_dict['ReLU6'] += 1\r\n pass\r\n pass\r\n\r\n else:\r\n if print_flag:\r\n print('{}) {}'.format(idx,m))\r\n pass\r\n\r\n # print('\\n/********************** {} ********************/\\n'.format(ops.network))\r\n for key in op_dict.keys():\r\n print(' operation - {} : {}'.format(key,op_dict[key]))\r\n\r\nclass DummyModule(nn.Module):\r\n def __init__(self):\r\n super(DummyModule, self).__init__()\r\n\r\n def forward(self, x):\r\n return x\r\n\r\ndef fuse(conv, bn):\r\n # https://tehnokv.com/posts/fusing-batchnorm-and-conv/\r\n with torch.no_grad():\r\n # init\r\n if isinstance(conv, nn.Conv2d):\r\n fusedconv = torch.nn.Conv2d(conv.in_channels,\r\n conv.out_channels,\r\n kernel_size=conv.kernel_size,\r\n stride=conv.stride,\r\n padding=conv.padding,\r\n bias=True)\r\n elif isinstance(conv, nn.ConvTranspose2d): # not supprot nn.ConvTranspose2d\r\n fusedconv = nn.ConvTranspose2d(\r\n conv.in_channels,\r\n conv.out_channels,\r\n kernel_size=conv.kernel_size,\r\n stride=conv.stride,\r\n padding=conv.padding,\r\n output_padding=conv.output_padding,\r\n bias=True)\r\n else:\r\n print(\"error\")\r\n exit()\r\n\r\n # prepare filters\r\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\r\n w_bn = 
torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\r\n        fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.size()))\r\n\r\n        # prepare spatial bias\r\n        if conv.bias is not None:\r\n            b_conv = conv.bias\r\n            #b_conv = conv.bias.mul(bn.weight.div(torch.sqrt(bn.running_var + bn.eps))) # maybe you should use this one?\r\n        else:\r\n            b_conv = torch.zeros(conv.weight.size(0))\r\n        b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\r\n        fusedconv.bias.copy_(b_conv + b_bn)\r\n\r\n    return fusedconv\r\n\r\n\r\ndef fuse_module(m):\r\n    children = list(m.named_children())\r\n    c = None\r\n    cn = None\r\n\r\n    for name, child in children:\r\n        # print(\"name {}, child {}\".format(name, child))\r\n        if isinstance(child, nn.BatchNorm2d) and c is not None:\r\n            bc = fuse(c, child)\r\n            m._modules[cn] = bc\r\n            # print('DummyModule() : ',DummyModule())\r\n            m._modules[name] = DummyModule()\r\n            c = None\r\n        elif isinstance(child, nn.Conv2d):\r\n            c = child\r\n            cn = name\r\n        else:\r\n            fuse_module(child)\r\n\r\ndef test_net(ops,m):\r\n\r\n    use_cuda = torch.cuda.is_available()\r\n    use_cpu = False\r\n    if ops.force_cpu or use_cuda == False:\r\n        p = torch.randn([1, 3, 256, 256])\r\n        device = torch.device(\"cpu\")\r\n        use_cpu = True\r\n    else:\r\n        p = torch.randn([1, 3, 256, 256]).cuda()\r\n        device = torch.device(\"cuda:0\")\r\n\r\n    count = 50\r\n    time_org = []\r\n    m_o = m.to(device)\r\n    get_model_op(m_o)\r\n    # print(m)\r\n    for i in range(count):\r\n        s1 = time.time()\r\n        if use_cpu:\r\n            o_output = m_o(p)\r\n        else:\r\n            o_output = m_o(p).cpu()\r\n        s2 = time.time()\r\n        time_org.append(s2 - s1)\r\n        print(\"Original time: \", s2 - s1)\r\n    print('------------------------------------>>>>')\r\n\r\n    fuse_module(m.to(torch.device(\"cpu\")))\r\n\r\n    # print(m)\r\n\r\n    m_f = m.to(device)\r\n    get_model_op(m_f)\r\n\r\n    time_fuse = []\r\n    for i in range(count):\r\n        s1 = time.time()\r\n        if use_cpu:\r\n            f_output = m_f(p)\r\n        else:\r\n            f_output = m_f(p).cpu()\r\n        s2 = time.time()\r\n        time_fuse.append(s2 - s1)\r\n        print(\"Fused time: \", s2 - s1)\r\n\r\n    print(\"-\" * 50)\r\n    print(\"org time:\", np.mean(time_org))\r\n    print(\"fuse time:\", np.mean(time_fuse))\r\n    for o in o_output:\r\n        print(\"org size:\", o.size())\r\n    for o in f_output:\r\n        print(\"fuse size:\", o.size())\r\n    for i in range(len(o_output)):\r\n        assert o_output[i].size()==f_output[i].size()\r\n        print(\"output[{}] max abs diff: {}\".format(i, (o_output[i] - f_output[i]).abs().max().item()))\r\n        print(\"output[{}] MSE diff: {}\".format(i, nn.MSELoss()(o_output[i], f_output[i]).item()))\r\n\r\n\r\ndef acc_model(ops,m):\r\n    print('\\n-------------------------------->>> before acc model')\r\n    get_model_op(m)\r\n    fuse_module(m)\r\n    print('\\n-------------------------------->>> after acc model')\r\n    get_model_op(m)\r\n\r\n    return m\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    import os\r\n    import argparse\r\n    from models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152\r\n\r\n    parser = argparse.ArgumentParser(description=' Acc Model')\r\n\r\n    parser.add_argument('--network', type=str, default='resnet_101',\r\n        help='Backbone network : resnet_18,resnet_34,resnet_50,resnet_101,resnet_152,mobilenetv2')\r\n    parser.add_argument('--model', type=str, default = './resnet101/model_epoch-1300.pth',\r\n        help = 'model') # model path\r\n    parser.add_argument('--input_shape', type=tuple , default = (1,3,256,256),\r\n        help = 'input_shape') #\r\n    parser.add_argument('--num_classes', type=int , default = 196,\r\n        help = 
'num_classes') # number of output classes\r\n    parser.add_argument('--force_cpu', type=bool, default = False,\r\n        help = 'force_cpu') # hardware choice for forward inference\r\n    parser.add_argument('--GPUS', type=str, default = '0',\r\n        help = 'GPUS') # GPU selection\r\n\r\n    print('\\n/******************* {} ******************/\\n'.format(parser.description))\r\n    #--------------------------------------------------------------------------\r\n    ops = parser.parse_args()# parse the command-line arguments\r\n    #--------------------------------------------------------------------------\r\n    print('----------------------------------')\r\n\r\n    unparsed = vars(ops) # parse_args() returns a namespace; the vars() builtin turns it into a dict\r\n    for key in unparsed.keys():\r\n        print('{} : {}'.format(key,unparsed[key]))\r\n\r\n    os.environ['CUDA_VISIBLE_DEVICES'] = ops.GPUS\r\n    #---------------------------------------------------------------- build the landmarks model\r\n    if ops.network == 'resnet_18':\r\n        model_=resnet18(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n    elif ops.network == 'resnet_34':\r\n        model_=resnet34(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n    elif ops.network == 'resnet_50':\r\n        model_=resnet50(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n    elif ops.network == 'resnet_101':\r\n        model_=resnet101(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n    elif ops.network == 'resnet_152':\r\n        model_=resnet152(num_classes=ops.num_classes, img_size=ops.input_shape[2])\r\n    elif ops.network == 'mobilenetv2':\r\n        model_=MobileNetV2(n_class =ops.num_classes, input_size=ops.input_shape[2])\r\n    else:\r\n        print('error: unknown network structure : {}'.format(ops.network))\r\n\r\n    # load the model to test\r\n    if os.access(ops.model,os.F_OK):# checkpoint\r\n        chkpt = torch.load(ops.model, map_location=lambda storage, loc: storage)\r\n        # chkpt = torch.load(ops.model)\r\n        model_.load_state_dict(chkpt)\r\n        print(' \\nload model : {}'.format(ops.model))\r\n\r\n    model_.eval()\r\n    test_net(ops,model_)\r\n"
] |
[
[
"torch.zeros",
"torch.rand",
"torch.rand_like",
"torch.ones",
"torch.eye"
],
[
"torch.device",
"torch.sqrt",
"torch.nn.MSELoss",
"torch.no_grad",
"torch.nn.ConvTranspose2d",
"numpy.mean",
"torch.mm",
"torch.nn.Conv2d",
"torch.cuda.is_available",
"torch.load",
"torch.randn"
]
] |
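The `fuse(conv, bn)` helper above folds an eval-mode BatchNorm into the preceding convolution: since `y = gamma * (conv(x) - mean) / sqrt(var + eps) + beta`, scaling each output channel's weights by `scale = gamma / sqrt(var + eps)` and setting the bias to `beta - scale * mean` yields a single equivalent conv. A toy numeric check of that identity (sizes and random stats are arbitrary, not from the script):

```python
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
bn = nn.BatchNorm2d(8).eval()        # eval mode: uses running statistics
bn.running_mean.uniform_(-1.0, 1.0)  # pretend the BN has been trained
bn.running_var.uniform_(0.5, 2.0)

fused = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=True)
with torch.no_grad():
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    fused.weight.copy_(conv.weight * scale.view(-1, 1, 1, 1))
    fused.bias.copy_(bn.bias - scale * bn.running_mean)

x = torch.randn(1, 3, 16, 16)
with torch.no_grad():
    assert torch.allclose(bn(conv(x)), fused(x), atol=1e-4)
print("fused conv matches conv+bn")
```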
samcom12/anuga_core
|
[
"f4378114dbf02d666fe6423de45798add5c42806",
"f4378114dbf02d666fe6423de45798add5c42806",
"f4378114dbf02d666fe6423de45798add5c42806"
] |
[
"validation_tests/analytical_exact/transcritical_without_shock/analytical_without_shock.py",
"anuga/coordinate_transforms/setup.py",
"anuga/fit_interpolate/setup.py"
] |
[
"\"\"\"\r\nTranscritical flow over a bump without a shock.\r\nRef1: Houghton & Kasahara, Nonlinear shallow fluid flow over an isolated ridge.\r\nComm. Pure and Applied Math. DOI:10.1002/cpa.3160210103\r\n\r\nRef2: Delestre et al, 2012, SWASHES: a compilation of shallow water\r\nanalytic solutions..., Int J Numer Meth Fluids, DOI:10.1002/fld.3741\r\n\r\nSudi Mungkasi, ANU 2012\r\n\"\"\"\r\nfrom numpy import zeros, linspace\r\nfrom scipy.optimize import fsolve\r\nfrom pylab import plot, show\r\nfrom anuga import g\r\n\r\n\r\nq0 = 1.53 # This is the imposed momentum\r\nh_d = 0.66 # This is the water height downstream\r\n\r\ndef analytic_sol(x):\r\n def elevation(x):\r\n z_b = zeros(len(x))\r\n for i in range(len(x)):\r\n if (8.0 <= x[i] <= 12.0):\r\n z_b[i] = 0.2 - 0.05*(x[i]-10.0)**2.0\r\n else:\r\n z_b[i] = 0.0\r\n return z_b\r\n z = elevation(x)\r\n zM= max(z)\r\n\r\n def find_hM(hM): #to find the water height at the maxima of the bump\r\n return h_d**3 + (-q0**2/(2*g*hM**2)-hM-zM)*h_d**2 + q0**2/(2*g)\r\n hM = fsolve(find_hM, 0.5)\r\n\r\n def find_h(h): #to find the water height at every spatial point after hM is found\r\n return h**3 + (zb-q0**2/(2*g*hM**2)-hM-zM)*h**2 + q0**2/(2*g)\r\n h = zeros(len(x))\r\n for i in range(len(x)):\r\n zb = z[i]\r\n #h[i] = fsolve(find_h, 1.0)\r\n if x[i] < 10:\r\n h[i] = fsolve(find_h, 1.0)\r\n else:\r\n h[i] = fsolve(find_h, 0.4)\r\n return h, z\r\n\r\n##N = 401\r\n##L = 25.\r\n##x = linspace(0.0,L,N)\r\n##h,z=analytic_sol(x)\r\n##plot(x,h+z, x,z)\r\n##plot(x, 1.53/h)\r\n##show()\r\n",
"from __future__ import division, print_function\n\nimport os\nimport sys\n\nfrom os.path import join\n\ndef configuration(parent_package='',top_path=None):\n \n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n \n config = Configuration('coordinate_transforms', parent_package, top_path)\n\n config.add_data_dir('tests')\n\n return config\n \nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n",
"from __future__ import division, print_function\n\nimport os\nimport sys\n\nfrom os.path import join\nfrom Cython.Build import cythonize\nimport Cython.Compiler.Options\nCython.Compiler.Options.annotate = True\n\ndef configuration(parent_package='',top_path=None):\n \n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.system_info import get_info\n \n config = Configuration('fit_interpolate', parent_package, top_path)\n\n config.add_data_dir('tests')\n\n\n #util_dir = os.path.abspath(join(os.path.dirname(__file__),'..','utilities'))\n \n util_dir = join('..','utilities')\n \n util_srcs = [join(util_dir,'quad_tree.c'),\n join(util_dir,'sparse_dok.c'),\n join(util_dir,'sparse_csr.c')]\n \n if sys.platform == 'darwin':\n extra_args = None\n else:\n extra_args = ['-fopenmp']\n\n config.add_extension('fitsmooth',\n sources=['fitsmooth_ext.pyx']+util_srcs,\n include_dirs=[util_dir],\n extra_compile_args=extra_args,\n extra_link_args=extra_args)\n\n config.ext_modules = cythonize(config.ext_modules, annotate=True)\n\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n"
] |
[
[
"scipy.optimize.fsolve"
],
[
"numpy.distutils.core.setup",
"numpy.distutils.misc_util.Configuration"
],
[
"numpy.distutils.core.setup",
"numpy.distutils.misc_util.Configuration"
]
] |
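`analytic_sol` above finds the water height by solving a cubic in `h` at every spatial point with `fsolve`, choosing the initial guess so the solver lands on the subcritical branch upstream of the bump and the supercritical branch downstream. A hedged standalone sketch with illustrative values for `zb`, `zM`, and `hM` (not taken from the script):

```python
from scipy.optimize import fsolve

g, q0 = 9.81, 1.53
zb, zM, hM = 0.0, 0.2, 0.4  # illustrative values only

def depth_cubic(h):
    # same steady-state relation as find_h in the script above
    return h**3 + (zb - q0**2 / (2 * g * hM**2) - hM - zM) * h**2 + q0**2 / (2 * g)

h_subcritical = fsolve(depth_cubic, 1.0)[0]    # deep-branch initial guess
h_supercritical = fsolve(depth_cubic, 0.4)[0]  # shallow-branch initial guess
print(h_subcritical, h_supercritical)
```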
JaesikKim/HiG2Vec
|
[
"62803d421a29336d89d0a1336054b33672434fe3"
] |
[
"evalGene/score_prediction_NN.py"
] |
[
"import torch as th\nimport torch.nn as nn\nimport argparse\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom torch.utils.data import DataLoader, TensorDataset\nimport pandas as pd\nimport numpy as np\nimport copy\n\nclass Net(nn.Module):\n def __init__(self, dim):\n super(Net, self).__init__()\n self.main = nn.Sequential(\n nn.Linear(dim, int(dim/2)),\n nn.BatchNorm1d(int(dim/2)),\n nn.Dropout(),\n nn.ReLU(),\n nn.Linear(int(dim/2), 1)\n )\n def forward(self, x):\n out = self.main(x)\n return out.view(-1)\n\nclass Scheduler():\n def __init__(self, optimizer, init_lr, n_warmup, epochs):\n self.optim = optimizer\n self.n_warmup = n_warmup\n self.lr_multiplier = 0.1\n self.init_lr = init_lr\n self.total_epochs = epochs\n\n def zero_grad(self):\n self.optim.zero_grad()\n\n def step_and_update_lr(self, curr_eph):\n self.update_lr(curr_eph)\n self.optim.step()\n \n def update_lr(self, curr_eph):\n if curr_eph < self.n_warmup:\n lr = self.init_lr * self.lr_multiplier \n else:\n lr = self.init_lr * max(0.0,float(self.total_epochs-curr_eph))/float(max(1.0,self.total_epochs-self.n_warmup))\n for param_group in self.optim.param_groups:\n param_group['lr'] = lr\n\ndef load_data(samples, objects):\n x_ls = []\n y_ls = []\n for i in range(len(samples)):\n g1 = samples.iloc[i,0]\n g2 = samples.iloc[i,1]\n if g1 in objects and g2 in objects:\n g1i = objects.index(g1)\n g2i = objects.index(g2)\n x_ls.append([g1i, g2i])\n y_ls.append(samples.iloc[i,2])\n return np.array(x_ls), np.array(y_ls)\n\ndef map_to_vec(samples, embeddings):\n x_ls = []\n for i in range(len(samples)):\n x_ls.append(np.concatenate((embeddings[int(samples[i,0].item())], embeddings[int(samples[i,1].item())])).tolist())\n return th.FloatTensor(x_ls)\n \ndef main():\n parser = argparse.ArgumentParser(description='Predict protein interaction')\n parser.add_argument('-model', help='Embedding model', type=str)\n parser.add_argument('-dim', help='Embedding dimension', type=int)\n parser.add_argument('-dset', help='protein-protein interactions', type=str)\n parser.add_argument('-fout', help='Prediction output', type=str)\n parser.add_argument('-lr', help='Learning rate', type=float)\n parser.add_argument('-gpu', help='GPU id', type=int, default=0)\n parser.add_argument('-burnin', help='Epochs of burn in', type=int, default=20)\n parser.add_argument('-epochs', help='Number of epochs', type=int, default=200)\n parser.add_argument('-batchsize', help='Batchsize', type=int, default=50)\n parser.add_argument('-print_each', help='Print loss each n-th epoch', type=int, default=10)\n opt = parser.parse_args()\n\n # load embeddings\n if opt.model[-3:] == \"pth\":\n model = th.load(opt.model, map_location=\"cpu\")\n objects, embeddings = model['objects'], model['embeddings'].numpy()\n\n else:\n model = np.load(opt.embeddings, allow_pickle=True).item()\n objects, embeddings = model['objects'], model['embeddings']\n\n # dataset processing\n print(\"... 
load data ...\")\n if opt.dset[-3:] == \"tsv\":\n data = pd.read_csv(opt.dset, sep=\"\\t\")\n else:\n data = pd.read_csv(opt.dset)\n\n device = th.device('cuda:'+str(opt.gpu) if th.cuda.is_available() else 'cpu')\n X, y = load_data(data, objects)\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, random_state=42)\n\n net = Net(2*opt.dim).to(device)\n criterion = nn.MSELoss()\n optimizer = th.optim.Adam(net.parameters(), lr=opt.lr)\n scheduler = Scheduler(optimizer, opt.lr, opt.burnin, opt.epochs)\n\n # Dataloader\n train_dataset = TensorDataset(th.FloatTensor(X_train), th.FloatTensor(y_train.astype('float64')))\n val_dataset = TensorDataset(th.FloatTensor(X_val), th.FloatTensor(y_val.astype('float64')))\n test_dataset = TensorDataset(th.FloatTensor(X_test), th.FloatTensor(y_test.astype('float64')))\n train_loader = DataLoader(\n train_dataset,\n batch_size=opt.batchsize,\n shuffle=True,\n )\n val_loader = DataLoader(\n val_dataset,\n batch_size=opt.batchsize,\n shuffle=False,\n )\n test_loader = DataLoader(\n test_dataset,\n batch_size=opt.batchsize,\n shuffle=False,\n )\n\n # Train the model\n print(\"... Train Network ...\")\n opt_eph = 0\n opt_loss = np.inf\n opt_model_state_dict = net.state_dict()\n for epoch in range(opt.epochs):\n epoch_loss = []\n net.train()\n for samples, targets in train_loader:\n samples = map_to_vec(samples, embeddings)\n samples = samples.to(device)\n targets = targets.to(device)\n preds = net(samples)\n loss = criterion(preds, targets)\n scheduler.zero_grad()\n loss.backward()\n scheduler.step_and_update_lr(epoch)\n epoch_loss.append(loss.item())\n with th.no_grad():\n net.eval()\n val_loss = []\n for samples, labels in val_loader:\n samples = map_to_vec(samples, embeddings)\n samples = samples.to(device)\n labels = labels.to(device)\n preds = net(samples)\n loss = criterion(preds, labels)\n val_loss.append(loss.item())\n if np.mean(val_loss) < opt_loss:\n opt_loss = np.mean(val_loss)\n opt_eph = epoch\n opt_model_state_dict = copy.deepcopy(net.state_dict())\n\n if (epoch+1) % opt.print_each == 0:\n print(\"Epoch [{}/{}] Train Loss: {:.3f} Val Loss: {:.3f}\".format(epoch+1, opt.epochs, np.mean(epoch_loss), np.mean(val_loss)))\n\n # Save the test result\n net.load_state_dict(opt_model_state_dict)\n print(\"Optimal tuning: Epoch {}, Val Loss: {:.3f}\".format(opt_eph+1, opt_loss))\n y = []\n yhat = []\n with th.no_grad():\n net.eval()\n for samples, targets in test_loader:\n samples = map_to_vec(samples, embeddings)\n samples = samples.to(device)\n preds = net(samples)\n yhat += preds.cpu().tolist()\n y += targets.tolist()\n print(\"R2: \"+str(r2_score(y, yhat)))\n print(\"RMSE: \"+str(np.sqrt(mean_squared_error(y, yhat))))\n pd.DataFrame({'y' : y, 'yhat' : yhat}).to_csv(opt.fout, index=False)\n\n \nif __name__ == '__main__':\n main()\n\n"
] |
[
[
"numpy.array",
"torch.nn.Dropout",
"sklearn.metrics.mean_squared_error",
"torch.nn.MSELoss",
"pandas.DataFrame",
"torch.FloatTensor",
"torch.no_grad",
"numpy.load",
"sklearn.metrics.r2_score",
"numpy.mean",
"torch.nn.ReLU",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"torch.load",
"sklearn.model_selection.train_test_split",
"pandas.read_csv"
]
] |
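The `Scheduler` in the row above implements warm-up followed by linear decay: a constant `0.1 * init_lr` for the first `n_warmup` epochs, then a linear ramp from `init_lr` down to zero at `total_epochs`. A standalone sketch of the same schedule as a pure function, using the script's defaults as assumed values:

```python
# Same piecewise schedule as Scheduler.update_lr above, written as a function.
def lr_at(curr_eph, init_lr=1e-3, n_warmup=20, total_epochs=200, lr_multiplier=0.1):
    if curr_eph < n_warmup:
        return init_lr * lr_multiplier  # burn-in phase
    return init_lr * max(0.0, float(total_epochs - curr_eph)) / float(
        max(1.0, total_epochs - n_warmup)
    )

assert abs(lr_at(0) - 1e-4) < 1e-12   # burn-in: 0.1 * init_lr
assert abs(lr_at(20) - 1e-3) < 1e-12  # full LR right after warm-up
assert lr_at(200) == 0.0              # decayed to zero at the final epoch
```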
RoosterQMonee/GTAG-PyAI
|
[
"1bef3cfc85da034f9129a008bd6c5e9114ce3cfd"
] |
[
"Chat/commands/exe_command.py"
] |
[
"import speech_recognition as sr\nfrom pydub import AudioSegment\nimport os\nfrom datetime import date\nimport sounddevice as sd\nfrom scipy.io.wavfile import write\nfrom random import choice, randint\nimport pyttsx3\nimport time\nimport webbrowser\nfrom playsound import playsound\n\n# Commands\n\nhello = [\"hi\", \"Hi\", \"hello\", \"Hello\", \"wsg\", \"Wsg\", \"WSG\", \"sup\", \"Sup\", \"hey\", \"Hey\", \"hi!\", \"Hi!\", \"hello!\",\n \"Hello!\", \"wsg!\", \"Wsg!\", \"WSG!\", \"sup!\", \"Sup!\", \"hey!\", \"Hey!\", \"hi :)\", \"Hi :)\", \"hello :)\", \"Hello :)\",\n \"wsg :)\", \"Wsg :)\", \"WSG :)\", \"sup :)\", \"Sup :)\", \"hey :)\", \"Hey :)\", \"hi! :)\", \"Hi! :)\", \"hello! :)\",\n \"Hello! :)\", \"wsg! :)\", \"Wsg! :)\", \"WSG! :)\", \"sup! :)\", \"Sup! :)\", \"hey! :)\", \"Hey! :)\", \"Ello\", \"ello\",\n \"'Ello\", \"'ello\"]\nbye = [\"bye\", \"Bye\", \"goodbye\", \"Goodbye\", \"good bye\", \"Good Bye\", \"see you\", \"See you\", \"later\", \"Later\", \"byee\",\n \"Byee\", \"byeee\", \"Byeee\"]\n\ninsult = [\"fucktard\", \"idot\", \"idiot\", \"dumbass\", \"motherfucker\", \"stupid\", \"gay\", \"fucker\", \"Fucktard\", \"Idot\",\n \"Idiot\", \"Dumbass\", \"Motherfucker\", \"Stupid\", \"Gay\", \"Fucker\" \"ur fat\", \"Ur fat\", \"your fat\", \"Your fat\",\n \"youre fat\", \"youre fat\", \"faggot\", \"retard\", \"bitch\", \"whore\", \"thot\", \"fat\", \"fatty\", \"ur gay\", \"Ur gay\",\n \"your gay\", \"youre gay\", \"Youre gay\", \"Fag\", \"fag\", \"Loser\", \"loser\"]\ncompliment = [\"gg\", \"good job\", \"nice\", \"great\", \"awesome\", \"good\", \"your hot\", \"ur hot\", \"youre hot\", \"youre awesome\",\n \"youre cool\", \"Nice\"]\n\nhi = [\"Sup\", \"Hello\", \"Hi\", \"good morning\", \"Good morning\", \"Good afternoon\", \"good afternoon\", \"good evening\",\n \"Good evening\"]\nhi2 = [\"Sup\", \"Hello\", \"Hi\"]\ngn = [\"Good night\", \"good night\"]\n\nyes = [\"yes\", \"Sure!\", \"sure\", \"of course\", \"yeah\"]\nno = [\"yeah no\", \"no\", \"heck no\"]\n\nthankYou = [\"thank you\", \"Thank you\", \"Thanks\", \"thanks\", \"Thank you\", \"thank you\", \"thx!\", \"Thx!\", \"Ty!\", \"ty!\",\n \"Thanks!\", \"thanks!\", \"Thank u\", \"thank u\"]\n\nstartTimer = [\"Can you start a timer\", \"Can you start a timer?\", \"can you start a timer\", \"can you start a timer?\",\n \"please start a timer\", \"Please start a timer\", \"timer start\", \"Timer start\", \"start timer\",\n \"Start timer\", \"can you please start a timer?\", \"can you start a timer please\",\n \"Can you start a timer please\", \"can you start a timer please?\", \"Can you start a timer please?\"]\nendTimer = [\"End the timer please\", \"end the timer please\", \"please end the timer\", \"Please end the timer\", \"timer end\",\n \"Timer end\", \"End timer\", \"end timer\", \"Stop the timer please\", \"stop the timer please\",\n \"please stop the timer\", \"Please stop the timer\", \"timer stop\", \"Timer stop\", \"Stop timer\", \"stop timer\"]\n\nhowMany = [\"How many\", \"how many\", \"how many?\", \"How many?\"]\ncanIJoin = [\"can i join\", \"Can i join\", \"Can i join?\", \"can i join?\", \"can I join\", \"Can I join\", \"Can I join?\",\n \"can I join?\"]\nhowAreYou = [\"How are you\", \"how are you\", \"How are you?\", \"how are you?\", \"How are you doing\", \"how are you doing\",\n \"how are you doing?\", \"How are you doing?\", \"How are u\", \"how are u\", \"How are u?\", \"how are u?\"]\nhowImDoing = [\"Ok so far\", \"Pretty good\", \"Good\", \"Great\"]\n\nwyd = [\"What are you doing\", \"what are you 
doing\", \"Wyd\", \"wyd\", \"WYD\", \"What are you doing?\", \"what are you doing?\",\n \"Wyd?\", \"wyd?\", \"WYD?\"]\nwid = [\"Smoking crack\", \"Coding\", \"Talking to people\", \"Nothing right now\", \"Playing piano\", \"Invading poland\",\n \"Making tacos\"]\n\ninvpoland = [\"wanna go invade poland\", \"Wanna go invade poland\", \"Wanna go invade poland?\", \"wanna go invade poland?\",\n \"want to go invade poland\"]\nily = [\"i love you\", \"I love you\", \"ily\", \"Ily\", \"ILY\", \"i <3 you\", \"I <3 you\", \"i <3 u\", \"i love u\", \"I love u\"]\nisFren = [\"Are you a friend\", \"are you a friend\", \"Are you a friend?\", \"are you a friend?\", \"Are you fren\",\n \"are you fren\", \"Are you a fren?\", \"are you a fren?\", \"Are you a fren\", \"are you a fren\", \"Are you a fren?\",\n \"are you a fren?\", \"Are you fren?\", \"are you fren?\", \"are you fren\", \"Are you fren\"]\n\nwhatCanYouDo = [\"What can you do\", \"what can you do\", \"what can you do?\", \"What can you do?\", \"What do you do?\",\n \"what do you do?\", \"cmd use\", \"Cmd use\", \"!use\"]\ntheDate = [\"What is the date\", \"what is the date\", \"what is today\", \"What is today\", \"can you please tell me the date\",\n \"Can you please tell me the date\", \"what is the date today\", \"What is the date today\", \"What is the date?\",\n \"what is the date?\", \"what is today?\", \"What is today?\", \"can you please tell me the date?\",\n \"Can you please tell me the date?\", \"what is the date today?\", \"What is the date today?\"]\n\n\n\nenable_speech = [\"enable speech\", \"speech enable\", \"speech on\"]\ndisable_speech = [\"disable speech\", \"speech disable\", \"speech off\"]\n\nenable_man = [\"enable manual\", \"manual enable\", \"manual on\"]\ndisable_man = [\"disable manual\", \"manual disable\", \"manual off\"]\n\nopenSite = [\"Open site\", \"open site\", \"website\", \"site\", \"site open\"]\n\nengine = pyttsx3.init()\nfs = 44100\nseconds = 3\n\nstrtTime = 0\nendtime = 0\n\nmanual = False\nspeech = True\nbot_name = ['ivan', 'hey ivan', 'boot ivan', 'help ivan', 'Yo ivan wake up']\ntoSay = ''\ncount = 0\nwindow = Tk()\n\ntry:\n os.remove('output.wav', 'transcript.wav')\nexcept:\n pass\n\nprint(\"Started!\")\n\n\ndef main():\n global count\n while count < 3:\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait()\n write('output.wav', fs, myrecording) # Save as WAV file\n\n sound = AudioSegment.from_wav('output.wav')\n sound.export('transcript.wav', format=\"wav\")\n\n AUDIO_FILE = \"transcript.wav\"\n\n r = sr.Recognizer()\n with sr.AudioFile(AUDIO_FILE) as source:\n global speech\n global manual\n global strtTime\n global endtime\n global toSay\n audio = r.record(source)\n try:\n transcribed = r.recognize_google(audio)\n except:\n transcribed = \"Sorry, i did not understand\"\n engine.say(transcribed)\n engine.runAndWait()\n if manual == True:\n transcribed = input(\"Manual Command> \")\n\n try:\n print(\"Transcription: \" + transcribed)\n text = transcribed.lower()\n\n if text in theDate:\n toSay = (date.today())\n\n elif text in openSite:\n engine.say(\"What site do you want to open?\")\n engine.runAndWait()\n\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait()\n write('output.wav', fs, myrecording) # Save as WAV file\n\n AUDIO_FILE = \"output.wav\"\n\n r = sr.Recognizer()\n with sr.AudioFile(AUDIO_FILE) as source:\n audio = r.record(source)\n speech = True\n try:\n transcribed = r.recognize_google(audio)\n except:\n transcribed = \"I couldn't 
understand what you said\"\n engine.say(transcribed)\n engine.runAndWait()\n\n print(transcribed)\n engine.say(\"Opening site.\")\n engine.runAndWait()\n\n if transcribed != \"I couldn't understand what you said\":\n url = f'https://www.{transcribed}.org'\n webbrowser.open(url)\n\n if transcribed.lower() != 'python':\n url = f'https://www.{transcribed}.com'\n webbrowser.open(url)\n\n elif text in compliment:\n toSay = choice(thankYou)\n\n elif text in whatCanYouDo:\n toSay = f\"I am {bot_name}. I can answer questions and run commands as you wish! Just remember i was made by a thirteen year old and a twelve year old\"\n\n elif text in isFren:\n toSay = \"Of course, im always here to help\"\n\n elif text in canIJoin:\n toSay = 'Sure'\n\n elif text in insult:\n toSay = \"You do know i don't get offended, right?\"\n\n elif text in enable_man:\n manual = True\n\n elif text in disable_man:\n manual = False\n\n elif text in ily:\n playsound('yugay.wav')\n\n elif text in wyd:\n toSay = choice(wid)\n\n elif text in thankYou:\n toSay = \"You're welcome\"\n\n elif text in howMany:\n toSay = str(randint(1, 50))\n\n elif text in howAreYou:\n toSay = choice(howImDoing)\n\n elif text in invpoland:\n toSay = \"Sure\"\n\n elif text in hi:\n toSay = choice(hi2)\n\n elif text in hello:\n toSay = choice(hi2)\n\n elif text in bye:\n toSay = choice(bye)\n\n elif text in startTimer:\n strtTime == time.time()\n toSay = 'Ok'\n\n elif text in endTimer:\n endtime == time.time()\n toSay = (f'Ok, Time is {str(endtime - strtTime)}')\n\n elif text in enable_speech:\n global speech\n speech = True\n toSay = \"Ok\"\n\n elif text in disable_speech:\n global speech\n speech = False\n toSay = \"Ok\"\n \n elif text == 'what is the time':\n t = time.localtime()\n current_time = time.strftime(\"%H:%M:%S\", t)\n print(current_time)\n\n else:\n toSay = \"Unknown command\"\n\n print(toSay)\n\n if speech == True:\n engine.say(toSay)\n engine.runAndWait()\n\n else:\n count += 1\n pass\n\n input(\"\")\n except:\n pass\n # input(\"Continue? \")\n count = 0\n\n\nwhile True:\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2)\n sd.wait()\n write('output.wav', fs, myrecording)\n\n sound = AudioSegment.from_wav(\"output.wav\")\n sound.export(\"transcript.wav\", format=\"wav\")\n AUDIO_FILE = \"transcript.wav\"\n\n r = sr.Recognizer()\n with sr.AudioFile(AUDIO_FILE) as source:\n audio = r.record(source)\n speech = True\n try:\n transcribed = r.recognize_google(audio)\n except:\n pass\n\n try:\n if transcribed.lower() in bot_name and transcribed:\n print(\"Voice Acivated\")\n engine.say(f\"Hello {os.getenv('USERNAME')}, how may i help\")\n engine.runAndWait()\n\n main()\n except:\n pass\n"
] |
[
[
"scipy.io.wavfile.write"
]
] |
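The row above indexes `scipy.io.wavfile.write` as the API used by the assistant's record-then-transcribe loop. A minimal sketch of that pattern, assuming a working microphone and network access for the Google recognizer; the file name `clip.wav` and the 3-second duration are placeholder choices, not values taken from the original script:

```python
# Record a short clip, save it as PCM WAV, then transcribe it.
import sounddevice as sd
import speech_recognition as sr
from scipy.io.wavfile import write

fs = 44100      # sample rate in Hz
seconds = 3     # placeholder recording length

# dtype='int16' yields a PCM WAV that speech_recognition can read directly
recording = sd.rec(int(seconds * fs), samplerate=fs, channels=2, dtype='int16')
sd.wait()                          # block until the recording finishes
write('clip.wav', fs, recording)   # scipy.io.wavfile.write, as indexed above

r = sr.Recognizer()
with sr.AudioFile('clip.wav') as source:
    audio = r.record(source)
try:
    print(r.recognize_google(audio))
except (sr.UnknownValueError, sr.RequestError):
    print("Could not transcribe the clip")
```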
snikhil17/mlzoomcamp
|
[
"dd04a23aa1ed506247adf9922c73069ad211044d"
] |
[
"9_Serverless/lambda_function.py"
] |
[
"import tflite_runtime.interpreter as tflite\r\nfrom PIL import Image\r\nfrom io import BytesIO\r\nfrom urllib import request\r\nimport numpy as np\r\n\r\n\r\n#import model\r\ninterpreter = tflite.Interpreter(model_path='cats-dogs-v2.tflite')\r\ninterpreter.allocate_tensors()\r\n\r\n\r\n# get input and output index\r\ninput_index = interpreter.get_input_details()[0]['index']\r\noutput_index = interpreter.get_output_details()[0]['index']\r\n\r\n\r\ndef download_image(url):\r\n with request.urlopen(url) as resp:\r\n buffer = resp.read()\r\n stream = BytesIO(buffer)\r\n img = Image.open(stream)\r\n return img\r\n\r\n\r\ndef prepare_image(img, target_size):\r\n if img.mode != 'RGB':\r\n img = img.convert('RGB')\r\n img = img.resize(target_size, Image.NEAREST)\r\n\r\n return img\r\n\r\n# url = 'https://upload.wikimedia.org/wikipedia/commons/1/18/Vombatus_ursinus_-Maria_Island_National_Park.jpg'\r\n\r\n\r\ndef preprocessor(img):\r\n x = np.array(img, dtype='float32') / 255\r\n\r\n return np.array([x])\r\n\r\n\r\ndef predict(url):\r\n\r\n img = download_image(url)\r\n img = prepare_image(img, (150, 150))\r\n X = preprocessor(img)\r\n\r\n interpreter.set_tensor(input_index, X)\r\n interpreter.invoke()\r\n preds = interpreter.get_tensor(output_index)\r\n\r\n float_predictions = preds[0].tolist()\r\n label = ['cat' if preds[0] < 0.5 else 'dog']\r\n\r\n return dict(zip(label, float_predictions))\r\n\r\n\r\ndef lambda_handler(event, context):\r\n url = event['url']\r\n result = predict(url)\r\n return result\r\n"
] |
[
[
"numpy.array"
]
] |
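Since `lambda_handler` above only reads `event['url']` and ignores `context`, it can be smoke-tested locally before deployment. A minimal sketch, assuming the file is saved as `lambda_function.py` with `cats-dogs-v2.tflite` beside it; the image URL is a placeholder:

```python
# Local smoke test for the Lambda entry point defined above.
from lambda_function import lambda_handler

event = {'url': 'https://example.com/cat.jpg'}  # hypothetical image URL
result = lambda_handler(event, None)            # context is unused by the handler
print(result)                                   # e.g. {'cat': 0.12}
```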
cmccully/pysalt
|
[
"d67262a42114bd359efc6ec23fc2d05be66d2025"
] |
[
"saltred/saltmosaic.py"
] |
[
"#!/usr/bin/env python\n\n# LICENSE\n# Copyright (c) 2014, South African Astronomical Observatory (SAAO)\n# All rights reserved. See License file for more details\n\n\"\"\"\nSALTMOSAIC is a task to apply the CCD geometric corrections to MEF style SALT\ndata.\n\nAuthor Version Date\n-----------------------------------------------\nMartin Still (SAAO) 0.1 16 Oct 2006\nSM Crawford (SAAO) 0.2 19 Mar 2006\n\nUpdates\n--------------------\n20120319 - Update to new error handling\n - Changed the mosaic to use the whole frame and not trim some data\n off\n20141111 - Added option to replace the masked regions\n\"\"\"\n\nimport os\nimport time\nimport numpy\nfrom scipy import ndimage as nd\nimport pyfits\nfrom pyraf import iraf\n\nfrom math import cos, sin, pi\nfrom scipy.ndimage import geometric_transform\n\nimport saltsafekey as saltkey\nimport saltsafeio as saltio\nimport saltsafestring as saltstring\nfrom saltsafelog import logging, history\n\nfrom salterror import SaltError\n\ndebug = True\n\n\n# -----------------------------------------------------------\n# core routine\n\ndef saltmosaic(images, outimages, outpref, geomfile, interp='linear',\n geotran=True, fill=False, cleanup=True, clobber=False,\n logfile=None, verbose=True):\n\n # Start the logging\n with logging(logfile, debug) as log:\n\n # Check the input images\n infiles = saltio.argunpack('Input', images)\n\n # create list of output files\n outfiles = saltio.listparse('Outfile', outimages, outpref, infiles, '')\n\n # verify that the input and output lists are the same length\n saltio.comparelists(infiles, outfiles, 'Input', 'output')\n\n # does CCD geometry definition file exist\n geomfilefile = geomfile.strip()\n saltio.fileexists(geomfile)\n\n gap = 0\n xshift = [0, 0]\n yshift = [0, 0]\n rotation = [0, 0]\n gap, xshift, yshift, rotation = saltio.readccdgeom(geomfile)\n\n # open each raw image file and apply the transformation to it\n for img, oimg in zip(infiles, outfiles):\n\n # open the structure\n struct = saltio.openfits(img)\n\n # create the mosaic\n ostruct = make_mosaic(\n struct,\n gap,\n xshift,\n yshift,\n rotation,\n interp_type=interp,\n geotran=geotran,\n fill=fill,\n cleanup=cleanup,\n log=log,\n verbose=verbose)\n\n # update the header information\n # housekeeping keywords\n fname, hist = history(\n level=1, wrap=False, exclude=[\n 'images', 'outimages', 'outpref'])\n saltkey.housekeeping(\n ostruct[0],\n 'SMOSAIC',\n 'Images have been mosaicked ',\n hist)\n\n # write the image out\n saltio.writefits(ostruct, oimg, clobber=clobber)\n\n # close the files\n saltio.closefits(struct)\n saltio.closefits(ostruct)\n\n\ndef make_mosaic(struct, gap, xshift, yshift, rotation, interp_type='linear',\n boundary='constant', constant=0, geotran=True, fill=False,\n cleanup=True, log=None, verbose=False):\n \"\"\"Given a SALT image struct, combine each of the individual amplifiers and\n apply the geometric CCD transformations to the image\n \"\"\"\n\n # get the name of the file\n infile = saltkey.getimagename(struct[0], base=True)\n outpath = './'\n\n # identify instrument\n instrume, keyprep, keygain, keybias, keyxtalk, keyslot = \\\n saltkey.instrumid(struct)\n\n # how many amplifiers?\n nsciext = saltkey.get('NSCIEXT', struct[0])\n nextend = saltkey.get('NEXTEND', struct[0])\n nccds = saltkey.get('NCCDS', struct[0])\n amplifiers = nccds * 2\n\n if nextend > nsciext:\n varframe = True\n else:\n varframe = False\n\n # CCD geometry coefficients\n if (instrume == 'RSS' or instrume == 'PFIS'):\n xsh = [0., xshift[0], 0., xshift[1]]\n 
ysh = [0., yshift[0], 0., yshift[1]]\n rot = [0., rotation[0], 0., rotation[1]]\n elif instrume == 'SALTICAM':\n xsh = [0., xshift[0], 0.]\n ysh = [0., yshift[0], 0.]\n rot = [0., rotation[0], 0]\n\n # how many extensions?\n nextend = saltkey.get('NEXTEND', struct[0])\n\n # CCD on-chip binning\n xbin, ybin = saltkey.ccdbin(struct[0])\n\n # create temporary primary extension\n outstruct = []\n outstruct.append(struct[0])\n # define temporary FITS file store tiled CCDs\n\n tilefile = saltio.tmpfile(outpath)\n tilefile += 'tile.fits'\n if varframe:\n tilehdu = [None] * (3 * int(nsciext / 2) + 1)\n else:\n tilehdu = [None] * int(nsciext / 2 + 1)\n tilehdu[0] = pyfits.PrimaryHDU()\n tilehdu[0].header = struct[0].header\n\n if log:\n log.message('', with_stdout=verbose)\n\n # iterate over amplifiers, stich them to produce file of CCD images\n for i in range(int(nsciext / 2)):\n hdu = i * 2 + 1\n # amplifier = hdu%amplifiers\n # if (amplifier == 0): amplifier = amplifiers\n\n # read DATASEC keywords\n datasec1 = saltkey.get('DATASEC', struct[hdu])\n datasec2 = saltkey.get('DATASEC', struct[hdu + 1])\n xdsec1, ydsec1 = saltstring.secsplit(datasec1)\n xdsec2, ydsec2 = saltstring.secsplit(datasec2)\n\n # read images\n imdata1 = saltio.readimage(struct, hdu)\n imdata2 = saltio.readimage(struct, hdu + 1)\n\n # tile 2n amplifiers to yield n CCD images\n outdata = numpy.zeros((ydsec1[1] +\n abs(ysh[i +\n 1] /\n ybin), xdsec1[1] +\n xdsec2[1] +\n abs(xsh[i +\n 1] /\n xbin)), numpy.float32)\n\n # set up the variance frame\n if varframe:\n vardata = outdata.copy()\n vdata1 = saltio.readimage(struct, struct[hdu].header['VAREXT'])\n vdata2 = saltio.readimage(struct, struct[hdu + 1].header['VAREXT'])\n\n bpmdata = outdata.copy()\n bdata1 = saltio.readimage(struct, struct[hdu].header['BPMEXT'])\n bdata2 = saltio.readimage(struct, struct[hdu + 1].header['BPMEXT'])\n\n x1 = xdsec1[0] - 1\n if x1 != 0:\n msg = 'The data in %s have not been trimmed prior to mosaicking.' 
\\\n % infile\n log.error(msg)\n if xsh[i + 1] < 0:\n x1 += abs(xsh[i + 1] / xbin)\n x2 = x1 + xdsec1[1]\n y1 = ydsec1[0] - 1\n if ysh[i + 1] < 0:\n y1 += abs(ysh[i + 1] / ybin)\n y2 = y1 + ydsec1[1]\n outdata[y1:y2, x1:x2] =\\\n imdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n\n if varframe:\n vardata[y1:y2, x1:x2] =\\\n vdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n bpmdata[y1:y2, x1:x2] =\\\n bdata1[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n\n x1 = x2\n x2 = x1 + xdsec2[1]\n y1 = ydsec2[0] - 1\n if ysh[i + 1] < 0:\n y1 += abs(ysh[i + 1] / ybin)\n y2 = y1 + ydsec2[1]\n outdata[y1:y2, x1:x2] =\\\n imdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n\n if varframe:\n vardata[y1:y2, x1:x2] =\\\n vdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n bpmdata[y1:y2, x1:x2] =\\\n bdata2[ydsec1[0] - 1:ydsec1[1], xdsec1[0] - 1:xdsec1[1]]\n\n # size of new image\n naxis1 = str(xdsec1[1] + xdsec2[1])\n naxis2 = str(ydsec1[1])\n\n # add image and keywords to HDU list\n tilehdu[i + 1] = pyfits.ImageHDU(outdata)\n tilehdu[i + 1].header = struct[hdu].header\n tilehdu[\n i + 1].header['DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'\n\n if varframe:\n vext = i + 1 + int(nsciext / 2.)\n tilehdu[vext] = pyfits.ImageHDU(vardata)\n tilehdu[vext].header = struct[struct[hdu].header['VAREXT']].header\n tilehdu[vext].header[\n 'DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'\n\n bext = i + 1 + 2 * int(nsciext / 2.)\n tilehdu[bext] = pyfits.ImageHDU(bpmdata)\n tilehdu[bext].header = struct[struct[hdu].header['BPMEXT']].header\n tilehdu[bext].header[\n 'DATASEC'] = '[1:' + naxis1 + ',1:' + naxis2 + ']'\n\n # image tile log message #1\n if log:\n message = os.path.basename(infile) + '[' + str(hdu) + ']['\n message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','\n message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + '] --> '\n message += os.path.basename(tilefile) + '[' + str(i + 1) + ']['\n message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','\n message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + ']'\n log.message(message, with_stdout=verbose, with_header=False)\n message = os.path.basename(infile) + '[' + str(hdu + 1) + ']['\n message += str(xdsec1[0]) + ':' + str(xdsec1[1]) + ','\n message += str(ydsec1[0]) + ':' + str(ydsec1[1]) + '] --> '\n message += os.path.basename(tilefile) + '[' + str(i + 1) + ']['\n message += str(xdsec1[1] + 1) + ':' + \\\n str(xdsec1[1] + xdsec2[1]) + ','\n message += str(ydsec2[0]) + ':' + str(ydsec2[1]) + ']'\n log.message(message, with_stdout=verbose, with_header=False)\n\n # write temporary file of tiled CCDs\n hdulist = pyfits.HDUList(tilehdu)\n hdulist.writeto(tilefile)\n\n # iterate over CCDs, transform and rotate images\n yrot = [None] * 4\n xrot = [None] * 4\n\n tranfile = [' ']\n tranhdu = [0]\n if varframe:\n tranfile = [''] * (3 * int(nsciext / 2) + 1)\n tranhdu = [0] * (3 * int(nsciext / 2) + 1)\n else:\n tranfile = [''] * int(nsciext / 2 + 1)\n tranhdu = [0] * int(nsciext / 2 + 1)\n\n # this is hardwired for SALT where the second CCD is considered the\n # fiducial\n for hdu in range(1, int(nsciext / 2 + 1)):\n tranfile[hdu] = saltio.tmpfile(outpath)\n tranfile[hdu] += 'tran.fits'\n if varframe:\n tranfile[hdu + nccds] = saltio.tmpfile(outpath) + 'tran.fits'\n tranfile[hdu + 2 * nccds] = saltio.tmpfile(outpath) + 'tran.fits'\n\n ccd = hdu % nccds\n if (ccd == 0):\n ccd = nccds\n\n # correct rotation for CCD binning\n yrot[ccd] = rot[ccd] * ybin / xbin\n xrot[ccd] = rot[ccd] * xbin / ybin\n dxshift = xbin * int(float(int(gap) / 
xbin) + 0.5) - gap\n\n # transformation using geotran IRAF task\n # if (ccd == 1):\n if (ccd != 2):\n\n if geotran:\n message = '\\nSALTMOSAIC -- geotran ' + tilefile + \\\n '[' + str(ccd) + '] ' + tranfile[hdu]\n message += ' \\\"\\\" \\\"\\\" xshift=' + \\\n str((xsh[ccd] + (2 - ccd) * dxshift) / xbin) + ' '\n message += 'yshift=' + \\\n str(ysh[ccd] / ybin) + ' xrotation=' + str(xrot[ccd]) + ' '\n message += 'yrotation=' + \\\n str(yrot[ccd]) + ' xmag=1 ymag=1 xmin=\\'INDEF\\''\n message += 'xmax=\\'INDEF\\' ymin=\\'INDEF\\' ymax=\\'INDEF\\' '\n message += 'ncols=\\'INDEF\\' '\n message += 'nlines=\\'INDEF\\' verbose=\\'no\\' '\n message += 'fluxconserve=\\'yes\\' nxblock=2048 '\n message += 'nyblock=2048 interpolant=\\'' + \\\n interp_type + '\\' boundary=\\'constant\\' constant=0'\n log.message(message, with_stdout=verbose)\n\n yd, xd = tilehdu[ccd].data.shape\n ncols = 'INDEF' # ncols=xd+abs(xsh[ccd]/xbin)\n nlines = 'INDEF' # nlines=yd+abs(ysh[ccd]/ybin)\n geo_xshift = xsh[ccd] + (2 - ccd) * dxshift / xbin\n geo_yshift = ysh[ccd] / ybin\n iraf.images.immatch.geotran(tilefile + \"[\" + str(ccd) + \"]\",\n tranfile[hdu],\n \"\",\n \"\",\n xshift=geo_xshift,\n yshift=geo_yshift,\n xrotation=xrot[ccd],\n yrotation=yrot[ccd],\n xmag=1, ymag=1, xmin='INDEF',\n xmax='INDEF', ymin='INDEF',\n ymax='INDEF', ncols=ncols,\n nlines=nlines, verbose='no',\n fluxconserve='yes', nxblock=2048,\n nyblock=2048, interpolant=\"linear\",\n boundary=\"constant\", constant=0)\n if varframe:\n var_infile = tilefile + \"[\" + str(ccd + nccds) + \"]\"\n iraf.images.immatch.geotran(var_infile,\n tranfile[hdu + nccds],\n \"\",\n \"\",\n xshift=geo_xshift,\n yshift=geo_yshift,\n xrotation=xrot[ccd],\n yrotation=yrot[ccd],\n xmag=1, ymag=1, xmin='INDEF',\n xmax='INDEF', ymin='INDEF',\n ymax='INDEF', ncols=ncols,\n nlines=nlines, verbose='no',\n fluxconserve='yes',\n nxblock=2048, nyblock=2048,\n interpolant=\"linear\",\n boundary=\"constant\",\n constant=0)\n var2_infile = tilefile + \"[\" + str(ccd + 2 * nccds) + \"]\"\n iraf.images.immatch.geotran(var2_infile,\n tranfile[hdu + 2 * nccds],\n \"\",\n \"\",\n xshift=geo_xshift,\n yshift=geo_yshift,\n xrotation=xrot[ccd],\n yrotation=yrot[ccd],\n xmag=1, ymag=1, xmin='INDEF',\n xmax='INDEF', ymin='INDEF',\n ymax='INDEF', ncols=ncols,\n nlines=nlines, verbose='no',\n fluxconserve='yes',\n nxblock=2048, nyblock=2048,\n interpolant=\"linear\",\n boundary=\"constant\",\n constant=0)\n\n # open the file and copy the data to tranhdu\n tstruct = pyfits.open(tranfile[hdu])\n tranhdu[hdu] = tstruct[0].data\n tstruct.close()\n if varframe:\n tranhdu[\n hdu +\n nccds] = pyfits.open(\n tranfile[\n hdu +\n nccds])[0].data\n tranhdu[\n hdu +\n 2 *\n nccds] = pyfits.open(\n tranfile[\n hdu +\n 2 *\n nccds])[0].data\n\n else:\n log.message(\n \"Transform CCD #%i using dx=%s, dy=%s, rot=%s\" %\n (ccd,\n xsh[ccd] /\n 2.0,\n ysh[ccd] /\n 2.0,\n xrot[ccd]),\n with_stdout=verbose,\n with_header=False)\n tranhdu[hdu] = geometric_transform(\n tilehdu[ccd].data,\n tran_func,\n prefilter=False,\n order=1,\n extra_arguments=(\n xsh[ccd] / 2,\n ysh[ccd] / 2,\n 1,\n 1,\n xrot[ccd],\n yrot[ccd]))\n tstruct = pyfits.PrimaryHDU(tranhdu[hdu])\n tstruct.writeto(tranfile[hdu])\n if varframe:\n tranhdu[hdu + nccds] = geometric_transform(\n tilehdu[hdu + 3].data,\n tran_func,\n prefilter=False,\n order=1,\n extra_arguments=(\n xsh[ccd] / 2, ysh[ccd] / 2,\n 1, 1,\n xrot[ccd], yrot[ccd]))\n tranhdu[hdu + 2 * nccds] = geometric_transform(\n tilehdu[hdu + 6].data,\n tran_func,\n prefilter=False,\n 
order=1,\n extra_arguments=(\n xsh[ccd] / 2, ysh[ccd] / 2,\n 1, 1,\n xrot[ccd], yrot[ccd]))\n\n else:\n log.message(\n \"Transform CCD #%i using dx=%s, dy=%s, rot=%s\" %\n (ccd, 0, 0, 0), with_stdout=verbose, with_header=False)\n tranhdu[hdu] = tilehdu[ccd].data\n if varframe:\n tranhdu[hdu + nccds] = tilehdu[ccd + nccds].data\n tranhdu[hdu + 2 * nccds] = tilehdu[ccd + 2 * nccds].data\n\n # open outfile\n if varframe:\n outlist = 4 * [None]\n else:\n outlist = 2 * [None]\n\n # outstruct[0] = pyfits.PrimaryHDU()\n outlist[0] = struct[0].copy()\n naxis1 = int(gap / xbin * (nccds - 1))\n naxis2 = 0\n for i in range(1, nccds + 1):\n yw, xw = tranhdu[i].shape\n naxis1 += xw + int(abs(xsh[ccd] / xbin)) + 1\n naxis2 = max(naxis2, yw)\n outdata = numpy.zeros((naxis2, naxis1), numpy.float32)\n outdata.shape = naxis2, naxis1\n if varframe:\n vardata = outdata * 0\n bpmdata = outdata * 0 + 1\n\n # iterate over CCDs, stich them to produce a full image\n hdu = 0\n totxshift = 0\n for hdu in range(1, nccds + 1):\n\n # read DATASEC keywords\n ydsec, xdsec = tranhdu[hdu].shape\n\n # define size and shape of final image\n # tile CCDs to yield mosaiced image\n x1 = int((hdu - 1) * (xdsec + gap / xbin)) + int(totxshift)\n x2 = xdsec + x1\n y1 = int(0)\n y2 = int(ydsec)\n outdata[y1:y2, x1:x2] = tranhdu[hdu]\n totxshift += int(abs(xsh[hdu] / xbin)) + 1\n if varframe:\n vardata[y1:y2, x1:x2] = tranhdu[hdu + nccds]\n bpmdata[y1:y2, x1:x2] = tranhdu[hdu + 2 * nccds]\n\n # make sure to cover up all the gaps include bad areas\n if varframe:\n baddata = (outdata == 0)\n baddata = nd.maximum_filter(baddata, size=3)\n bpmdata[baddata] = 1\n \n\n # fill in the gaps if requested\n if fill:\n if varframe:\n outdata = fill_gaps(outdata, bpmdata)\n else:\n outdata = fill_gaps(outdata, 0)\n\n # add to the file\n outlist[1] = pyfits.ImageHDU(outdata)\n if varframe:\n outlist[2] = pyfits.ImageHDU(vardata)\n outlist[3] = pyfits.ImageHDU(bpmdata)\n\n # create the image structure\n outstruct = pyfits.HDUList(outlist)\n\n # update the head informaation\n # housekeeping keywords\n saltkey.put('NEXTEND', 2, outstruct[0])\n saltkey.new('EXTNAME', 'SCI', 'Extension name', outstruct[1])\n saltkey.new('EXTVER', 1, 'Extension number', outstruct[1])\n if varframe:\n saltkey.new('VAREXT', 2, 'Variance frame extension', outstruct[1])\n saltkey.new('BPMEXT', 3, 'BPM Extension', outstruct[1])\n\n try:\n saltkey.copy(struct[1], outstruct[1], 'CCDSUM')\n except:\n pass\n\n # Add keywords associated with geometry\n gstr = '%i %f %f %f %f %f %f' % (gap,\n xshift[0],\n yshift[0],\n rotation[0],\n xshift[1],\n yshift[1],\n rotation[1])\n saltkey.new('SALTGEOM', gstr, 'SALT geometry coefficients', outstruct[0])\n\n # WCS keywords\n saltkey.new('CRPIX1', 0, 'WCS: X reference pixel', outstruct[1])\n saltkey.new('CRPIX2', 0, 'WCS: Y reference pixel', outstruct[1])\n saltkey.new(\n 'CRVAL1',\n float(xbin),\n 'WCS: X reference coordinate value',\n outstruct[1])\n saltkey.new(\n 'CRVAL2',\n float(ybin),\n 'WCS: Y reference coordinate value',\n outstruct[1])\n saltkey.new('CDELT1', float(xbin), 'WCS: X pixel size', outstruct[1])\n saltkey.new('CDELT2', float(ybin), 'WCS: Y pixel size', outstruct[1])\n saltkey.new('CTYPE1', 'pixel', 'X type', outstruct[1])\n saltkey.new('CTYPE2', 'pixel', 'Y type', outstruct[1])\n\n # cleanup temporary files\n if cleanup:\n for tfile in tranfile:\n if os.path.isfile(tfile):\n saltio.delete(tfile)\n if os.path.isfile(tilefile):\n status = saltio.delete(tilefile)\n\n # return the file\n return outstruct\n\n\ndef 
fill_gaps(data, mask):\n \"\"\"Interpolate in the gaps in the data\n\n Parameters\n ----------\n data: np.ndarray\n data to have values filled in for\n\n mask: float or nd.ndarray\n If an nd.ndarray, it will be assumed to be a mask\n with values equal to 1 where they should be interpolated\n over. If a float, pixels with that value will be replaced\n\n \"\"\"\n ys, xs = data.shape\n if isinstance(mask, numpy.ndarray):\n mask = (mask == 0)\n for i in range(ys):\n x = numpy.arange(xs)\n rdata = data[i, :]\n rmask = mask[i, :]\n rmask = nd.minimum_filter(rmask, size=3)\n rdata = numpy.interp(x, x[rmask], rdata[rmask])\n data[i, rmask == 0] = rdata[rmask == 0]\n else:\n mask = (data != mask)\n for i in range(ys):\n x = numpy.arange(xs)\n rdata = data[i, :]\n rmask = mask[i, :]\n rmask = nd.minimum_filter(rmask, size=3)\n rdata = numpy.interp(x, x[rmask], rdata[rmask])\n data[i, rmask == 0] = rdata[rmask == 0]\n\n return data\n\n\ndef tran_func(a, xshift, yshift, xmag, ymag, xrot, yrot):\n xtran = ymag * a[0] * cos(yrot * pi / 180.0) \\\n - xmag * a[1] * sin(xrot * pi / 180) \\\n - yshift\n ytran = ymag * a[0] * sin(yrot * pi / 180.0) \\\n + xmag * a[1] * cos(xrot * pi / 180) \\\n - xshift\n return xtran, ytran\n\n\n# -----------------------------------------------------------\n# main code\nif not iraf.deftask('saltmosaic'):\n parfile = iraf.osfn(\"saltred$saltmosaic.par\")\n t = iraf.IrafTaskFactory(\n taskname=\"saltmosaic\",\n value=parfile,\n function=saltmosaic,\n pkgname='saltred')\n"
] |
[
[
"scipy.ndimage.minimum_filter",
"numpy.zeros",
"numpy.interp",
"scipy.ndimage.geometric_transform",
"numpy.arange",
"scipy.ndimage.maximum_filter"
]
] |
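The `fill_gaps` routine above patches the inter-chip gaps row by row: it erodes the valid-pixel mask with `scipy.ndimage.minimum_filter` so the interpolation anchors sit clear of gap edges, then fills the masked run with `numpy.interp`. A standalone sketch of that per-row logic for the scalar-mask case (fill value 0), using a made-up one-row array:

```python
import numpy as np
from scipy import ndimage as nd

data = np.array([[1., 2., 3., 0., 0., 6., 7., 8.]])
mask = data != 0                                  # True where pixels are valid
for i in range(data.shape[0]):
    x = np.arange(data.shape[1])
    rmask = nd.minimum_filter(mask[i], size=3)    # erode a 1-pixel border around gaps
    row = np.interp(x, x[rmask], data[i, rmask])  # linear fill across the gap
    data[i, rmask == 0] = row[rmask == 0]         # write back only the eroded region
print(data)  # the zero run (and its border) is replaced by the linear fill
```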
tmpaul06/dgl
|
[
"8f458464b0e14c78978db4b91590e8ca718c5ec6"
] |
[
"transformer/dataset/utils.py"
] |
[
"import numpy as np\nimport torch as th\nimport os\nfrom dgl.data.utils import *\nimport spacy\nfrom tqdm import tqdm\n\nnlp = spacy.load('en')\n\n_urls = {\n 'wmt': 'https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/wmt14bpe_de_en.zip',\n 'scripts': 'https://s3.us-east-2.amazonaws.com/dgl.ai/dataset/transformer_scripts.zip',\n}\n\n\ndef store_dependency_parses(in_filename, out_filename):\n \"\"\"Create dependency parses in advance so that training is fast\"\"\"\n with open(in_filename, 'r') as f:\n input_lines = f.readlines()\n\n print('Preparing dependency tokens for {} sentences using {}'.format(len(input_lines), in_filename))\n # Batch write\n batch_size = min(max(len(input_lines) // 100, 100), 500)\n with open(out_filename, 'w') as out_f:\n for i in tqdm(range(0, len(input_lines), batch_size)):\n lines = input_lines[i:(i + batch_size + 1)]\n out_lines = list()\n for line in lines:\n # Replace @ with ''. This is a cheap hack\n line = line.replace('@', '').strip()\n if not line:\n continue\n tokens = nlp(line)\n\n line_deps = list()\n for tok in tokens:\n line_deps.append(str((tok.i, tok.head.i)).replace(' ', ''))\n out_lines.append(' '.join(line_deps))\n out_f.write('\\n'.join(out_lines))\n\n\ndef prepare_dataset(dataset_name):\n \"download and generate datasets\"\n script_dir = os.path.join('scripts')\n if not os.path.exists(script_dir):\n download(_urls['scripts'], path='scripts.zip')\n extract_archive('scripts.zip', 'scripts')\n\n directory = os.path.join('data', dataset_name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n return\n if dataset_name == 'multi30k':\n os.system('bash scripts/prepare-multi30k.sh')\n # Pre-create dependency parses for train, valid and test\n for fi in ['train', 'val', 'test2016']:\n store_dependency_parses('data/multi30k/{}.en.atok'.format(fi), 'data/multi30k/{}_deps.en.atok'.format(fi))\n elif dataset_name == 'wmt14':\n download(_urls['wmt'], path='wmt14.zip')\n os.system('bash scripts/prepare-wmt14.sh')\n elif dataset_name == 'copy' or dataset_name == 'tiny_copy':\n train_size = 9000\n valid_size = 1000\n test_size = 1000\n char_list = [chr(i) for i in range(ord('a'), ord('z') + 1)]\n with open(os.path.join(directory, 'train.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'train.out'), 'w') as f_out:\n for i, l in zip(range(train_size), np.random.normal(15, 3, train_size).astype(int)):\n l = max(l, 1)\n line = ' '.join(np.random.choice(char_list, l)) + '\\n'\n f_in.write(line)\n f_out.write(line)\n\n with open(os.path.join(directory, 'valid.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'valid.out'), 'w') as f_out:\n for i, l in zip(range(valid_size), np.random.normal(15, 3, valid_size).astype(int)):\n l = max(l, 1)\n line = ' '.join(np.random.choice(char_list, l)) + '\\n'\n f_in.write(line)\n f_out.write(line)\n\n with open(os.path.join(directory, 'test.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'test.out'), 'w') as f_out:\n for i, l in zip(range(test_size), np.random.normal(15, 3, test_size).astype(int)):\n l = max(l, 1)\n line = ' '.join(np.random.choice(char_list, l)) + '\\n'\n f_in.write(line)\n f_out.write(line)\n\n with open(os.path.join(directory, 'vocab.txt'), 'w') as f:\n for c in char_list:\n f.write(c + '\\n')\n\n elif dataset_name == 'sort' or dataset_name == 'tiny_sort':\n train_size = 9000\n valid_size = 1000\n test_size = 1000\n char_list = [chr(i) for i in range(ord('a'), ord('z') + 1)]\n with open(os.path.join(directory, 'train.in'), 'w') as f_in,\\\n open(os.path.join(directory, 
'train.out'), 'w') as f_out:\n for i, l in zip(range(train_size), np.random.normal(15, 3, train_size).astype(int)):\n l = max(l, 1)\n seq = np.random.choice(char_list, l)\n f_in.write(' '.join(seq) + '\\n')\n f_out.write(' '.join(np.sort(seq)) + '\\n')\n\n with open(os.path.join(directory, 'valid.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'valid.out'), 'w') as f_out:\n for i, l in zip(range(valid_size), np.random.normal(15, 3, valid_size).astype(int)):\n l = max(l, 1)\n seq = np.random.choice(char_list, l)\n f_in.write(' '.join(seq) + '\\n')\n f_out.write(' '.join(np.sort(seq)) + '\\n')\n\n with open(os.path.join(directory, 'test.in'), 'w') as f_in,\\\n open(os.path.join(directory, 'test.out'), 'w') as f_out:\n for i, l in zip(range(test_size), np.random.normal(15, 3, test_size).astype(int)):\n l = max(l, 1)\n seq = np.random.choice(char_list, l)\n f_in.write(' '.join(seq) + '\\n')\n f_out.write(' '.join(np.sort(seq)) + '\\n')\n\n with open(os.path.join(directory, 'vocab.txt'), 'w') as f:\n for c in char_list:\n f.write(c + '\\n')\n"
] |
[
[
"numpy.random.normal",
"numpy.random.choice",
"numpy.sort"
]
] |
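For the synthetic `copy`/`tiny_copy` splits, `prepare_dataset` above draws each sequence length from a normal distribution and the tokens uniformly from `a`–`z`. A minimal sketch of that generator; the 5-sample count is an arbitrary choice for illustration:

```python
import numpy as np

char_list = [chr(i) for i in range(ord('a'), ord('z') + 1)]
lengths = np.random.normal(15, 3, 5).astype(int)   # lengths ~ N(15, 3)
for l in lengths:
    l = max(l, 1)                                  # guard against non-positive draws
    line = ' '.join(np.random.choice(char_list, l))
    print(line)                                    # input == target for the copy task
```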
lhoestq/DeDLOC
|
[
"36f5a6d043c3d727f9d098a35fba94aa351a5cd4",
"36f5a6d043c3d727f9d098a35fba94aa351a5cd4"
] |
[
"swav/vissl/extra_scripts/create_ucf101_data_files.py",
"swav/vissl/vissl/data/data_helper.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport argparse\nimport os\nimport ssl\nfrom contextlib import contextmanager\nfrom typing import List, Optional, Tuple\n\nfrom PIL import Image\nfrom torch.utils.data import DataLoader\nfrom torchvision.datasets.utils import download_url, extract_archive\nfrom tqdm import tqdm\n\ntry:\n from pyunpack import Archive\nexcept ImportError:\n raise ValueError(\n \"You must have pyunpack and patool installed to run this script: pip install pyunpack patool.\"\n )\n\ntry:\n import av\nexcept ImportError:\n raise ValueError(\"You must have pyav installed to run this script: pip install av.\")\n\n\ndef get_argument_parser():\n \"\"\"\n List of arguments supported by the script\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-i\",\n \"--input\",\n type=str,\n help=\"The input folder contains the expanded UCF-101 archive files\",\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n type=str,\n help=\"The output folder containing the disk_folder output\",\n )\n parser.add_argument(\n \"-d\",\n \"--download\",\n action=\"store_const\",\n const=True,\n default=False,\n help=\"To download the original dataset and decompress it in the input folder\",\n )\n return parser\n\n\n@contextmanager\ndef without_ssl_certificate_check():\n default_context = ssl._create_default_https_context\n ssl._create_default_https_context = ssl._create_unverified_context\n yield\n ssl._create_default_https_context = default_context\n\n\ndef download_dataset(root: str):\n \"\"\"\n Download the UCF101 dataset archive and expand it in the folder provided as parameter\n \"\"\"\n IMAGE_URL = \"https://www.crcv.ucf.edu/data/UCF101/UCF101.rar\"\n SPLIT_URL = (\n \"https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip\"\n )\n\n # Download the raw inputs of UCF101, circumventing the SSL certificate issues\n with without_ssl_certificate_check():\n download_url(url=IMAGE_URL, root=root)\n download_url(url=SPLIT_URL, root=root)\n\n # Extract the archives\n print(\"Extracting archives...\")\n Archive(os.path.join(root, \"UCF101.rar\")).extractall(root)\n extract_archive(os.path.join(root, \"UCF101TrainTestSplits-RecognitionTask.zip\"))\n\n\nclass _ExtractMiddleFrameDataset:\n \"\"\"\n Dataset used to parallelize the transformation of the dataset via a DataLoader\n \"\"\"\n\n def __init__(self, data_path: str, annotation_path: str):\n self.data_path = data_path\n self.split_info = self._read_split_info(annotation_path)\n\n @staticmethod\n def _read_split_info(file_path: str) -> List[Tuple[str, str]]:\n samples = []\n with open(file_path) as f:\n for line in f:\n category, file_name = line.strip().split(\"/\")\n file_name = file_name.split(\" \")[0]\n samples.append((category, file_name))\n return samples\n\n @staticmethod\n def _extract_middle_frame(file_path: str) -> Optional[Image.Image]:\n \"\"\"\n Extract the middle frame out of a video clip.\n \"\"\"\n with av.open(file_path) as container:\n nb_frames = container.streams.video[0].frames\n vid_stream = container.streams.video[0]\n for i, frame in enumerate(container.decode(vid_stream)):\n if i - 1 == nb_frames // 2:\n return frame.to_image()\n return None\n\n def __len__(self):\n return len(self.split_info)\n\n def __getitem__(self, idx: int) -> Tuple[Image.Image, str, str]:\n category, video_name = self.split_info[idx]\n video_path = os.path.join(self.data_path, category, video_name)\n mid_frame = self._extract_middle_frame(video_path)\n image_name = 
os.path.splitext(video_name)[0] + \".jpg\"\n return mid_frame, image_name, category\n\n\ndef create_disk_folder_split(annotation_path: str, data_path: str, output_path: str):\n \"\"\"\n Create one split of the disk_folder format from the file at 'annotation_path' and the data stored\n in the folder 'data_path'.\n \"\"\"\n assert os.path.exists(\n annotation_path\n ), f\"Could not find annotation path {annotation_path}\"\n assert os.path.exists(data_path), f\"Could not find data folder {data_path}\"\n\n dataset = _ExtractMiddleFrameDataset(\n data_path=data_path, annotation_path=annotation_path\n )\n loader = DataLoader(dataset, num_workers=8, batch_size=1, collate_fn=lambda x: x[0])\n for batch in tqdm(loader):\n mid_frame, image_name, category = batch\n category_folder = os.path.join(output_path, category)\n os.makedirs(category_folder, exist_ok=True)\n image_path = os.path.join(category_folder, image_name)\n with open(image_path, \"w\") as image_file:\n mid_frame.save(image_file)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Example usage:\n\n ```\n python extra_scripts/create_ucf101_data_files.py -i /path/to/ucf101 -o /output_path/ucf101 -d\n ```\n \"\"\"\n args = get_argument_parser().parse_args()\n if args.download:\n download_dataset(args.input)\n\n data_path = os.path.join(args.input, \"UCF-101\")\n annotation_path = os.path.join(args.input, \"ucfTrainTestlist\")\n create_disk_folder_split(\n annotation_path=os.path.join(annotation_path, \"trainlist01.txt\"),\n data_path=data_path,\n output_path=os.path.join(args.output, \"train\"),\n )\n create_disk_folder_split(\n annotation_path=os.path.join(annotation_path, \"testlist01.txt\"),\n data_path=data_path,\n output_path=os.path.join(args.output, \"val\"),\n )\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport contextlib\nimport logging\nimport queue\n\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom torch.utils.data import Dataset\nfrom torch.utils.data.distributed import DistributedSampler\n\n\ndef get_mean_image(crop_size):\n \"\"\"\n Helper function that returns a gray PIL image of the size specified by user.\n\n Args:\n crop_size (int): used to generate (crop_size x crop_size x 3) image.\n\n Returns:\n img: PIL Image\n \"\"\"\n img = Image.fromarray(128 * np.ones((crop_size, crop_size, 3), dtype=np.uint8))\n return img\n\n\[email protected]\ndef with_temporary_numpy_seed(sampling_seed: int):\n \"\"\"\n Context manager to run a specific portion of code with a given seed:\n resumes the numpy state after the execution of the block\n \"\"\"\n original_random_state = np.random.get_state()\n np.random.seed(sampling_seed)\n yield\n np.random.set_state(original_random_state)\n\n\ndef unbalanced_sub_sampling(\n total_num_samples: int, num_samples: int, skip_samples: int = 0, seed: int = 0\n) -> np.ndarray:\n \"\"\"\n Given an original dataset of size 'total_size', sub_sample part of the dataset such that\n the sub sampling is deterministic (identical across distributed workers)\n Return the selected indices\n \"\"\"\n with with_temporary_numpy_seed(seed):\n return np.random.choice(\n total_num_samples, size=skip_samples + num_samples, replace=False\n )[skip_samples:]\n\n\ndef balanced_sub_sampling(\n labels: np.ndarray, num_samples: int, skip_samples: int = 0, seed: int = 0\n) -> np.ndarray:\n \"\"\"\n Given all the labels of a dataset, sub_sample a part of the labels such that:\n - the number of samples of each label differs by at most one\n - the sub sampling is deterministic (identical across distributed workers)\n Return the indices of the selected labels\n \"\"\"\n groups = {}\n for i, label in enumerate(labels):\n groups.setdefault(label, []).append(i)\n\n unique_labels = sorted(groups.keys())\n skip_quotient, skip_rest = divmod(skip_samples, len(unique_labels))\n sample_quotient, sample_rest = divmod(num_samples, len(unique_labels))\n assert (\n sample_quotient > 0\n ), \"the number of samples should be at least equal to the number of classes\"\n\n with with_temporary_numpy_seed(seed):\n for i, label in enumerate(unique_labels):\n label_indices = groups[label]\n num_label_samples = sample_quotient + (1 if i < sample_rest else 0)\n skip_label_samples = skip_quotient + (1 if i < skip_rest else 0)\n permuted_indices = np.random.choice(\n label_indices,\n size=skip_label_samples + num_label_samples,\n replace=False,\n )\n groups[label] = permuted_indices[skip_label_samples:]\n\n return np.concatenate([groups[label] for label in unique_labels])\n\n\nclass StatefulDistributedSampler(DistributedSampler):\n \"\"\"\n More fine-grained state DataSampler that uses training iteration and epoch\n both for shuffling data. PyTorch DistributedSampler only uses epoch\n for the shuffling and starts sampling data from the start. In case of training\n on very large data, we train for one epoch only and when we resume training,\n we want to resume the data sampler from the training iteration.\n \"\"\"\n\n def __init__(self, dataset, batch_size=None):\n \"\"\"\n Initializes the instance of StatefulDistributedSampler. Random seed is set\n for the epoch set and data is shuffled. 
For starting the sampling, use\n the start_iter (set to 0 or set by checkpointing resuming) to\n sample data from the remaining images.\n\n Args:\n dataset (Dataset): Pytorch dataset that sampler will shuffle\n batch_size (int): batch size we want the sampler to sample\n \"\"\"\n super().__init__(dataset, shuffle=False)\n\n self.start_iter = 0\n self.batch_size = batch_size\n self.total_size = len(dataset) - (len(dataset) % self.num_replicas)\n self.num_samples = self.total_size // self.num_replicas\n logging.info(f\"rank: {self.rank}: Sampler created...\")\n\n def __iter__(self):\n # partition data into num_replicas and optionally shuffle within a rank\n g = torch.Generator()\n import time\n ts = time.time()\n seed = int(ts) % 100000\n g.manual_seed(seed)\n shuffling = torch.randperm(self.num_samples, generator=g).tolist()\n indices = np.array(\n list(\n range(\n (self.rank * self.num_samples), (self.rank + 1) * self.num_samples\n )\n )\n )[shuffling].tolist()\n\n # make sure we have correct number of samples per replica\n assert len(indices) == self.num_samples\n assert self.batch_size > 0, \"batch_size not set for the sampler\"\n\n # resume the sampler\n start_index = self.start_iter * self.batch_size\n indices = indices[start_index:]\n return iter(indices)\n\n def set_start_iter(self, start_iter):\n \"\"\"\n Set the iteration number from which the sampling should start. This is\n used to find the marker in the data permutation order from where the\n sampler should start sampling.\n \"\"\"\n self.start_iter = start_iter\n\n\nclass QueueDataset(Dataset):\n \"\"\"\n This class helps dealing with the invalid images in the dataset by using\n two queue. One queue is used to enqueue seen and valid images from previous\n batches. The other queue is used to dequeue. The class is implemented such\n that the same batch will never have duplicate images. If we can't dequeue a\n valid image, we return None for that instance.\n\n Args:\n queue_size: size the the queue (ideally set it to batch_size). Both queues\n will be of the same size\n \"\"\"\n\n def __init__(self, queue_size):\n\n self.queue_size = queue_size\n # we create a CPU queue to buffer the valid seen images. We use these\n # images to replace the invalid images in the minibatch\n # 2 queues (FIFO) per gpu of size = batch size per gpu (64 img):\n # a) 1st queue is used only to dequeue seen images. We get images from\n # this queue only to backfill.\n # b) 2nd queue is used only to add the new incoming valid seen images\n self.queue_init = False\n self.dequeue_images_queue = None\n self.enqueue_images_queue = None\n\n def _init_queues(self):\n self.dequeue_images_queue = queue.Queue(maxsize=self.queue_size)\n self.enqueue_images_queue = queue.Queue(maxsize=self.queue_size)\n self.queue_init = True\n logging.info(f\"QueueDataset enabled. 
Using queue_size: {self.queue_size}\")\n\n def _refill_dequeue_buffer(self):\n dequeue_qsize = self._get_dequeue_buffer_size()\n for _ in range(self.queue_size - dequeue_qsize):\n try:\n self.dequeue_images_queue.put(\n self.enqueue_images_queue.get(), block=True\n )\n except Exception:\n continue\n\n def _enqueue_valid_image(self, img):\n if self._get_enqueue_buffer_size() >= self.queue_size:\n return\n try:\n self.enqueue_images_queue.put(img, block=True, timeout=0.1)\n return\n except queue.Full:\n return\n\n def _dequeue_valid_image(self):\n if self._get_dequeue_buffer_size() == 0:\n return\n try:\n return self.dequeue_images_queue.get(block=True, timeout=0.1)\n except queue.Empty:\n return None\n\n def _get_enqueue_buffer_size(self):\n return self.enqueue_images_queue.qsize()\n\n def _get_dequeue_buffer_size(self):\n return self.dequeue_images_queue.qsize()\n\n def _is_large_image(self, sample):\n h, w = sample.size\n if h * w > 10000000:\n return True\n return False\n\n def on_sucess(self, sample):\n \"\"\"\n If we encounter a successful image and the queue is not full, we store it\n in the queue. One consideration we make further is: if the image is very\n large, we don't add it to the queue as otherwise the CPU memory will grow\n a lot.\n \"\"\"\n if self._is_large_image(sample):\n return\n self._enqueue_valid_image(sample)\n if self.enqueue_images_queue.full() and not self.dequeue_images_queue.full():\n self._refill_dequeue_buffer()\n\n def on_failure(self):\n \"\"\"\n If there was a failure in getting the origin image, we look into the queue\n if there is any valid seen image available. If yes, we dequeue and use this\n image in place of the failed image.\n \"\"\"\n sample, is_success = None, False\n if self._get_dequeue_buffer_size() > 0:\n sample = self._dequeue_valid_image()\n if sample is not None:\n is_success = True\n return sample, is_success\n"
] |
[
[
"torch.utils.data.DataLoader"
],
[
"numpy.concatenate",
"numpy.random.choice",
"numpy.random.seed",
"torch.Generator",
"numpy.ones",
"torch.randperm",
"numpy.random.get_state",
"numpy.random.set_state"
]
] |
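Both sub-sampling helpers above lean on `with_temporary_numpy_seed`: they seed NumPy's global RNG so every distributed worker draws identical indices, then restore the prior state so the rest of the program is unaffected. A minimal sketch of that save/seed/restore pattern (the seed and population sizes are placeholders):

```python
import numpy as np

state = np.random.get_state()     # save the global RNG state
np.random.seed(0)                 # fixed seed -> identical draws on every worker
picked = np.random.choice(100, size=10, replace=False)
np.random.set_state(state)        # restore, so later draws resume the old stream
print(picked)
```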
tatatodd/tensorflow
|
[
"8ae7343f3d24569b4bb142ddc7b58037267a2d3c",
"e469ccf50312fef277eb0fe55b04992f7974a35e"
] |
[
"tensorflow/contrib/optimizer_v2/optimizer_v2.py",
"tensorflow/python/keras/engine/training.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Version 2 of class Optimizer.\"\"\"\n# pylint: disable=g-bad-name\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nimport six\n\nfrom tensorflow.python.eager import backprop\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gradients\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.training import distribute as distribute_lib\nfrom tensorflow.python.training import distribution_strategy_context as distribute_ctx\nfrom tensorflow.python.training import optimizer as optimizer_v1\nfrom tensorflow.python.training import slot_creator\nfrom tensorflow.python.training.checkpointable import base as checkpointable\nfrom tensorflow.python.util import nest\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass _OptimizableVariable(object):\n \"\"\"Interface for abstracting over variables in the optimizers.\"\"\"\n\n @abc.abstractmethod\n def target(self):\n \"\"\"Returns the optimization target for this variable.\"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n @abc.abstractmethod\n def update_op(self, optimizer, g, *args):\n \"\"\"Returns the update ops for updating the variable.\"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n\nclass _RefVariableProcessor(_OptimizableVariable):\n \"\"\"Processor for Variable.\"\"\"\n\n def __init__(self, v):\n self._v = v\n\n def target(self):\n return self._v._ref() # pylint: disable=protected-access\n\n def update_op(self, optimizer, g, *args):\n if isinstance(g, ops.Tensor):\n update_op = optimizer._apply_dense(g, self._v, *args) # pylint: disable=protected-access\n if self._v.constraint is not None:\n with ops.control_dependencies([update_op]):\n return self._v.assign(self._v.constraint(self._v))\n else:\n return update_op\n else:\n assert isinstance(g, ops.IndexedSlices), (\"Gradient \", g, \" is neither a \"\n \"tensor nor IndexedSlices.\")\n if self._v.constraint is not None:\n raise RuntimeError(\n \"Cannot use a constraint function on a sparse variable.\")\n # pylint: disable=protected-access\n return optimizer._apply_sparse_duplicate_indices(g, self._v, *args)\n\n\nclass _DenseReadResourceVariableProcessor(_OptimizableVariable):\n \"\"\"Processor for dense ResourceVariables.\"\"\"\n\n def __init__(self, v):\n self._v = v\n\n def target(self):\n return self._v\n\n def update_op(self, optimizer, g, *args):\n # pylint: disable=protected-access\n update_op = optimizer._resource_apply_dense(g, 
self._v.op.inputs[0], *args)\n if self._v.constraint is not None:\n with ops.control_dependencies([update_op]):\n return self._v.assign(self._v.constraint(self._v))\n else:\n return update_op\n\n\nclass _DenseResourceVariableProcessor(_OptimizableVariable):\n \"\"\"Processor for dense ResourceVariables.\"\"\"\n\n def __init__(self, v):\n self._v = v\n\n def target(self):\n return self._v\n\n def update_op(self, optimizer, g, *args):\n # pylint: disable=protected-access\n if isinstance(g, ops.IndexedSlices):\n if self._v.constraint is not None:\n raise RuntimeError(\n \"Cannot use a constraint function on a sparse variable.\")\n return optimizer._resource_apply_sparse_duplicate_indices(\n g.values, self._v, g.indices, *args)\n update_op = optimizer._resource_apply_dense(g, self._v, *args)\n if self._v.constraint is not None:\n with ops.control_dependencies([update_op]):\n return self._v.assign(self._v.constraint(self._v))\n else:\n return update_op\n\n\nclass _TensorProcessor(_OptimizableVariable):\n \"\"\"Processor for ordinary Tensors.\n\n Even though a Tensor can't really be updated, sometimes it is useful to\n compute the gradients with respect to a Tensor using the optimizer. Updating\n the Tensor is, of course, unsupported.\n \"\"\"\n\n def __init__(self, v):\n self._v = v\n\n def target(self):\n return self._v\n\n def update_op(self, optimizer, g, *args):\n raise NotImplementedError(\"Trying to update a Tensor \", self._v)\n\n\ndef _get_processor(v):\n \"\"\"The processor of v.\"\"\"\n if context.executing_eagerly():\n if isinstance(v, ops.Tensor):\n return _TensorProcessor(v)\n else:\n return _DenseResourceVariableProcessor(v)\n if v.op.type == \"VarHandleOp\":\n return _DenseResourceVariableProcessor(v)\n if isinstance(v, variables.Variable):\n return _RefVariableProcessor(v)\n if isinstance(v, ops.Tensor):\n return _TensorProcessor(v)\n raise NotImplementedError(\"Trying to optimize unsupported type \", v)\n\n\ndef _var_key_v2(var):\n \"\"\"Key for representing a primary variable, for looking up slots.\"\"\"\n # pylint: disable=protected-access\n if hasattr(var, \"_distributed_container\"):\n distributed_container = var._distributed_container()\n assert distributed_container is not None\n if context.executing_eagerly():\n return distributed_container._unique_id\n return distributed_container._shared_name\n if context.executing_eagerly():\n return var._unique_id\n return var.op.name\n\n\ndef _resolve(value, name):\n if callable(value):\n value = value()\n return ops.convert_to_tensor(value, name=name)\n\n\ndef _is_dynamic(value):\n \"\"\"Returns true if __init__ arg `value` should be re-evaluated each step.\"\"\"\n if callable(value):\n return True\n # Don't need to do anything special in graph mode, since dynamic values\n # will propagate correctly automatically.\n # TODO(josh11b): Add per-device caching across steps using variables for\n # truly static values once we add distributed support.\n if context.executing_eagerly() and isinstance(\n value, resource_variable_ops.ResourceVariable):\n return True\n return False\n\n\nclass _OptimizerV2State(object):\n \"\"\"Holds per-graph and per-step optimizer state.\n\n Use _init_with_static_hyper() to create the state for a graph, and then\n _copy_with_dynamic_hyper() to convert that to state for a particular step.\n The difference between the two is that the former only has hyper\n parameter values that are static and the latter also has values that\n can change every step (according to _is_dynamic()).\n \"\"\"\n\n def __init__(self, 
op_name):\n self._op_name = op_name\n\n def _init_with_static_hyper(self, hyper):\n \"\"\"Initialize a fresh state object from hyper dict.\"\"\"\n # self._hyper contains a dict from name to a dict with the Tensor values.\n # This dict starts with a single item with key \"None\" with the hyper\n # parameter value converted to a Tensor. Other items have dtype keys\n # with that Tensor cast to that dtype.\n with ops.init_scope():\n self._hyper = {\n name: {\n None: ops.convert_to_tensor(value, name=name)\n } for name, (dynamic, value) in sorted(hyper.items()) if not dynamic\n }\n self._slots = {}\n self._non_slot_dict = {}\n # Extra state to help Optimizers implement Checkpointable. Holds information\n # about variables which will be restored as soon as they're created.\n self._deferred_dependencies = {} # Non-slot variables\n self._deferred_slot_restorations = {} # Slot variables\n\n def _copy_with_dynamic_hyper(self, hyper, distribution, non_slot_devices):\n \"\"\"Create a new state object for a particular step.\"\"\"\n ret = _OptimizerV2State(self._op_name)\n # pylint: disable=protected-access\n ret._slots = self._slots\n ret._non_slot_dict = self._non_slot_dict\n ret._deferred_dependencies = self._deferred_dependencies\n ret._deferred_slot_restorations = self._deferred_slot_restorations\n ret._hyper = {\n name: {\n None: _resolve(value, name)\n } for name, (dynamic, value) in sorted(hyper.items()) if dynamic\n }\n ret._hyper.update(self._hyper)\n ret._non_slot_devices = non_slot_devices\n ret._distribution = distribution\n return ret\n\n def _variables(self):\n \"\"\"Returns a list of all variables held by self.\"\"\"\n optimizer_variables = list(self._non_slot_dict.values())\n for variable_dict in self._slots.values():\n for slot_for_variable in variable_dict.values():\n optimizer_variables.append(slot_for_variable)\n # Sort variables by name so that the return is deterministic.\n return sorted(optimizer_variables, key=lambda v: v.name)\n\n def _slot_dict(self, slot_name):\n \"\"\"Returns a dict for caching slots created under the given name.\n\n Args:\n slot_name: Name for the slot.\n\n Returns:\n A dict that maps primary `Variable` objects to the slot created\n for that variable, under the given slot name.\n \"\"\"\n named_slots = self._slots.get(slot_name, None)\n if named_slots is None:\n named_slots = {}\n self._slots[slot_name] = named_slots\n return named_slots\n\n def create_slot(self, var, val, slot_name, optional_op_name=None):\n \"\"\"Find or create a slot for a variable.\n\n Args:\n var: A `Variable` object.\n val: A `Tensor`. The initial value of the slot.\n slot_name: Name for the slot.\n optional_op_name: Name to use when scoping the Variable that needs to be\n created for the slot.\n\n Returns:\n A `Variable` object.\n \"\"\"\n named_slots = self._slot_dict(slot_name)\n var_key = _var_key_v2(var)\n if var_key not in named_slots:\n new_slot_variable = slot_creator.create_slot(\n var, val, optional_op_name or self._op_name)\n self._restore_slot_variable(\n slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[var_key] = new_slot_variable\n return named_slots[var_key]\n\n def create_slot_with_initializer(self,\n var,\n initializer,\n shape,\n dtype,\n slot_name,\n optional_op_name=None):\n \"\"\"Find or create a slot for a variable, using an Initializer.\n\n Args:\n var: A `Variable` object.\n initializer: An `Initializer`. 
The initial value of the slot.\n shape: Shape of the initial value of the slot.\n dtype: Type of the value of the slot.\n slot_name: Name for the slot.\n optional_op_name: Name to use when scoping the Variable that needs to be\n created for the slot.\n\n Returns:\n A `Variable` object.\n \"\"\"\n named_slots = self._slot_dict(slot_name)\n var_key = _var_key_v2(var)\n if var_key not in named_slots:\n new_slot_variable = slot_creator.create_slot_with_initializer(\n var, initializer, shape, dtype, optional_op_name or self._op_name)\n self._restore_slot_variable(\n slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[var_key] = new_slot_variable\n return named_slots[var_key]\n\n def zeros_slot(self, var, slot_name, optional_op_name=None):\n \"\"\"Find or create a slot initialized with 0.0.\n\n Args:\n var: A `Variable` object.\n slot_name: Name for the slot.\n optional_op_name: Name to use when scoping the Variable that needs to be\n created for the slot.\n\n Returns:\n A `Variable` object.\n \"\"\"\n named_slots = self._slot_dict(slot_name)\n var_key = _var_key_v2(var)\n if var_key not in named_slots:\n new_slot_variable = slot_creator.create_zeros_slot(\n var, optional_op_name or self._op_name)\n self._restore_slot_variable(\n slot_name=slot_name, variable=var, slot_variable=new_slot_variable)\n named_slots[var_key] = new_slot_variable\n return named_slots[var_key]\n\n def _create_or_restore_slot_variable(self,\n slot_variable_position,\n slot_name,\n variable,\n optional_op_name=None):\n \"\"\"Restore a slot variable's value, possibly creating it.\n\n Called when a variable which has an associated slot variable is created or\n restored. When executing eagerly, we create the slot variable with a\n restoring initializer.\n\n No new variables are created when graph building. Instead,\n _restore_slot_variable catches these after normal creation and adds restore\n ops to the graph. This method is nonetheless important when graph building\n for the case when a slot variable has already been created but `variable`\n has just been added to a dependency graph (causing us to realize that the\n slot variable needs to be restored).\n\n Args:\n slot_variable_position: A `checkpointable._CheckpointPosition` object\n indicating the slot variable `Checkpointable` object to be restored.\n slot_name: The name of this `Optimizer`'s slot to restore into.\n variable: The variable object this slot is being created for.\n optional_op_name: Name to use when scoping the Variable that needs to be\n created for the slot.\n \"\"\"\n slot_variable = self.get_slot(var=variable, name=slot_name)\n if (slot_variable is None and context.executing_eagerly() and\n slot_variable_position.is_simple_variable()\n # Defer slot variable creation if there is an active variable creator\n # scope. Generally we'd like to eagerly create/restore slot variables\n # when possible, but this may mean that scopes intended to catch\n # `variable` also catch its eagerly created slot variable\n # unintentionally (specifically make_template would add a dependency on\n # a slot variable if not for this case). 
Deferring is mostly harmless\n # (aside from double initialization), and makes variable creator scopes\n # behave the same way they do when graph building.\n and not ops.get_default_graph()._variable_creator_stack): # pylint: disable=protected-access\n initializer = checkpointable.CheckpointInitialValue(\n checkpoint_position=slot_variable_position)\n slot_variable = self.create_slot(\n var=variable,\n val=initializer,\n slot_name=slot_name,\n optional_op_name=optional_op_name)\n # Optimizers do not have unconditional dependencies on their slot\n # variables (nor do any other objects). They are only saved if the\n # variables they were created for are also saved.\n if slot_variable is not None:\n # If we've either made this slot variable, or if we've pulled out an\n # existing slot variable, we should restore it.\n slot_variable_position.restore(slot_variable)\n else:\n # We didn't make the slot variable. Defer restoring until it gets created\n # normally. We keep a list rather than the one with the highest restore\n # UID in case slot variables have their own dependencies, in which case\n # those could differ between restores.\n variable_key = _var_key_v2(variable)\n self._deferred_slot_restorations.setdefault(slot_name, {}).setdefault(\n variable_key, []).append(slot_variable_position)\n\n def get_slot(self, var, name):\n \"\"\"Return a slot named `name` created for `var` by the Optimizer.\n\n Some `Optimizer` subclasses use additional variables. For example\n `Momentum` and `Adagrad` use variables to accumulate updates. This method\n gives access to these `Variable` objects if for some reason you need them.\n\n Use `get_slot_names()` to get the list of slot names created by the\n `Optimizer`.\n\n Args:\n var: A variable passed to `minimize()` or `apply_gradients()`.\n name: A string.\n\n Returns:\n The `Variable` for the slot if it was created, `None` otherwise.\n \"\"\"\n named_slots = self._slots.get(name, None)\n if not named_slots:\n return None\n return named_slots.get(_var_key_v2(var), None)\n\n def get_slot_names(self):\n \"\"\"Return a list of the names of slots created by the `Optimizer`.\n\n See `get_slot()`.\n\n Returns:\n A list of strings.\n \"\"\"\n return sorted(self._slots.keys())\n\n def create_non_slot(self, initial_value, name, colocate_with=None):\n \"\"\"Add an extra variable, not associated with a slot.\"\"\"\n v = self._non_slot_dict.get(name, None)\n if v is None:\n if colocate_with is None:\n colocate_with = self._non_slot_devices\n with self._distribution.colocate_vars_with(colocate_with):\n # TODO(josh11b): Use get_variable() except for the legacy Adam use case.\n v = variable_scope.variable(initial_value, name=name, trainable=False)\n self._non_slot_dict[name] = v\n deferred_dependencies_list = self._deferred_dependencies.pop(name, ())\n for checkpoint_position in sorted(\n deferred_dependencies_list,\n key=lambda restore: restore.checkpoint.restore_uid,\n reverse=True):\n checkpoint_position.restore(v)\n return v\n\n def _restore_slot_variable(self, slot_name, variable, slot_variable):\n \"\"\"Restore a newly created slot variable's value.\"\"\"\n variable_key = _var_key_v2(variable)\n deferred_restorations = self._deferred_slot_restorations.get(\n slot_name, {}).pop(variable_key, [])\n # Iterate over restores, highest restore UID first to minimize the number\n # of assignments.\n deferred_restorations.sort(\n key=lambda position: position.restore_uid, reverse=True)\n for checkpoint_position in deferred_restorations:\n 
checkpoint_position.restore(slot_variable)\n\n def get_non_slot(self, name):\n \"\"\"Returns the non-slot variable identified by `name`.\"\"\"\n return self._non_slot_dict.get(name, None)\n\n def get_hyper(self, name, dtype=None):\n \"\"\"Returns the `name` hyper parameter, optionally cast to `dtype`.\"\"\"\n dtype_dict = self._hyper[name]\n # Do we have the value cast to dtype already cached? This should always\n # succeed when dtype is None.\n if dtype in dtype_dict:\n return dtype_dict[dtype]\n # Not cached, cast to dtype and save the result in the cache.\n result = math_ops.cast(dtype_dict[None], dtype)\n dtype_dict[dtype] = result\n return result\n\n\nclass OptimizerV2(optimizer_v1.Optimizer):\n \"\"\"Updated base class for optimizers.\n\n This class defines the API to add Ops to train a model. You never use this\n class directly, but instead instantiate one of its subclasses such as\n `GradientDescentOptimizer`, `AdagradOptimizer`, or `MomentumOptimizer`.\n\n ### Usage\n\n ```python\n # Create an optimizer with the desired parameters.\n opt = GradientDescentOptimizer(learning_rate=0.1)\n # Add Ops to the graph to minimize a cost by updating a list of variables.\n # \"cost\" is a Tensor, and the list of variables contains tf.Variable\n # objects.\n opt_op = opt.minimize(cost, var_list=<list of variables>)\n ```\n\n In the training program you will just have to run the returned Op.\n\n ```python\n # Execute opt_op to do one step of training:\n opt_op.run()\n ```\n\n ### Processing gradients before applying them.\n\n Calling `minimize()` takes care of both computing the gradients and\n applying them to the variables. If you want to process the gradients\n before applying them you can instead use the optimizer in three steps:\n\n 1. Compute the gradients with `compute_gradients()`.\n 2. Process the gradients as you wish.\n 3. Apply the processed gradients with `apply_gradients()`.\n\n Example:\n\n ```python\n # Create an optimizer.\n opt = GradientDescentOptimizer(learning_rate=0.1)\n\n # Compute the gradients for a list of variables.\n grads_and_vars = opt.compute_gradients(loss, <list of variables>)\n\n # grads_and_vars is a list of tuples (gradient, variable). Do whatever you\n # need to the 'gradient' part, for example cap them, etc.\n capped_grads_and_vars = [(MyCapper(gv[0]), gv[1]) for gv in grads_and_vars]\n\n # Ask the optimizer to apply the capped gradients.\n opt.apply_gradients(capped_grads_and_vars)\n ```\n\n ### Gating Gradients\n\n Both `minimize()` and `compute_gradients()` accept a `gate_gradients`\n argument that controls the degree of parallelism during the application of\n the gradients.\n\n The possible values are: `GATE_NONE`, `GATE_OP`, and `GATE_GRAPH`.\n\n <b>`GATE_NONE`</b>: Compute and apply gradients in parallel. This provides\n the maximum parallelism in execution, at the cost of some non-reproducibility\n in the results. For example the two gradients of `matmul` depend on the input\n values: With `GATE_NONE` one of the gradients could be applied to one of the\n inputs _before_ the other gradient is computed resulting in non-reproducible\n results.\n\n <b>`GATE_OP`</b>: For each Op, make sure all gradients are computed before\n they are used. This prevents race conditions for Ops that generate gradients\n for multiple inputs where the gradients depend on the inputs.\n\n <b>`GATE_GRAPH`</b>: Make sure all gradients for all variables are computed\n before any one of them is used. 
This provides the least parallelism but can\n be useful if you want to process all gradients before applying any of them.\n\n ### Slots\n\n Some optimizer subclasses, such as `MomentumOptimizer` and `AdagradOptimizer`,\n allocate and manage additional variables associated with the variables to\n train. These are called <i>Slots</i>. Slots have names and you can ask the\n optimizer for the names of the slots that it uses. Once you have a slot name\n you can ask the optimizer for the variable it created to hold the slot value.\n\n This can be useful if you want to log or debug a training algorithm, report\n stats about the slots, etc.\n\n ### Non-slot variables\n\n Some optimizer subclasses, such as `AdamOptimizer`, have variables that\n are not associated with the variables to train, just the step itself.\n\n ### Hyper parameters\n\n These are arguments passed to the optimizer subclass constructor\n (the `__init__` method), and then passed to `self._set_hyper()`.\n They can be either regular Python values (like 1.0), tensors, or\n callables. If they are callable, the callable will be called during\n `apply_gradients()` to get the value for the hyper parameter.\n\n ### State\n\n Internal methods are passed a `state` argument with the correct\n values to use for the slot and non-slot variables, and the hyper\n parameters.\n \"\"\"\n\n # Values for gate_gradients.\n GATE_NONE = 0\n GATE_OP = 1\n GATE_GRAPH = 2\n\n def __init__(self, use_locking, name):\n \"\"\"Create a new Optimizer.\n\n This must be called by the constructors of subclasses.\n Note that Optimizer instances should not bind to a single graph,\n and so shouldn't keep Tensors as member variables. Generally\n you should be able to use the _set_hyper()/state.get_hyper()\n facility instead.\n\n Args:\n use_locking: Bool. If True, use locks to prevent concurrent updates\n to variables.\n name: A non-empty string. The name to use for accumulators created\n for the optimizer.\n\n Raises:\n ValueError: If name is malformed.\n RuntimeError: If _create_slots has been overridden instead of\n _create_vars.\n \"\"\"\n # Note: We intentionally don't call parent __init__.\n\n # Optimizer._create_slots was replaced by _create_vars in OptimizerV2.\n if (self.__class__._create_slots.__code__ is not # pylint: disable=protected-access\n OptimizerV2._create_slots.__code__):\n raise RuntimeError(\n \"Override _create_vars instead of _create_slots when \"\n \"descending from OptimizerV2 (class %s)\" % self.__class__.__name__)\n if not name:\n raise ValueError(\"Must specify the optimizer name\")\n\n self._use_locking = use_locking\n self._name = name\n # Map from graph_key to state for that graph. 
We use the graph_key\n # since it works in both eager and graph mode, and gives the outer\n # graph inside functions.\n replica_context = distribute_ctx.get_replica_context()\n if replica_context is None:\n # In a cross-replica context for a DistributionStrategy, which means\n # only one Optimizer will be created, not one per replica.\n self._per_graph_state = {}\n else:\n # We use get_replica_context().merge_call() to get a single dict\n # shared across all model replicas when running with a\n # DistributionStrategy.\n self._per_graph_state = replica_context.merge_call(lambda _: {})\n\n # Hyper parameters, and whether they should be re-evaluated every step.\n self._hyper = {}\n\n def _set_hyper(self, name, value):\n self._hyper[name] = (_is_dynamic(value), value)\n\n def minimize(self,\n loss,\n global_step=None,\n var_list=None,\n gate_gradients=GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n name=None,\n grad_loss=None,\n stop_gradients=None,\n scale_loss_by_num_replicas=None):\n \"\"\"Add operations to minimize `loss` by updating `var_list`.\n\n This method simply combines calls to `compute_gradients()` and\n `apply_gradients()`. If you want to process the gradients before applying\n them, call `compute_gradients()` and `apply_gradients()` explicitly instead\n of using this function.\n\n Args:\n loss: A `Tensor` containing the value to minimize.\n global_step: Optional `Variable` to increment by one after the variables\n have been updated.\n var_list: Optional list or tuple of `Variable` objects to update to\n minimize `loss`. Defaults to the list of variables collected in the\n graph under the key `GraphKeys.TRAINABLE_VARIABLES`.\n gate_gradients: How to gate the computation of gradients. Can be\n `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.\n aggregation_method: Specifies the method used to combine gradient terms.\n Valid values are defined in the class `AggregationMethod`.\n colocate_gradients_with_ops: If True, try colocating gradients with the\n corresponding op.\n name: Optional name for the returned operation.\n grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n stop_gradients: Optional. A Tensor or list of tensors not to differentiate\n through.\n scale_loss_by_num_replicas: Optional boolean. If true, scale the loss down\n by the number of replicas. By default, auto-detects whether this is\n needed.\n\n Returns:\n An Operation that updates the variables in `var_list`. If `global_step`\n was not `None`, that operation also increments `global_step`.\n\n Raises:\n ValueError: If some of the variables are not `Variable` objects.\n\n @compatibility(eager)\n When eager execution is enabled, `loss` should be a Python function that\n computes the value to be minimized. 
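For example (a minimal sketch; `GradientDescentOptimizer` stands in for any\n concrete subclass, and the variable is illustrative only):\n\n ```python\n v = tf.Variable(2.0)\n opt = GradientDescentOptimizer(learning_rate=0.1)\n # No `var_list` is passed: `v` is watched while the callable runs.\n opt.minimize(lambda: (v - 1.0) ** 2)\n ```\n\n 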
If `var_list` is None, `loss` should take no arguments.\n Minimization (and gradient computation) is done with respect to the\n elements of `var_list` if not None, else with respect to any trainable\n variables created during the execution of the `loss` function.\n `gate_gradients`, `aggregation_method`, `colocate_gradients_with_ops` and\n `grad_loss` are ignored when eager execution is enabled.\n @end_compatibility\n \"\"\"\n grads_and_vars = self.compute_gradients(\n loss,\n var_list=var_list,\n gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n grad_loss=grad_loss,\n stop_gradients=stop_gradients,\n scale_loss_by_num_replicas=scale_loss_by_num_replicas)\n\n vars_with_grad = [v for g, v in grads_and_vars if g is not None]\n if not vars_with_grad:\n raise ValueError(\n \"No gradients provided for any variable, check your graph for ops\"\n \" that do not support gradients, between variables %s and loss %s.\" %\n ([str(v) for _, v in grads_and_vars], loss))\n\n return self.apply_gradients(\n grads_and_vars, global_step=global_step, name=name)\n\n def compute_gradients(self,\n loss,\n var_list=None,\n gate_gradients=GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n grad_loss=None,\n stop_gradients=None,\n scale_loss_by_num_replicas=None):\n \"\"\"Compute gradients of `loss` for the variables in `var_list`.\n\n This is the first part of `minimize()`. It returns a list\n of (gradient, variable) pairs where \"gradient\" is the gradient\n for \"variable\". Note that \"gradient\" can be a `Tensor`, an\n `IndexedSlices`, or `None` if there is no gradient for the\n given variable.\n\n Args:\n loss: A Tensor containing the value to minimize or a callable taking no\n arguments which returns the value to minimize. When eager execution is\n enabled it must be a callable.\n var_list: Optional list or tuple of `tf.Variable` to update to minimize\n `loss`. Defaults to the list of variables collected in the graph under\n the key `GraphKeys.TRAINABLE_VARIABLES`.\n gate_gradients: How to gate the computation of gradients. Can be\n `GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.\n aggregation_method: Specifies the method used to combine gradient terms.\n Valid values are defined in the class `AggregationMethod`.\n colocate_gradients_with_ops: If True, try colocating gradients with the\n corresponding op.\n grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.\n stop_gradients: Optional. A Tensor or list of tensors not to differentiate\n through.\n scale_loss_by_num_replicas: Optional boolean. If true, scale the loss down\n by the number of replicas. By default, auto-detects whether this is\n needed.\n\n Returns:\n A list of (gradient, variable) pairs. Variable is always present, but\n gradient can be `None`.\n\n Raises:\n TypeError: If `var_list` contains anything else than `Variable` objects.\n ValueError: If some arguments are invalid.\n RuntimeError: If called with eager execution enabled and `loss` is\n not callable.\n\n @compatibility(eager)\n When eager execution is enabled, `gate_gradients`, `aggregation_method`,\n and `colocate_gradients_with_ops` are ignored.\n @end_compatibility\n \"\"\"\n # TODO(josh11b): Test that we handle weight decay in a reasonable way.\n if callable(loss):\n with backprop.GradientTape() as tape:\n if var_list is not None:\n tape.watch(var_list)\n loss_value = loss()\n\n # Scale loss for number of replicas (callable-loss case). 
In this case,\n # we have to be careful to call distribute_lib.get_loss_reduction()\n # *after* loss() is evaluated, so we know what loss reduction it uses.\n if scale_loss_by_num_replicas is None:\n scale_loss_by_num_replicas = (\n distribute_lib.get_loss_reduction() == variable_scope\n .VariableAggregation.MEAN)\n if scale_loss_by_num_replicas:\n num_replicas = distribute_ctx.get_distribution_strategy().num_replicas\n if num_replicas > 1:\n loss_value *= 1. / num_replicas\n\n if var_list is None:\n var_list = tape.watched_variables()\n grads = tape.gradient(loss_value, var_list, grad_loss)\n return list(zip(grads, var_list))\n if context.executing_eagerly():\n raise RuntimeError(\"`loss` passed to Optimizer.compute_gradients should \"\n \"be a function when eager execution is enabled.\")\n\n # Scale loss for number of replicas (non-callable-loss case).\n if scale_loss_by_num_replicas is None:\n scale_loss_by_num_replicas = (\n distribute_lib.get_loss_reduction() == variable_scope\n .VariableAggregation.MEAN)\n if scale_loss_by_num_replicas:\n num_replicas = distribute_ctx.get_distribution_strategy().num_replicas\n if num_replicas > 1:\n loss *= 1. / num_replicas\n\n if gate_gradients not in [\n optimizer_v1.Optimizer.GATE_NONE, optimizer_v1.Optimizer.GATE_OP,\n optimizer_v1.Optimizer.GATE_GRAPH\n ]:\n raise ValueError(\n \"gate_gradients must be one of: Optimizer.GATE_NONE, \"\n \"Optimizer.GATE_OP, Optimizer.GATE_GRAPH. Not %s\" % gate_gradients)\n self._assert_valid_dtypes([loss])\n if grad_loss is not None:\n self._assert_valid_dtypes([grad_loss])\n if var_list is None:\n var_list = (\n variables.trainable_variables() + ops.get_collection(\n ops.GraphKeys.TRAINABLE_RESOURCE_VARIABLES))\n else:\n var_list = nest.flatten(var_list)\n # pylint: disable=protected-access\n var_list += ops.get_collection(ops.GraphKeys._STREAMING_MODEL_PORTS)\n # pylint: enable=protected-access\n processors = [_get_processor(v) for v in var_list]\n if not var_list:\n raise ValueError(\"No variables to optimize.\")\n var_refs = [p.target() for p in processors]\n grads = gradients.gradients(\n loss,\n var_refs,\n grad_ys=grad_loss,\n gate_gradients=(gate_gradients == optimizer_v1.Optimizer.GATE_OP),\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n stop_gradients=stop_gradients)\n if gate_gradients == optimizer_v1.Optimizer.GATE_GRAPH:\n grads = control_flow_ops.tuple(grads)\n grads_and_vars = list(zip(grads, var_list))\n self._assert_valid_dtypes([\n v for g, v in grads_and_vars\n if g is not None and v.dtype != dtypes.resource\n ])\n return grads_and_vars\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"Apply gradients to variables.\n\n This is the second part of `minimize()`. It returns an `Operation` that\n applies gradients.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs as returned by\n `compute_gradients()`.\n global_step: Optional `Variable` to increment by one after the variables\n have been updated.\n name: Optional name for the returned operation. Default to the name\n passed to the `Optimizer` constructor.\n\n Returns:\n An `Operation` that applies the specified gradients. If `global_step`\n was not None, that operation also increments `global_step`.\n\n Raises:\n TypeError: If `grads_and_vars` is malformed.\n ValueError: If none of the variables have gradients.\n \"\"\"\n # This is a default implementation of apply_gradients() that can be shared\n # by most optimizers. 
It relies on the subclass implementing the following\n # methods: _create_vars(), _prepare(), _apply_dense(), and _apply_sparse().\n\n # Filter out variables with gradients of `None`.\n grads_and_vars = tuple(grads_and_vars) # Make sure repeat iteration works.\n if not grads_and_vars:\n raise ValueError(\"No variables provided.\")\n filtered = tuple((g, v) for (g, v) in grads_and_vars if g is not None)\n if not filtered:\n raise ValueError(\"No gradients provided for any variable: %s.\" %\n ([str(v) for _, v in grads_and_vars],))\n return distribute_ctx.get_replica_context().merge_call(\n self._distributed_apply, filtered, global_step=global_step, name=name)\n\n def _get_or_create_state(self, var_list=None):\n \"\"\"Either looks up or creates `_OptimizerV2State`.\n\n If any variables are available, they should be passed via the `var_list`\n argument, and these will be used to determine the graph to create/retrieve\n state for. Otherwise the returned state is for the current default graph.\n\n Args:\n var_list: A list of variables to extract a graph from.\n\n Returns:\n An `_OptimizerV2State` object.\n \"\"\"\n # Determine the graph_key from the current graph.\n eager_execution = context.executing_eagerly()\n if eager_execution or var_list is None:\n graph = ops.get_default_graph()\n else:\n graph = ops._get_graph_from_inputs(var_list) # pylint: disable=protected-access\n assert graph is not None\n graph_key = graph._graph_key # pylint: disable=protected-access\n\n # Get the per graph state by looking up the graph_key.\n if graph_key in self._per_graph_state:\n per_graph_state = self._per_graph_state[graph_key]\n else:\n per_graph_state = _OptimizerV2State(self._name)\n per_graph_state._init_with_static_hyper(self._hyper) # pylint: disable=protected-access\n self._per_graph_state[graph_key] = per_graph_state\n return per_graph_state\n\n def _distributed_apply(self, distribution, grads_and_vars, global_step, name):\n \"\"\"`apply_gradients` for use with a `DistributionStrategy`.\"\"\"\n reduced_grads = distribution.batch_reduce(\n variable_scope.VariableAggregation.SUM, grads_and_vars)\n var_list = [v for _, v in grads_and_vars]\n grads_and_vars = zip(reduced_grads, var_list)\n\n unwrapped_var_list = [x for v in var_list for x in distribution.unwrap(v)]\n eager_execution = context.executing_eagerly()\n if eager_execution:\n # Give a clear error in this case instead of \"name not supported\n # for Eager Tensors\" when we compute non_slot_devices.\n for v in unwrapped_var_list:\n if isinstance(v, ops.Tensor):\n raise NotImplementedError(\"Trying to update a Tensor \", v)\n\n with ops.name_scope(name, self._name) as name:\n per_graph_state = self._get_or_create_state(var_list=unwrapped_var_list)\n # Include the current value of any dynamic hyper parameters in `state`.\n non_slot_devices = distribution.non_slot_devices(var_list)\n state = per_graph_state._copy_with_dynamic_hyper( # pylint: disable=protected-access\n self._hyper, distribution, non_slot_devices)\n\n # Create any slot and non-slot variables we need in `state`.\n with ops.init_scope():\n self._create_vars(var_list, state)\n\n with ops.name_scope(name): # Re-enter name_scope created above\n # Give the child class a chance to do something before we start\n # applying gradients.\n self._prepare(state)\n\n def update(v, g):\n \"\"\"Update variable `v` using gradient `g`.\"\"\"\n assert v is not None\n\n # Convert the grad to Tensor or IndexedSlices if necessary, and\n # look up a processor for each variable's type.\n try:\n g = 
ops.convert_to_tensor_or_indexed_slices(g)\n except TypeError:\n raise TypeError(\"Gradient must be convertible to a Tensor\"\n \" or IndexedSlices, or None: %s\" % g)\n if not isinstance(g, (ops.Tensor, ops.IndexedSlices)):\n raise TypeError(\n \"Gradient must be a Tensor, IndexedSlices, or None: %s\" % g)\n processor = _get_processor(v)\n\n # We colocate all ops created in _apply_dense or _apply_sparse\n # on the same device as the variable.\n # TODO(apassos): figure out how to get the variable name here.\n scope_name = \"\" if eager_execution else v.op.name\n # device_policy is set because non-mirrored tensors will be read in\n # `update_op`.\n # TODO(josh11b): Make different state objects for each device to\n # avoid needing to set the device_policy.\n device_policy = context.context().device_policy(\n context.DEVICE_PLACEMENT_SILENT)\n with ops.name_scope(\"update_\" + scope_name), device_policy:\n return processor.update_op(self, g, state)\n\n # Use the processors to update the variables.\n update_ops = []\n for grad, var in grads_and_vars:\n update_ops.extend(distribution.update(var, update, grad, grouped=False))\n\n # Give the child class a chance to do something after applying\n # gradients\n def finish():\n # TODO(josh11b): Make different state objects for each device to\n # avoid needing to set the device_policy.\n with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):\n return self._finish(state)\n\n update_ops = control_flow_ops.group(update_ops)\n with ops.control_dependencies([update_ops]):\n finish_updates = distribution.update_non_slot(\n non_slot_devices, finish, grouped=False)\n # We said grouped=False, which means finish_updates is always a list.\n # It will be [None] when finish() returns None.\n if finish_updates == [None]:\n finish_updates = [update_ops]\n\n # Update `global_step` (if any).\n if global_step is None:\n apply_updates = distribution.group(finish_updates, name=name)\n else:\n with ops.control_dependencies(finish_updates):\n\n def update_global_step(global_step, name):\n return global_step.assign_add(1, read_value=False, name=name)\n\n apply_updates = distribution.update(global_step, update_global_step,\n name)\n\n # Add the training op to the TRAIN_OP graph collection in graph mode.\n if not eager_execution:\n if isinstance(apply_updates, ops.Tensor):\n apply_updates = apply_updates.op\n train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP)\n if apply_updates not in train_op:\n train_op.append(apply_updates)\n\n return apply_updates\n\n def get_slot(self, var, name):\n \"\"\"Return a slot named `name` created for `var` by the Optimizer.\n\n Some `Optimizer` subclasses use additional variables. For example\n `Momentum` and `Adagrad` use variables to accumulate updates. 
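For example, assuming a momentum-style subclass that keeps a slot named\n 'momentum' (a sketch; the class and variable names are illustrative):\n\n ```python\n opt = MomentumOptimizer(learning_rate=0.1, momentum=0.9)\n train_op = opt.minimize(loss, var_list=[var])\n # Once the slot exists, the accumulator paired with `var` can be read.\n momentum_var = opt.get_slot(var, 'momentum')\n ```\n\n 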
This method\n gives access to these `Variable` objects if for some reason you need them.\n\n Use `get_slot_names()` to get the list of slot names created by the\n `Optimizer`.\n\n Args:\n var: A variable passed to `minimize()` or `apply_gradients()`.\n name: A string.\n\n Returns:\n The `Variable` for the slot if it was created, `None` otherwise.\n \"\"\"\n state = self._get_state_for_var(var)\n return state.get_slot(var, name) if state is not None else None\n\n def get_slot_names(self):\n \"\"\"Return a list of the names of slots created by the `Optimizer`.\n\n See `get_slot()`.\n\n Returns:\n A list of strings.\n \"\"\"\n state = self._get_per_graph_state()\n return state.get_slot_names() if state is not None else []\n\n def variables(self):\n \"\"\"A list of variables which encode the current state of `Optimizer`.\n\n Includes slot variables and additional global variables created by the\n optimizer in the current default graph.\n\n Returns:\n A list of variables.\n \"\"\"\n state = self._get_per_graph_state()\n return state._variables() if state is not None else [] # pylint: disable=protected-access\n\n # --------------\n # Methods to be implemented by subclasses if they want to use the\n # inherited implementation of apply_gradients() or compute_gradients().\n # --------------\n def _create_vars(self, var_list, state):\n \"\"\"Create all slots needed by the variables and any non-slot variables.\n\n Args:\n var_list: A list of `Variable` objects.\n state: An object with these methods: `create_slot(var, val, slot_name,\n optional_op_name)`, `create_slot_with_initializer(` `var, initializer,\n shape, dtype, slot_name, optional_op_name)`, `zeros_slot(var, slot_name,\n optional_op_name)`, `create_non_slot_variable(initial_value, name,\n colocate_with)`, `get_hyper(name)`\n \"\"\"\n # No slots needed by default\n pass\n\n def _prepare(self, state):\n \"\"\"Code to execute before applying gradients.\n\n Note that most uses of _prepare() in Optimizer have been subsumed\n by explicit support for hyper parameters in OptimizerV2\n\n Args:\n state: An object with a `get_hyper(name)` method.\n\n Returns:\n Return value will be ignored.\n \"\"\"\n pass\n\n def _apply_dense(self, grad, var, state):\n \"\"\"Add ops to apply dense gradients to `var`.\n\n Args:\n grad: A `Tensor`.\n var: A `Variable` object.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation`.\n \"\"\"\n raise NotImplementedError()\n\n def _resource_apply_dense(self, grad, handle, state):\n \"\"\"Add ops to apply dense gradients to the variable `handle`.\n\n Args:\n grad: a `Tensor` representing the gradient.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError()\n\n def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices,\n state):\n \"\"\"Add ops to apply sparse gradients to `handle`, with repeated indices.\n\n Optimizers which override this method must deal with repeated indices. See\n the docstring of `_apply_sparse_duplicate_indices` for details. By default\n the correct behavior, to sum non-unique indices and their associated\n gradients, is enforced by first pre-processing `grad` and `indices` and\n passing them on to `_resource_apply_sparse`. 
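For example, the incoming gradient\n\n IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])\n\n is applied as if it were\n\n IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])\n\n 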
Optimizers which deal correctly\n with duplicate indices may instead override this method to avoid the\n overhead of summing.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices may be repeated.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n # pylint: disable=protected-access\n summed_grad, unique_indices = optimizer_v1._deduplicate_indexed_slices(\n values=grad, indices=indices)\n # pylint: enable=protected-access\n return self._resource_apply_sparse(summed_grad, handle, unique_indices,\n state)\n\n def _resource_apply_sparse(self, grad, handle, indices, state):\n \"\"\"Add ops to apply sparse gradients to the variable `handle`.\n\n Similar to `_apply_sparse`, the `indices` argument to this method has been\n de-duplicated. Optimizers which deal correctly with non-unique indices may\n instead override `_resource_apply_sparse_duplicate_indices` to avoid this\n overhead.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to be\n updated.\n indices: a `Tensor` of integral type representing the indices for which\n the gradient is nonzero. Indices are unique.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation` which updates the value of the variable.\n \"\"\"\n raise NotImplementedError()\n\n def _apply_sparse_duplicate_indices(self, grad, var, state):\n \"\"\"Add ops to apply sparse gradients to `var`, with repeated sparse indices.\n\n Optimizers which override this method must deal with IndexedSlices objects\n such as the following:\n\n IndexedSlicesValue(values=[1, 1], indices=[0, 0], dense_shape=[1])\n\n The correct interpretation is:\n\n IndexedSlicesValue(values=[2], indices=[0], dense_shape=[1])\n\n Many optimizers deal incorrectly with repeated indices when updating based\n on sparse gradients (e.g. summing squares rather than squaring the sum, or\n applying momentum terms multiple times). 
Adding first is always the correct\n behavior, so this is enforced here by reconstructing the IndexedSlices to\n have only unique indices, then calling _apply_sparse.\n\n Optimizers which deal correctly with repeated indices may instead override\n this method to avoid the overhead of summing indices.\n\n Args:\n grad: `IndexedSlices`.\n var: A `Variable` object.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation`.\n \"\"\"\n # pylint: disable=protected-access\n summed_values, unique_indices = optimizer_v1._deduplicate_indexed_slices(\n values=grad.values, indices=grad.indices)\n # pylint: enable=protected-access\n gradient_no_duplicate_indices = ops.IndexedSlices(\n indices=unique_indices,\n values=summed_values,\n dense_shape=grad.dense_shape)\n return self._apply_sparse(gradient_no_duplicate_indices, var, state)\n\n def _apply_sparse(self, grad, var, state):\n \"\"\"Add ops to apply sparse gradients to `var`.\n\n The IndexedSlices object passed to `grad` in this function is by default\n pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate\n indices (see its docstring for details). Optimizers which can tolerate or\n have correct special cases for duplicate sparse indices may override\n `_apply_sparse_duplicate_indices` instead of this function, avoiding that\n overhead.\n\n Args:\n grad: `IndexedSlices`, with no repeated indices.\n var: A `Variable` object.\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n An `Operation`.\n \"\"\"\n raise NotImplementedError()\n\n def _finish(self, state):\n \"\"\"Do what is needed to finish the update.\n\n This is called inside a scope colocated with any non-slot variables.\n\n Args:\n state: An object with `get_slot(var, name)`, `get_non_slot(self, name)`,\n and `get_hyper(name)` methods.\n\n Returns:\n The operation to apply updates, or None if no updates.\n \"\"\"\n return None\n\n # --------------\n # Utility methods for subclasses.\n # --------------\n def _get_per_graph_state(self):\n # pylint: disable=protected-access\n return self._per_graph_state.get(ops.get_default_graph()._graph_key, None)\n\n def _get_state_for_var(self, var):\n # pylint: disable=protected-access\n return self._per_graph_state.get(var._graph_key, None)\n\n # --------------\n # Overridden methods from Checkpointable.\n # --------------\n\n def _track_checkpointable(self, *args, **kwargs):\n \"\"\"Optimizers may not track dependencies. Raises an error.\"\"\"\n raise NotImplementedError(\n \"Optimizers may not have dependencies. File a feature request if this \"\n \"limitation bothers you.\")\n\n @property\n def _checkpoint_dependencies(self):\n \"\"\"From Checkpointable. Gather graph-specific non-slot variables to save.\"\"\"\n current_graph_non_slot_variables = []\n state = self._get_per_graph_state()\n if state is not None:\n for name, variable_object in sorted(\n state._non_slot_dict.items(), # pylint: disable=protected-access\n # Avoid comparing variables\n key=lambda item: item[0]):\n current_graph_non_slot_variables.append(\n checkpointable.CheckpointableReference(\n name=name, ref=variable_object))\n # Note: ignores super(); Optimizers may not have any dependencies outside of\n # state objects.\n return current_graph_non_slot_variables\n\n def _lookup_dependency(self, name):\n \"\"\"From Checkpointable. 
Find a non-slot variable in the current graph.\"\"\"\n state = self._get_per_graph_state()\n if state is None:\n return None\n else:\n return state.get_non_slot(name)\n\n @property\n def _deferred_dependencies(self):\n \"\"\"Lets Checkpointable know where non-slot variables are created.\n\n If necessary, creates a new state object for the current default graph.\n Checkpointable will then add entries to that state's deferred dependency\n dictionary. The state object will check that dictionary when creating\n non-slot variables, restoring their value if an entry is found.\n\n Returns:\n A dictionary which holds deferred dependencies for the current default\n graph.\n \"\"\"\n state = self._get_or_create_state()\n return state._deferred_dependencies # pylint: disable=protected-access\n\n def _create_or_restore_slot_variable(self, slot_variable_position, slot_name,\n variable):\n \"\"\"Checkpointable: Restore a slot variable's value, possibly creating it.\n\n Called when a variable which has an associated slot variable is created or\n restored.\n\n Args:\n slot_variable_position: A `checkpointable._CheckpointPosition` object\n indicating the slot variable `Checkpointable` object to be restored.\n slot_name: The name of this `Optimizer`'s slot to restore into.\n variable: The variable object this slot is being created for.\n \"\"\"\n state = self._get_or_create_state(var_list=[variable])\n state._create_or_restore_slot_variable( # pylint: disable=protected-access\n slot_variable_position=slot_variable_position,\n slot_name=slot_name,\n variable=variable,\n optional_op_name=self._name)\n\n # --------------\n # Unsupported parent methods\n # --------------\n def _slot_dict(self, slot_name):\n raise NotImplementedError(\"_slot_dict() method unsupported in OptimizerV2\")\n\n def _get_or_make_slot(self, var, val, slot_name, op_name):\n raise NotImplementedError(\n \"_get_or_make_slot() method unsupported in OptimizerV2\")\n\n def _get_or_make_slot_with_initializer(self, var, initializer, shape, dtype,\n slot_name, op_name):\n raise NotImplementedError(\n \"_get_or_make_slot_with_initializer() method unsupported in \"\n \"OptimizerV2\")\n\n def _create_non_slot_variable(self, initial_value, name, colocate_with):\n raise NotImplementedError(\n \"_create_non_slot_variable() method unsupported in OptimizerV2\")\n\n def _get_non_slot_variable(self, name, graph=None):\n raise NotImplementedError(\n \"_get_non_slot_variable() method unsupported in OptimizerV2\")\n\n def _non_slot_variables(self):\n raise NotImplementedError(\n \"_non_slot_variables() method unsupported in OptimizerV2\")\n",
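To tie the `OptimizerV2` API above together, a minimal sketch of a concrete
subclass follows. It is illustrative only: the class name `PlainSGD` is
invented, only the dense update paths are shown, and real subclasses usually
dispatch to fused `training_ops` kernels rather than composing `assign_sub`.
The point is the division of labor: hyper parameters are registered with
`_set_hyper()` in the constructor and read back through the per-graph `state`
object inside the update methods.

```python
class PlainSGD(OptimizerV2):
  """Sketch only: plain gradient descent against the OptimizerV2 API."""

  def __init__(self, learning_rate=0.01, use_locking=False, name="PlainSGD"):
    super(PlainSGD, self).__init__(use_locking, name)
    # May be a Python value, a tensor, or a callable re-evaluated each step.
    self._set_hyper("learning_rate", learning_rate)

  def _create_vars(self, var_list, state):
    # Plain SGD needs no slots; a momentum-style optimizer would call
    # state.zeros_slot(var, "momentum") for each variable here.
    pass

  def _apply_dense(self, grad, var, state):
    lr = state.get_hyper("learning_rate", var.dtype.base_dtype)
    return var.assign_sub(lr * grad, use_locking=self._use_locking).op

  def _resource_apply_dense(self, grad, handle, state):
    lr = state.get_hyper("learning_rate", grad.dtype.base_dtype)
    return handle.assign_sub(lr * grad, use_locking=self._use_locking,
                             read_value=False)
```

With this in place, the inherited `minimize()` and `apply_gradients()` drive
the whole update: they create the per-graph state, reduce gradients under any
`DistributionStrategy`, and route each (gradient, variable) pair to the
matching `_apply_*` method.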
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Training-related part of the Keras engine.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport weakref\nimport numpy as np\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras import losses\nfrom tensorflow.python.keras import metrics as metrics_module\nfrom tensorflow.python.keras import optimizers\nfrom tensorflow.python.keras.engine import base_layer\nfrom tensorflow.python.keras.engine import distributed_training_utils\nfrom tensorflow.python.keras.engine import training_arrays\nfrom tensorflow.python.keras.engine import training_distributed\nfrom tensorflow.python.keras.engine import training_eager\nfrom tensorflow.python.keras.engine import training_generator\nfrom tensorflow.python.keras.engine import training_utils\nfrom tensorflow.python.keras.engine.network import Network\nfrom tensorflow.python.keras.utils import data_utils\nfrom tensorflow.python.keras.utils.generic_utils import slice_arrays\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import weights_broadcast_ops\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import optimizer as tf_optimizer_module\nfrom tensorflow.python.training.checkpointable import base as checkpointable\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\n@tf_export('keras.models.Model', 'keras.Model')\nclass Model(Network):\n \"\"\"`Model` groups layers into an object with training and inference features.\n\n There are two ways to instantiate a `Model`:\n\n 1 - With the \"functional API\", where you start from `Input`,\n you chain layer calls to specify the model's forward pass,\n and finally you create your model from inputs and outputs:\n\n ```python\n import tensorflow as tf\n\n inputs = tf.keras.Input(shape=(3,))\n x = tf.keras.layers.Dense(4, activation=tf.nn.relu)(inputs)\n outputs = tf.keras.layers.Dense(5, activation=tf.nn.softmax)(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n ```\n\n 2 - By subclassing the `Model` class: in that case, you should define your\n layers in `__init__` and you should implement the model's forward pass\n in `call`.\n\n ```python\n import tensorflow as tf\n\n class MyModel(tf.keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__()\n self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n\n def 
call(self, inputs):\n x = self.dense1(inputs)\n return self.dense2(x)\n\n model = MyModel()\n ```\n\n If you subclass `Model`, you can optionally have\n a `training` argument (boolean) in `call`, which you can use to specify\n a different behavior in training and inference:\n\n ```python\n import tensorflow as tf\n\n class MyModel(tf.keras.Model):\n\n def __init__(self):\n super(MyModel, self).__init__()\n self.dense1 = tf.keras.layers.Dense(4, activation=tf.nn.relu)\n self.dense2 = tf.keras.layers.Dense(5, activation=tf.nn.softmax)\n self.dropout = tf.keras.layers.Dropout(0.5)\n\n def call(self, inputs, training=False):\n x = self.dense1(inputs)\n if training:\n x = self.dropout(x, training=training)\n return self.dense2(x)\n\n model = MyModel()\n ```\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Model, self).__init__(*args, **kwargs)\n # Create a cache for iterator get_next op.\n self._iterator_get_next = weakref.WeakKeyDictionary()\n # Create a cache for dataset - uninitialized iterators\n self._dataset_iterator_cache = weakref.WeakKeyDictionary()\n # initializing _distribution_strategy here since it is possible to call\n # predict on a model without compiling it.\n self._distribution_strategy = None\n # This flag must be disabled upon model mutation, such as changing the model\n # layers or recompiling the model to use a different optimizer. New function\n # definitions are generated whenever this flag is disabled, ensuring that\n # internal graph functions are always using the current model structure.\n self._built_graph_functions = False\n\n def _set_sample_weight_attributes(self, sample_weight_mode,\n skip_target_weighing_indices):\n \"\"\"Sets sample weight related attributes on the model.\"\"\"\n sample_weights, sample_weight_modes = training_utils.prepare_sample_weights(\n self.output_names, sample_weight_mode, skip_target_weighing_indices)\n self.sample_weights = sample_weights\n self.sample_weight_modes = sample_weight_modes\n self._feed_sample_weight_modes = [\n sample_weight_modes[i]\n for i in range(len(self.outputs))\n if i not in skip_target_weighing_indices\n ]\n self._feed_sample_weights = [\n sample_weights[i]\n for i in range(len(sample_weights))\n if i not in skip_target_weighing_indices\n ]\n\n def _cache_output_metric_attributes(self, metrics, weighted_metrics):\n \"\"\"Caches metric name and function attributes for every model output.\"\"\"\n output_shapes = [\n None if output is None else output.get_shape().as_list()\n for output in self.outputs\n ]\n self._per_output_metrics = training_utils.collect_per_output_metric_info(\n metrics, self.output_names, output_shapes, self.loss_functions)\n self._per_output_weighted_metrics = \\\n training_utils.collect_per_output_metric_info(\n weighted_metrics, self.output_names, output_shapes,\n self.loss_functions, self.sample_weights)\n\n def _add_unique_metric_name(self, metric_name, output_index):\n \"\"\"Makes the metric name unique and adds it to the model's metric name list.\n\n If there are multiple outputs for which the metrics are calculated, the\n metric names have to be made unique by appending an integer.\n\n Arguments:\n metric_name: Metric name that corresponds to the metric specified by the\n user. 
For example: 'acc'.\n output_index: The index of the model output for which the metric name is\n being added.\n\n Returns:\n string, name of the model's unique metric name.\n \"\"\"\n if len(self.output_names) > 1:\n metric_name = '%s_%s' % (self.output_names[output_index], metric_name)\n j = 1\n base_metric_name = metric_name\n while metric_name in self.metrics_names:\n metric_name = '%s_%d' % (base_metric_name, j)\n j += 1\n\n return metric_name\n\n def _init_metric_attributes(self):\n \"\"\"Initializes model metric attributes.\"\"\"\n self.metrics_names = ['loss']\n self.metrics_tensors = []\n self.metrics_updates = []\n self.stateful_metric_names = []\n self.stateful_metric_functions = []\n\n def _set_per_output_metric_attributes(self, metrics_dict, output_index):\n \"\"\"Sets the metric attributes on the model for the given output.\n\n Arguments:\n metrics_dict: A dict with metric names as keys and metric fns as values.\n output_index: The index of the model output for which the metric\n attributes are added.\n \"\"\"\n for metric_name, metric_fn in metrics_dict.items():\n metric_name = self._add_unique_metric_name(metric_name, output_index)\n # Keep track of metric name.\n self.metrics_names.append(metric_name)\n\n # Keep track of stateful metric attributes (name and metric function).\n if isinstance(metric_fn, base_layer.Layer) and metric_fn.stateful:\n self.stateful_metric_names.append(metric_name)\n self.stateful_metric_functions.append(metric_fn)\n\n def _set_metric_attributes(self, outputs, skip_target_indices=None):\n \"\"\"Sets the metric attributes on the model for all the model outputs.\"\"\"\n skip_target_indices = skip_target_indices or []\n for i in range(len(outputs)):\n if i in skip_target_indices:\n continue\n self._set_per_output_metric_attributes(self._per_output_metrics[i], i)\n self._set_per_output_metric_attributes(\n self._per_output_weighted_metrics[i], i)\n\n def _handle_per_output_metrics(self,\n metrics_dict,\n y_true,\n y_pred,\n mask,\n weights=None):\n \"\"\"Calls metric functions for a single output.\n\n Arguments:\n metrics_dict: A dict with metric names as keys and metric fns as values.\n y_true: Target output.\n y_pred: Predicted output.\n mask: Computed mask value for the current output.\n weights: Weights to be applied on the current output.\n\n Returns:\n A list of metric result tensors.\n \"\"\"\n metric_results = []\n for metric_name, metric_fn in metrics_dict.items():\n with K.name_scope(metric_name):\n if isinstance(metric_fn, metrics_module.Metric):\n # Call the stateful metric function.\n if mask is not None:\n mask = math_ops.cast(mask, y_pred.dtype)\n # Update weights with mask.\n if weights is None:\n weights = mask\n else:\n # Update shape of weights if possible before adding mask.\n # Update dimensions of weights to match with mask if possible.\n mask, _, weights = metrics_module.squeeze_or_expand_dimensions(\n mask, None, weights)\n try:\n # Broadcast weights if possible.\n weights = weights_broadcast_ops.broadcast_weights(weights, mask)\n except ValueError:\n pass\n # TODO(psv): Handle case when mask and weight shapes are not\n # compatible.\n weights *= mask\n\n metric_result = metric_fn(y_true, y_pred, weights)\n else:\n # Call the stateless metric function.\n weighted_metric_fn = training_utils.weighted_masked_objective(\n metric_fn)\n metric_result = weighted_metric_fn(\n y_true, y_pred, weights=weights, mask=mask)\n\n if not context.executing_eagerly():\n # Keep track of metric result tensor.\n 
self.metrics_tensors.append(metric_result)\n\n metric_results.append(metric_result)\n is_stateful = isinstance(metric_fn,\n base_layer.Layer) and metric_fn.stateful\n if is_stateful and not context.executing_eagerly():\n # Keep track of updates created by stateful metrics.\n self.metrics_updates += metric_fn.updates\n return metric_results\n\n def _handle_metrics(self,\n outputs,\n skip_target_indices=None,\n targets=None,\n sample_weights=None,\n masks=None):\n \"\"\"Handles calling metric functions.\n\n Arguments:\n outputs: List of outputs (predictions).\n skip_target_indices: Optional. List of target ids to skip.\n targets: List of targets.\n sample_weights: Optional list of sample weight arrays.\n masks: List of computed output mask values.\n\n Returns:\n A list of metric result tensors.\n \"\"\"\n skip_target_indices = skip_target_indices or []\n metric_results = []\n with K.name_scope('metrics'):\n for i in range(len(outputs)):\n if i in skip_target_indices:\n continue\n output = outputs[i] if outputs else None\n target = targets[i] if targets else None\n output_mask = masks[i] if masks else None\n metric_results.extend(\n self._handle_per_output_metrics(self._per_output_metrics[i], target,\n output, output_mask))\n metric_results.extend(\n self._handle_per_output_metrics(\n self._per_output_weighted_metrics[i],\n target,\n output,\n output_mask,\n weights=sample_weights[i]))\n return metric_results\n\n @checkpointable.no_automatic_dependency_tracking\n def compile(self,\n optimizer,\n loss=None,\n metrics=None,\n loss_weights=None,\n sample_weight_mode=None,\n weighted_metrics=None,\n target_tensors=None,\n distribute=None,\n **kwargs):\n \"\"\"Configures the model for training.\n\n Arguments:\n optimizer: String (name of optimizer) or optimizer instance.\n See [optimizers](/api_docs/python/tf/keras/optimizers).\n loss: String (name of objective function) or objective function.\n See [losses](/api_docs/python/tf/losses).\n If the model has multiple outputs, you can use a different loss\n on each output by passing a dictionary or a list of losses.\n The loss value that will be minimized by the model\n will then be the sum of all individual losses.\n metrics: List of metrics to be evaluated by the model\n during training and testing.\n Typically you will use `metrics=['accuracy']`.\n To specify different metrics for different outputs of a\n multi-output model, you could also pass a dictionary,\n such as `metrics={'output_a': 'accuracy'}`.\n loss_weights: Optional list or dictionary specifying scalar\n coefficients (Python floats) to weight the loss contributions\n of different model outputs.\n The loss value that will be minimized by the model\n will then be the *weighted sum* of all individual losses,\n weighted by the `loss_weights` coefficients.\n If a list, it is expected to have a 1:1 mapping\n to the model's outputs. If a dict, it is expected to map\n output names (strings) to scalar coefficients.\n sample_weight_mode: If you need to do timestep-wise\n sample weighting (2D weights), set this to `\"temporal\"`.\n `None` defaults to sample-wise weights (1D).\n If the model has multiple outputs, you can use a different\n `sample_weight_mode` on each output by passing a\n dictionary or a list of modes.\n weighted_metrics: List of metrics to be evaluated and weighted\n by sample_weight or class_weight during training and testing.\n target_tensors: By default, Keras will create placeholders for the\n model's target, which will be fed with the target data during\n training. 
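For example, the custom-tensor case described next might look like this\n sketch (the placeholder shape is hypothetical):\n\n ```python\n targets = tf.placeholder(tf.float32, shape=(None, 10))\n model.compile(optimizer='sgd', loss='mse', target_tensors=[targets])\n ```\n\n 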
If instead you would like to use your own\n target tensors (in turn, Keras will not expect external\n Numpy data for these targets at training time), you\n can specify them via the `target_tensors` argument. It can be\n a single tensor (for a single-output model), a list of tensors,\n or a dict mapping output names to target tensors.\n distribute: The DistributionStrategy instance that we want to use to\n distribute the training of the model.\n **kwargs: These arguments are passed to `tf.Session.run`.\n\n Raises:\n ValueError: In case of invalid arguments for\n `optimizer`, `loss`, `metrics` or `sample_weight_mode`.\n \"\"\"\n # The correct graph function may have changed,\n # already-built ones must be updated\n self._built_graph_functions = False\n\n # Validate that arguments passed by the user to `compile` are supported by\n # DistributionStrategy.\n if distribute:\n if not isinstance(\n optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):\n raise NotImplementedError(\n 'optimizer must be an instance of '\n 'tf.train.Optimizer, not a %s' % type(optimizer))\n if context.executing_eagerly():\n raise NotImplementedError('DistributionStrategy is not supported '\n 'when eager execution is enabled.')\n if sample_weight_mode:\n raise NotImplementedError('sample_weight_mode is not supported with '\n 'DistributionStrategy.')\n if weighted_metrics:\n raise NotImplementedError('weighted_metrics is not supported with '\n 'DistributionStrategy.')\n if target_tensors:\n raise ValueError('target_tensors is not supported with '\n 'DistributionStrategy.')\n\n loss = loss or {}\n if context.executing_eagerly() and not isinstance(\n optimizer, (tf_optimizer_module.Optimizer, optimizers.TFOptimizer)):\n raise ValueError(\n 'optimizer must be an instance of tf.train.Optimizer, not '\n 'a %s' % type(optimizer))\n\n self.optimizer = optimizers.get(optimizer)\n # We've disabled automatic dependency tracking for this method, but do want\n # to add a checkpoint dependency on the optimizer if it's checkpointable.\n if isinstance(self.optimizer, checkpointable.CheckpointableBase):\n self._track_checkpointable(\n self.optimizer, name='optimizer', overwrite=True)\n self.loss = loss\n self.metrics = metrics or []\n self.loss_weights = loss_weights\n self.sample_weight_mode = sample_weight_mode\n self.weighted_metrics = weighted_metrics\n if context.executing_eagerly() and target_tensors is not None:\n raise ValueError('target_tensors is not supported in Eager mode.')\n self.target_tensors = target_tensors\n\n # Set DistributionStrategy specific parameters.\n self._distribution_strategy = distribute\n # Reset the value of grouped_model\n self._grouped_model = None\n if self._distribution_strategy is not None:\n distributed_training_utils.configure_and_create_session(\n self._distribution_strategy)\n if not self.built:\n # Model is not compilable because it does not know its number of inputs\n # and outputs, nor their shapes and names. We will compile after the first\n # time the model gets called on training data.\n return\n self._is_compiled = True\n\n # Prepare loss functions.\n if isinstance(loss, dict):\n for name in loss:\n if name not in self.output_names:\n raise ValueError(\n 'Unknown entry in loss '\n 'dictionary: \"' + name + '\". '\n 'Only expected the following keys: ' + str(self.output_names))\n loss_functions = []\n for name in self.output_names:\n if name not in loss:\n logging.warning(\n 'Output \"' + name +\n '\" missing from loss dictionary. We assume '\n 'this was done on purpose. 
The fit and evaluate APIs will not be '\n 'expecting any data to be passed to \"' + name + '\".')\n loss_functions.append(losses.get(loss.get(name)))\n elif isinstance(loss, list):\n if len(loss) != len(self.outputs):\n raise ValueError('When passing a list as loss, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed loss=' + str(loss))\n loss_functions = [losses.get(l) for l in loss]\n else:\n loss_function = losses.get(loss)\n loss_functions = [loss_function for _ in range(len(self.outputs))]\n self.loss_functions = loss_functions\n\n weighted_losses = [training_utils.weighted_masked_objective(fn)\n for fn in loss_functions]\n skip_target_indices = []\n skip_target_weighing_indices = []\n self._feed_outputs = []\n self._feed_output_names = []\n self._feed_output_shapes = []\n self._feed_loss_fns = []\n for i in range(len(weighted_losses)):\n if weighted_losses[i] is None:\n skip_target_indices.append(i)\n skip_target_weighing_indices.append(i)\n\n # Prepare output masks.\n if not context.executing_eagerly():\n masks = [getattr(x, '_keras_mask', None) for x in self.outputs]\n if not isinstance(masks, list):\n masks = [masks]\n\n # Prepare loss weights.\n if loss_weights is None:\n loss_weights_list = [1. for _ in range(len(self.outputs))]\n elif isinstance(loss_weights, dict):\n for name in loss_weights:\n if name not in self.output_names:\n raise ValueError(\n 'Unknown entry in loss_weights '\n 'dictionary: \"' + name + '\". '\n 'Only expected the following keys: ' + str(self.output_names))\n loss_weights_list = []\n for name in self.output_names:\n loss_weights_list.append(loss_weights.get(name, 1.))\n elif isinstance(loss_weights, list):\n if len(loss_weights) != len(self.outputs):\n raise ValueError(\n 'When passing a list as loss_weights, '\n 'it should have one entry per model output. '\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed loss_weights=' + str(loss_weights))\n loss_weights_list = loss_weights\n else:\n raise TypeError('Could not interpret loss_weights argument: ' +\n str(loss_weights) + ' - expected a list or dict.')\n self.loss_weights_list = loss_weights_list\n\n # Initialize model metric attributes.\n self._init_metric_attributes()\n\n # Initialization for Eager mode execution.\n if context.executing_eagerly():\n # Prepare sample weights.\n self._set_sample_weight_attributes(sample_weight_mode,\n skip_target_weighing_indices)\n # Save all metric attributes per output of the model.\n self._cache_output_metric_attributes(metrics, weighted_metrics)\n\n if target_tensors is not None:\n raise ValueError('target_tensors are not currently supported in Eager '\n 'mode.')\n self.total_loss = None\n for i in range(len(self.outputs)):\n if len(self.outputs) > 1:\n self.metrics_names.append(self.output_names[i] + '_loss')\n\n # Set metric attributes on model.\n self._set_metric_attributes(\n self.outputs,\n skip_target_indices=skip_target_indices,\n )\n\n self.targets = []\n for i in range(len(self.outputs)):\n self._feed_output_names.append(self.output_names[i])\n self._collected_trainable_weights = self.trainable_weights\n return\n\n # Prepare targets of model.\n self.targets = []\n self._feed_targets = []\n if target_tensors not in (None, []):\n if isinstance(target_tensors, list):\n if len(target_tensors) != len(self.outputs):\n raise ValueError(\n 'When passing a list as `target_tensors`, '\n 'it should have one entry per model output. 
'\n 'The model has ' + str(len(self.outputs)) +\n ' outputs, but you passed target_tensors=' + str(target_tensors))\n elif isinstance(target_tensors, dict):\n for name in target_tensors:\n if name not in self.output_names:\n raise ValueError(\n 'Unknown entry in `target_tensors` '\n 'dictionary: \"' + name + '\". '\n 'Only expected the following keys: ' + str(self.output_names))\n tmp_target_tensors = []\n for name in self.output_names:\n tmp_target_tensors.append(target_tensors.get(name, None))\n target_tensors = tmp_target_tensors\n elif tensor_util.is_tensor(target_tensors):\n target_tensors = [target_tensors]\n else:\n raise TypeError('Expected `target_tensors` to be a list or tuple or '\n 'dict or a single tensor, but got:', target_tensors)\n\n for i in range(len(self.outputs)):\n if i in skip_target_indices:\n self.targets.append(None)\n else:\n shape = K.int_shape(self.outputs[i])\n name = self.output_names[i]\n if target_tensors not in (None, []):\n target = target_tensors[i]\n else:\n target = None\n if target is None or K.is_placeholder(target):\n if target is None:\n target = K.placeholder(\n ndim=len(shape),\n name=name + '_target',\n sparse=K.is_sparse(self.outputs[i]),\n dtype=K.dtype(self.outputs[i]))\n self._feed_targets.append(target)\n self._feed_outputs.append(self.outputs[i])\n self._feed_output_names.append(name)\n self._feed_output_shapes.append(shape)\n self._feed_loss_fns.append(self.loss_functions[i])\n else:\n skip_target_weighing_indices.append(i)\n self.targets.append(target)\n\n # Prepare sample weights.\n self._set_sample_weight_attributes(sample_weight_mode,\n skip_target_weighing_indices)\n # Save all metric attributes per output of the model.\n self._cache_output_metric_attributes(metrics, weighted_metrics)\n\n # Compute total loss.\n total_loss = None\n with K.name_scope('loss'):\n for i in range(len(self.outputs)):\n if i in skip_target_indices:\n continue\n y_true = self.targets[i]\n y_pred = self.outputs[i]\n weighted_loss = weighted_losses[i]\n sample_weight = self.sample_weights[i]\n mask = masks[i]\n loss_weight = loss_weights_list[i]\n with K.name_scope(self.output_names[i] + '_loss'):\n output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)\n if len(self.outputs) > 1:\n self.metrics_tensors.append(output_loss)\n self.metrics_names.append(self.output_names[i] + '_loss')\n if total_loss is None:\n total_loss = loss_weight * output_loss\n else:\n total_loss += loss_weight * output_loss\n if total_loss is None:\n if not self.losses:\n raise ValueError('The model cannot be compiled '\n 'because it has no loss to optimize.')\n else:\n total_loss = 0.\n\n # Add regularization penalties\n # and other layer-specific losses.\n for loss_tensor in self.losses:\n total_loss += loss_tensor\n\n # Set metric attributes on model.\n self._set_metric_attributes(\n self.outputs,\n skip_target_indices=skip_target_indices,\n )\n # Invoke metric functions for all the outputs.\n self._handle_metrics(\n self.outputs,\n masks=masks,\n targets=self.targets,\n skip_target_indices=skip_target_indices,\n sample_weights=self.sample_weights)\n\n # Prepare gradient updates and state updates.\n self.total_loss = total_loss\n\n # Functions for train, test and predict will\n # be compiled lazily when required.\n # This saves time when the user is not using all functions.\n self._function_kwargs = kwargs\n\n self.train_function = None\n self.test_function = None\n self.predict_function = None\n\n # Collected trainable weights, sorted in topological order.\n trainable_weights 
= self.trainable_weights\n self._collected_trainable_weights = trainable_weights\n\n def _check_trainable_weights_consistency(self):\n \"\"\"Check trainable weights count consistency.\n\n This will raise a warning if `trainable_weights` and\n `_collected_trainable_weights` are inconsistent (i.e. have different\n number of parameters).\n Inconsistency will typically arise when one modifies `model.trainable`\n without calling `model.compile` again.\n \"\"\"\n if not hasattr(self, '_collected_trainable_weights'):\n return\n\n if len(self.trainable_weights) != len(self._collected_trainable_weights):\n logging.log_first_n(\n logging.WARN, 'Discrepancy between trainable weights and collected'\n ' trainable weights; did you set `model.trainable`'\n ' without calling `model.compile` afterwards?', 1)\n\n def _make_train_function(self):\n if not hasattr(self, 'train_function'):\n raise RuntimeError('You must compile your model before using it.')\n self._check_trainable_weights_consistency()\n if self.train_function is None:\n inputs = (self._feed_inputs +\n self._feed_targets +\n self._feed_sample_weights)\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n\n with K.name_scope('training'):\n with K.name_scope(self.optimizer.__class__.__name__):\n # Training updates\n updates = self.optimizer.get_updates(\n params=self._collected_trainable_weights, loss=self.total_loss)\n # Unconditional updates\n updates += self.get_updates_for(None)\n # Conditional updates relevant to this model\n updates += self.get_updates_for(self.inputs)\n # Stateful metrics updates\n updates += self.metrics_updates\n # Gets loss and metrics. Updates weights at each call.\n self.train_function = K.function(\n inputs, [self.total_loss] + self.metrics_tensors,\n updates=updates,\n name='train_function',\n **self._function_kwargs)\n\n def _make_test_function(self):\n if not hasattr(self, 'test_function'):\n raise RuntimeError('You must compile your model before using it.')\n if self.test_function is None:\n inputs = (self._feed_inputs +\n self._feed_targets +\n self._feed_sample_weights)\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n # Return loss and metrics, no gradient updates.\n # Does update the network states.\n self.test_function = K.function(\n inputs, [self.total_loss] + self.metrics_tensors,\n updates=self.state_updates + self.metrics_updates,\n name='test_function',\n **self._function_kwargs)\n\n def _make_predict_function(self):\n if not hasattr(self, 'predict_function'):\n self.predict_function = None\n if self.predict_function is None:\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs = self._feed_inputs + [K.learning_phase()]\n else:\n inputs = self._feed_inputs\n # Gets network outputs. 
Does not update weights.\n # Does update the network states.\n kwargs = getattr(self, '_function_kwargs', {})\n self.predict_function = K.function(\n inputs,\n self.outputs,\n updates=self.state_updates,\n name='predict_function',\n **kwargs)\n\n def _get_iterator_get_next_tensors(self, iterator):\n get_next_op = self._iterator_get_next.get(iterator, None)\n if get_next_op is None:\n get_next_op = iterator.get_next()\n self._iterator_get_next[iterator] = get_next_op\n return get_next_op\n\n def _distribution_standardize_user_data(self,\n x,\n y=None,\n sample_weight=None,\n class_weight=None,\n batch_size=None,\n check_steps=False,\n steps_name='steps',\n steps=None,\n validation_split=0,\n shuffle=False):\n \"\"\"Runs validation checks on input and target data passed by the user.\n\n This is called when using DistributionStrategy to train, evaluate or serve\n the model.\n\n Args:\n x: Input data. A numpy array or `tf.data` dataset.\n y: Target data. A numpy array or None if x is a `tf.data` dataset.\n sample_weight: An optional sample-weight array passed by the user to\n weight the importance of each sample in `x`.\n class_weight: An optional class-weight array by the user to\n weight the importance of samples in `x` based on the class they belong\n to, as conveyed by `y`.\n batch_size: Integer batch size. If provided, it is used to run additional\n validation checks on stateful models.\n check_steps: boolean, True if we want to check for validity of `steps` and\n False, otherwise.\n steps_name: The public API's parameter name for `steps`.\n steps: Integer or `None`. Total number of steps (batches of samples) to\n execute.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n shuffle: Boolean whether to shuffle the training data before each epoch.\n\n Returns:\n Iterator for reading the dataset `x`.\n\n Raises:\n ValueError: In case of invalid user-provided data.\n RuntimeError: If the model was never compiled.\n \"\"\"\n if class_weight:\n raise NotImplementedError('`class_weight` is currently not supported '\n 'when using DistributionStrategy.')\n\n if (sample_weight is not None and sample_weight.all() and\n self._distribution_strategy.__class__.__name__ == 'TPUStrategy'):\n raise NotImplementedError('`sample_weight` is currently not supported '\n 'when using TPUStrategy.')\n\n # Validates `steps` argument right at the beginning since we use it to\n # construct the dataset object.\n # TODO(anjalisridhar): Remove this check once we refactor the\n # _standardize_user_data code path. 
This check is already present elsewhere\n # in the codebase.\n if check_steps and isinstance(x, dataset_ops.Dataset) and steps is None:\n raise ValueError('When using Datasets as input, '\n 'you should specify the `{steps_name}` argument.'\n .format(steps_name=steps_name))\n\n first_x_value = nest.flatten(x)[0]\n if isinstance(first_x_value, np.ndarray):\n assert steps is not None\n x_shape = first_x_value.shape\n if batch_size is None:\n batch_size = distributed_training_utils.get_batch_size(\n self._distribution_strategy.num_replicas, x_shape[0], steps)\n # We need to use the drop_remainder argument to allow for a static\n # input shape which is required for TPUs.\n drop_remainder = self._distribution_strategy.require_static_shapes\n if y is not None:\n var_x = distributed_training_utils.get_var_for_numpy(\n self._distribution_strategy, x)\n var_y = distributed_training_utils.get_var_for_numpy(\n self._distribution_strategy, y)\n if sample_weight is not None:\n var_sample_weights = distributed_training_utils.get_var_for_numpy(\n self._distribution_strategy, sample_weight)\n\n x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y,\n var_sample_weights))\n else:\n x = dataset_ops.Dataset.from_tensor_slices((var_x, var_y))\n\n if shuffle:\n # 1024 is a good buffer size since it is much larger than the average\n # batch size provided by the user and provides sufficient randomness.\n # One thing to keep in mind is the memory usage based on the size of\n # each sample.\n x = x.shuffle(1024)\n x = x.repeat()\n x = x.batch(batch_size, drop_remainder=drop_remainder)\n y = None\n sample_weight = None\n else:\n # This case is for the predict call where the dataset only contains\n # inputs and no targets, i.e. it does not return a tuple.\n var_x = distributed_training_utils.get_var_for_numpy(\n self._distribution_strategy, x)\n x = dataset_ops.Dataset.from_tensor_slices(var_x)\n x = x.repeat()\n x = x.batch(batch_size, drop_remainder=drop_remainder)\n\n assert isinstance(x, dataset_ops.Dataset)\n\n # TODO(anjalisridhar): We want distribute_dataset() to accept a Dataset or a\n # function which returns a Dataset. Currently distribute_dataset() only\n # accepts a function that returns a Dataset. Once we add support for being\n # able to clone a Dataset on multiple workers we can remove this lambda.\n result = self._distribution_strategy.distribute_dataset(lambda: x)\n iterator = result.make_initializable_iterator()\n with self._distribution_strategy.scope():\n K.get_session().run(iterator.initializer)\n\n training_utils.validate_iterator_input(x, y, sample_weight,\n validation_split)\n return iterator\n\n def _standardize_user_data(self,\n x,\n y=None,\n sample_weight=None,\n class_weight=None,\n batch_size=None,\n check_steps=False,\n steps_name='steps',\n steps=None,\n validation_split=0,\n shuffle=False):\n \"\"\"Runs validation checks on input and target data passed by the user.\n\n Also standardizes the data to lists of arrays, in order.\n\n Also builds and compiles the model on the fly if it is a subclassed model\n that has never been called before (and thus has no inputs/outputs).\n\n This is a purely internal method, subject to refactoring at any time.\n\n Args:\n x: Input data. 
It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). If `x` is a dataset or a\n dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: An optional sample-weight array passed by the user to\n weight the importance of each sample in `x`.\n class_weight: An optional class-weight array by the user to\n weight the importance of samples in `x` based on the class they belong\n to, as conveyed by `y`.\n batch_size: Integer batch size. If provided, it is used to run additional\n validation checks on stateful models.\n check_steps: boolean, True if we want to check for validity of `steps` and\n False, otherwise. For example, when we are standardizing one batch of\n data for train_on_batch/predict_on_batch/test_on_batch APIs, `steps`\n value is not required and we should not check for its validity in these\n cases.\n steps_name: The public API's parameter name for `steps`.\n steps: Integer or `None`. Total number of steps (batches of samples) to\n execute.\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n shuffle: Boolean whether to shuffle the training data before each epoch.\n\n Returns:\n A tuple of 3: inputs (arrays or dicts, depending on whether `x` was a dict\n or not), target arrays, sample-weight arrays.\n If the model's input and targets are symbolic, these lists are empty\n (since the model takes no user-provided data, instead the data comes\n from the symbolic inputs/targets).\n\n Raises:\n ValueError: In case of invalid user-provided data.\n RuntimeError: If the model was never compiled.\n \"\"\"\n if self._distribution_strategy:\n iterator = self._distribution_standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n class_weight=class_weight,\n batch_size=batch_size,\n check_steps=check_steps,\n steps_name=steps_name,\n steps=steps,\n validation_split=validation_split,\n shuffle=shuffle)\n return iterator, None, None\n\n if isinstance(x, dataset_ops.Dataset):\n if context.executing_eagerly():\n x = x.make_one_shot_iterator()\n else:\n if x in self._dataset_iterator_cache:\n x = self._dataset_iterator_cache[x]\n else:\n iterator = x.make_initializable_iterator()\n self._dataset_iterator_cache[x] = iterator\n x = iterator\n K.get_session().run(x.initializer)\n\n # Validates `steps` argument based on x's type.\n if check_steps:\n training_utils.check_steps_argument(x, steps, steps_name)\n\n is_x_eager_iterator = isinstance(x, iterator_ops.EagerIterator)\n is_x_iterator = isinstance(x, iterator_ops.Iterator)\n\n # Validate user inputs when data is given as a dataset or dataset iterator.\n if is_x_iterator or is_x_eager_iterator:\n training_utils.validate_iterator_input(x, y, sample_weight,\n validation_split)\n\n # For eager iterators, when we have to process multiple batches of samples,\n # we will standardize the data when we actually loop over iterator and get\n # the batches. 
For now, we just return the iterator as is.\n if is_x_eager_iterator and steps is not None:\n return x, y, sample_weight\n\n # If input data is a dataset iterator in graph mode or if it is an eager\n # iterator and only one batch of samples is required, we fetch the data\n # tensors from the iterator and then standardize them.\n if is_x_iterator or is_x_eager_iterator:\n try:\n if is_x_iterator:\n next_element = self._get_iterator_get_next_tensors(x)\n else:\n next_element = x.get_next()\n except errors.OutOfRangeError:\n raise RuntimeError('Your dataset iterator ran out of data; '\n 'make sure that your dataset can generate '\n 'the required number of samples.')\n\n if isinstance(next_element, (list, tuple)):\n if len(next_element) not in [2, 3]:\n raise ValueError(\n 'Please provide model inputs as a list or tuple of 2 or 3 '\n 'elements: (input, target) or (input, target, sample_weights). '\n 'Received %s' % next_element)\n if len(next_element) == 2:\n x, y = next_element\n else:\n x, y, sample_weight = next_element\n else:\n x = next_element\n x, y, sample_weights = self._standardize_weights(x, y, sample_weight,\n class_weight, batch_size)\n return x, y, sample_weights\n\n def _standardize_weights(self, x, y, sample_weight=None, class_weight=None,\n batch_size=None):\n # TODO(sourabhbajaj): Split input validation from weight standardization.\n if sample_weight is not None and class_weight is not None:\n logging.warning(\n 'Received both a `sample_weight` and `class_weight` argument. '\n 'The `class_weight` argument will be ignored.')\n # First, we build/compile the model on the fly if necessary.\n all_inputs = []\n is_build_called = False\n is_compile_called = False\n dict_inputs = False\n if not self.inputs:\n # We need to use `x` to set the model inputs.\n # We type-check that `x` and `y` are either single arrays\n # or lists of arrays.\n if isinstance(x, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n tensor_util.is_tensor(v) for v in x):\n raise ValueError('Please provide as model inputs either a single '\n 'array or a list of arrays. You passed: x=' + str(x))\n all_inputs += list(x)\n elif isinstance(x, dict):\n dict_inputs = True\n keys = sorted(x.keys())\n all_inputs = [x[k] for k in keys]\n else:\n if not isinstance(x, np.ndarray) and not tensor_util.is_tensor(x):\n raise ValueError('Please provide as model inputs either a single '\n 'array or a list of arrays. You passed: x=' + str(x))\n all_inputs.append(x)\n\n # Build the model using the retrieved inputs (value or symbolic).\n # If values, then in symbolic-mode placeholders will be created\n # to match the value shapes.\n if not self.inputs:\n is_build_called = True\n self._set_inputs(x)\n else:\n dict_inputs = isinstance(self.inputs, dict)\n\n if y is not None:\n if not self.optimizer:\n raise RuntimeError('You must compile a model before '\n 'training/testing. '\n 'Use `model.compile(optimizer, loss)`.')\n if not self._is_compiled:\n # On-the-fly compilation of the model.\n # We need to use `y` to set the model targets.\n if isinstance(y, (list, tuple)):\n if not all(isinstance(v, np.ndarray) or\n tensor_util.is_tensor(v) for v in y):\n raise ValueError('Please provide as model targets either a single '\n 'array or a list of arrays. 
'\n 'You passed: y=' + str(y))\n all_inputs += list(y)\n elif isinstance(y, dict):\n raise ValueError('Please do not pass a dictionary as model targets.')\n else:\n if not isinstance(y, np.ndarray) and not tensor_util.is_tensor(y):\n raise ValueError('Please provide as model targets either a single '\n 'array or a list of arrays. '\n 'You passed: y=' + str(y))\n all_inputs.append(y)\n\n # Typecheck that all inputs are *either* value *or* symbolic.\n # TODO(fchollet): this check could be removed in Eager mode?\n if any(tensor_util.is_tensor(v) for v in all_inputs):\n if not all(tensor_util.is_tensor(v) for v in all_inputs):\n raise ValueError('Do not pass inputs that mix Numpy arrays and '\n 'TensorFlow tensors. '\n 'You passed: x=' + str(x) + '; y=' + str(y))\n\n if context.executing_eagerly():\n target_tensors = None\n else:\n # Handle target tensors if any passed.\n if not isinstance(y, (list, tuple)):\n y = [y]\n target_tensors = [v for v in y if tensor_util.is_tensor(v)]\n is_compile_called = True\n self.compile(optimizer=self.optimizer,\n loss=self.loss,\n metrics=self.metrics,\n loss_weights=self.loss_weights,\n target_tensors=target_tensors)\n\n # In graph mode, if we had just set inputs and targets as symbolic tensors\n # by invoking build and compile on the model respectively, we do not have to\n # feed anything to the model. Model already has input and target data as\n # part of the graph.\n # Note: in this case, `any` and `all` are equivalent since we disallow\n # mixed symbolic/value inputs.\n if (not context.executing_eagerly() and is_build_called and\n is_compile_called and\n any(tensor_util.is_tensor(v) for v in all_inputs)):\n return [], [], []\n\n # What follows is input validation and standardization to list format,\n # in the case where all inputs are value arrays.\n\n if context.executing_eagerly():\n # In eager mode, do not do shape validation\n # since the network has no input nodes (placeholders) to be fed.\n feed_input_names = self.input_names\n feed_input_shapes = None\n elif not self._is_graph_network:\n # Case: symbolic-mode subclassed network. Do not do shape validation.\n feed_input_names = self._feed_input_names\n feed_input_shapes = None\n else:\n # Case: symbolic-mode graph network.\n # In this case, we run extensive shape validation checks.\n feed_input_names = self._feed_input_names\n feed_input_shapes = self._feed_input_shapes\n\n # Standardize the inputs.\n x = training_utils.standardize_input_data(\n x,\n feed_input_names,\n feed_input_shapes,\n check_batch_axis=False, # Don't enforce the batch size.\n exception_prefix='input')\n\n if y is not None:\n if not self._is_graph_network:\n feed_output_names = self._feed_output_names\n feed_output_shapes = None\n # Sample weighting not supported in this case.\n # TODO(fchollet): consider supporting it.\n feed_sample_weight_modes = [None for _ in self.outputs]\n else:\n feed_output_names = self._feed_output_names\n feed_sample_weight_modes = self._feed_sample_weight_modes\n feed_output_shapes = []\n for output_shape, loss_fn in zip(self._feed_output_shapes,\n self._feed_loss_fns):\n if loss_fn is losses.sparse_categorical_crossentropy:\n if K.image_data_format() == 'channels_first':\n feed_output_shapes.append(\n (output_shape[0], 1) + output_shape[2:])\n else:\n feed_output_shapes.append(output_shape[:-1] + (1,))\n elif (not hasattr(loss_fn, '__name__') or\n getattr(losses, loss_fn.__name__, None) is None):\n # If `loss_fn` is not a function (e.g. 
callable class)\n # or if it is not in the `losses` module, then\n # it is a user-defined loss and we make no assumptions\n # about it.\n feed_output_shapes.append(None)\n else:\n feed_output_shapes.append(output_shape)\n\n # Standardize the outputs.\n y = training_utils.standardize_input_data(\n y,\n feed_output_names,\n feed_output_shapes,\n check_batch_axis=False, # Don't enforce the batch size.\n exception_prefix='target')\n\n # Generate sample-wise weight values given the `sample_weight` and\n # `class_weight` arguments.\n sample_weights = training_utils.standardize_sample_weights(\n sample_weight, feed_output_names)\n class_weights = training_utils.standardize_class_weights(\n class_weight, feed_output_names)\n sample_weights = [\n training_utils.standardize_weights(ref, sw, cw, mode)\n for (ref, sw, cw, mode) in zip(y, sample_weights, class_weights,\n feed_sample_weight_modes)\n ]\n # Check that all arrays have the same length.\n if not self._distribution_strategy:\n training_utils.check_array_lengths(x, y, sample_weights)\n if self._is_graph_network and not context.executing_eagerly():\n # Additional checks to avoid users mistakenly using improper loss fns.\n training_utils.check_loss_and_target_compatibility(\n y, self._feed_loss_fns, feed_output_shapes)\n else:\n y = []\n sample_weights = []\n\n if self.stateful and batch_size:\n # Check that for stateful networks, number of samples is a multiple\n # of the static batch size.\n if x[0].shape[0] % batch_size != 0:\n raise ValueError('In a stateful network, '\n 'you should only pass inputs with '\n 'a number of samples that can be '\n 'divided by the batch size. Found: ' +\n str(x[0].shape[0]) + ' samples')\n\n # If dictionary inputs were provided, we return a dictionary as well.\n if dict_inputs:\n x = dict(zip(feed_input_names, x))\n return x, y, sample_weights\n
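\n # NOTE: a minimal sketch of the input forms the standardization path above\n # accepts, exercised through the public API (illustrative only; the layer\n # sizes, shapes and optimizer are assumptions, not part of this module):\n #\n # model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])\n # model.compile(optimizer='sgd', loss='mse')\n # # Numpy arrays are standardized to lists of arrays:\n # model.train_on_batch(np.ones((8, 4)), np.zeros((8, 2)))\n # # A tf.data Dataset yielding (inputs, targets) is accepted as `x` alone:\n # ds = dataset_ops.Dataset.from_tensor_slices(\n # (np.ones((8, 4)), np.zeros((8, 2)))).batch(4).repeat()\n # model.fit(ds, steps_per_epoch=2, epochs=1)\n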
\n @checkpointable.no_automatic_dependency_tracking\n def _set_inputs(self, inputs, outputs=None, training=None):\n \"\"\"Set model's input and output specs based on the input data received.\n\n This is to be used for Model subclasses, which do not know at instantiation\n time what their inputs look like.\n\n Args:\n inputs: Single array, or list of arrays. The arrays could be placeholders,\n Numpy arrays, or data tensors.\n - if placeholders: the model is built on top of these placeholders,\n and we expect Numpy data to be fed for them when calling `fit`/etc.\n - if Numpy data: we create placeholders matching the shape of the Numpy\n arrays. We expect Numpy data to be fed for these placeholders\n when calling `fit`/etc.\n - if data tensors: the model is built on top of these tensors.\n We do not expect any Numpy data to be provided when calling `fit`/etc.\n outputs: None, a data tensor, or a list of tensors. If None, the\n outputs will be determined by invoking `self.call()`, otherwise the\n provided value will be used.\n training: Boolean or None. Only relevant in symbolic mode. Specifies\n whether to build the model's graph in inference mode (False), training\n mode (True), or using the Keras learning phase (None).\n Raises:\n ValueError: If dict inputs are passed to a Sequential Model where the\n first layer isn't FeatureLayer.\n \"\"\"\n if self.inputs:\n raise ValueError('Model inputs are already set.')\n\n if self.__class__.__name__ == 'Sequential' and not self.built:\n if tensor_util.is_tensor(inputs):\n input_shape = (None,) + tuple(inputs.get_shape().as_list()[1:])\n self.build(input_shape=input_shape)\n elif isinstance(inputs, dict):\n # We assert that the first layer is a FeatureLayer.\n if not training_utils.is_feature_layer(self.layers[0]):\n raise ValueError('Passing a dictionary input to a Sequential Model '\n 'which does not have FeatureLayer as the first layer '\n 'is an error.')\n input_shape = (None,)\n self.build(input_shape=input_shape)\n else:\n input_shape = (None,) + inputs.shape[1:]\n self.build(input_shape=input_shape)\n\n # On-the-fly setting of symbolic model inputs (either by using the tensor\n # provided, or by creating a placeholder if Numpy data was provided).\n model_inputs = training_utils.ModelInputs(inputs)\n inputs = model_inputs.get_symbolic_inputs()\n self.inputs = model_inputs.get_symbolic_inputs(return_single_as_list=True)\n self.input_names = model_inputs.get_input_names()\n\n self._feed_inputs = []\n self._feed_input_names = []\n self._feed_input_shapes = []\n\n for k, v in model_inputs.as_dict():\n if K.is_placeholder(v):\n self._feed_inputs.append(v)\n self._feed_input_names.append(k)\n self._feed_input_shapes.append(K.int_shape(v))\n\n if outputs is None:\n # Obtain symbolic outputs by calling the model.\n graph = K.get_graph()\n with graph.as_default():\n if self._expects_training_arg:\n outputs = self.call(inputs, training=training)\n else:\n outputs = self.call(inputs)\n\n outputs = nest.flatten(outputs)\n self.outputs = outputs\n self.output_names = [\n 'output_%d' % (i + 1) for i in range(len(self.outputs))]\n self.built = True\n\n def fit(self,\n x=None,\n y=None,\n batch_size=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_split=0.,\n validation_data=None,\n shuffle=True,\n class_weight=None,\n sample_weight=None,\n initial_epoch=0,\n steps_per_epoch=None,\n validation_steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n **kwargs):\n \"\"\"Trains the model for a fixed number of epochs (iterations on a dataset).\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator. Should return a tuple\n of either `(inputs, targets)` or\n `(inputs, targets, sample_weights)`.\n - A generator or `keras.utils.Sequence` returning `(inputs, targets)`\n or `(inputs, targets, sample weights)`.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). 
If `x` is a dataset, dataset\n iterator, generator, or `keras.utils.Sequence` instance, `y` should\n not be specified (since targets will be obtained from `x`).\n batch_size: Integer or `None`.\n Number of samples per gradient update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of symbolic tensors, dataset, dataset iterators,\n generators, or `keras.utils.Sequence` instances (since they generate\n batches).\n epochs: Integer. Number of epochs to train the model.\n An epoch is an iteration over the entire `x` and `y`\n data provided.\n Note that in conjunction with `initial_epoch`,\n `epochs` is to be understood as \"final epoch\".\n The model is not trained for a number of iterations\n given by `epochs`, but merely until the epoch\n of index `epochs` is reached.\n verbose: Integer. 0, 1, or 2. Verbosity mode.\n 0 = silent, 1 = progress bar, 2 = one line per epoch.\n callbacks: List of `keras.callbacks.Callback` instances.\n List of callbacks to apply during training.\n See [callbacks](/api_docs/python/tf/keras/callbacks).\n validation_split: Float between 0 and 1.\n Fraction of the training data to be used as validation data.\n The model will set apart this fraction of the training data,\n will not train on it, and will evaluate\n the loss and any model metrics\n on this data at the end of each epoch.\n The validation data is selected from the last samples\n in the `x` and `y` data provided, before shuffling. This argument is\n not supported when `x` is a dataset, dataset iterator, generator or\n `keras.utils.Sequence` instance.\n validation_data: Data on which to evaluate\n the loss and any model metrics at the end of each epoch.\n The model will not be trained on this data.\n `validation_data` will override `validation_split`.\n `validation_data` could be:\n - tuple `(x_val, y_val)` of Numpy arrays or tensors\n - tuple `(x_val, y_val, val_sample_weights)` of Numpy arrays\n - dataset or a dataset iterator\n For the first two cases, `batch_size` must be provided.\n For the last case, `validation_steps` must be provided.\n shuffle: Boolean (whether to shuffle the training data\n before each epoch) or str (for 'batch').\n 'batch' is a special option for dealing with the\n limitations of HDF5 data; it shuffles in batch-sized chunks.\n Has no effect when `steps_per_epoch` is not `None`.\n class_weight: Optional dictionary mapping class indices (integers)\n to a weight (float) value, used for weighting the loss function\n (during training only).\n This can be useful to tell the model to\n \"pay more attention\" to samples from\n an under-represented class.\n sample_weight: Optional Numpy array of weights for\n the training samples, used for weighting the loss function\n (during training only). You can either pass a flat (1D)\n Numpy array with the same length as the input samples\n (1:1 mapping between weights and samples),\n or in the case of temporal data,\n you can pass a 2D array with shape\n `(samples, sequence_length)`,\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n `sample_weight_mode=\"temporal\"` in `compile()`. 
This argument is not\n supported when `x` is a dataset, dataset iterator, generator, or\n `keras.utils.Sequence` instance, instead provide the sample_weights\n as the third element of `x`.\n initial_epoch: Integer.\n Epoch at which to start training\n (useful for resuming a previous training run).\n steps_per_epoch: Integer or `None`.\n Total number of steps (batches of samples)\n before declaring one epoch finished and starting the\n next epoch. When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined.\n validation_steps: Only relevant if `validation_data` is provided and\n is a dataset or dataset iterator. Total number of steps (batches of\n samples) to draw before stopping when performing validation\n at the end of every epoch.\n max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n input only. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Used for generator or `keras.utils.Sequence` input\n only. Maximum number of processes to spin up\n when using process-based threading. If unspecified, `workers`\n will default to 1. If 0, will execute the generator on the main\n thread.\n use_multiprocessing: Boolean. Used for generator or\n `keras.utils.Sequence` input only. If `True`, use process-based\n threading. If unspecified, `use_multiprocessing` will default to\n `False`. Note that because this implementation relies on\n multiprocessing, you should not pass non-picklable arguments to\n the generator as they can't be passed easily to children processes.\n **kwargs: Used for backwards compatibility.\n\n Returns:\n A `History` object. Its `History.history` attribute is\n a record of training loss values and metrics values\n at successive epochs, as well as validation loss values\n and validation metrics values (if applicable).\n\n Raises:\n RuntimeError: If the model was never compiled.\n ValueError: In case of mismatch between the provided input data\n and what the model expects.\n \"\"\"\n # TODO(fchollet): this method may be creating reference cycles, which would\n # lead to accumulating garbage in memory when called in a loop. 
Investigate.\n\n if data_utils.is_generator_or_sequence(x):\n training_utils.check_generator_arguments(y, sample_weight)\n return self.fit_generator(\n x,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=validation_data,\n validation_steps=validation_steps,\n class_weight=class_weight,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n shuffle=shuffle,\n initial_epoch=initial_epoch)\n\n # Backwards compatibility\n if batch_size is None and steps_per_epoch is None:\n batch_size = 32\n # Legacy support\n if 'nb_epoch' in kwargs:\n logging.warning(\n 'The `nb_epoch` argument in `fit` '\n 'has been renamed `epochs`.')\n epochs = kwargs.pop('nb_epoch')\n if kwargs:\n raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))\n\n # Validate and standardize user data.\n if self._distribution_strategy:\n distributed_training_utils.validate_callbacks(callbacks)\n\n distributed_training_utils.validate_inputs(\n x, y, self._distribution_strategy)\n\n first_x_value = nest.flatten(x)[0]\n if not steps_per_epoch and isinstance(first_x_value, np.ndarray):\n steps_per_epoch = distributed_training_utils.get_input_batch_params(\n first_x_value, batch_size, self._distribution_strategy)\n\n x, y, sample_weights = self._standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n class_weight=class_weight,\n batch_size=batch_size,\n check_steps=True,\n steps_name='steps_per_epoch',\n steps=steps_per_epoch,\n validation_split=validation_split,\n shuffle=shuffle)\n\n # Prepare validation data.\n if validation_data:\n if (isinstance(validation_data, iterator_ops.Iterator) or\n isinstance(validation_data, iterator_ops.EagerIterator) or\n isinstance(validation_data, dataset_ops.Dataset)):\n val_x = validation_data\n val_y = None\n val_sample_weight = None\n elif len(validation_data) == 2:\n val_x, val_y = validation_data # pylint: disable=unpacking-non-sequence\n val_sample_weight = None\n elif len(validation_data) == 3:\n val_x, val_y, val_sample_weight = validation_data # pylint: disable=unpacking-non-sequence\n else:\n raise ValueError(\n 'When passing a `validation_data` argument, '\n 'it must contain either 2 items (x_val, y_val), '\n 'or 3 items (x_val, y_val, val_sample_weights), '\n 'or alternatively it could be a dataset or a dataset iterator. '\n 'However we received `validation_data=%s`' % validation_data)\n\n # Validate and standardize validation data.\n if self._distribution_strategy:\n distributed_training_utils.validate_inputs(\n val_x, val_y, self._distribution_strategy)\n first_valx_value = nest.flatten(val_x)[0]\n if not validation_steps and isinstance(first_valx_value, np.ndarray):\n validation_steps = distributed_training_utils.get_input_batch_params(\n first_valx_value, batch_size, self._distribution_strategy)\n\n val_x, val_y, val_sample_weights = self._standardize_user_data(\n val_x,\n val_y,\n sample_weight=val_sample_weight,\n batch_size=batch_size,\n steps=validation_steps)\n\n elif validation_split and 0. < validation_split < 1.:\n if training_utils.has_symbolic_tensors(x):\n raise ValueError('If your data is in the form of symbolic tensors, '\n 'you cannot use `validation_split`.')\n if hasattr(x[0], 'shape'):\n split_at = int(x[0].shape[0] * (1. - validation_split))\n else:\n split_at = int(len(x[0]) * (1. - validation_split))\n x, val_x = (slice_arrays(x, 0, split_at), slice_arrays(x, split_at))\n y, val_y = (slice_arrays(y, 0, split_at), slice_arrays(y, split_at))\n sample_weights, val_sample_weights = (slice_arrays(\n sample_weights, 0, split_at), slice_arrays(sample_weights, split_at))\n elif validation_steps:\n val_x = []\n val_y = []\n val_sample_weights = []\n else:\n val_x = None\n val_y = None\n val_sample_weights = None\n\n if context.executing_eagerly():\n return training_eager.fit_loop(\n self,\n inputs=x,\n targets=y,\n sample_weights=sample_weights,\n class_weight=class_weight,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_inputs=val_x,\n val_targets=val_y,\n val_sample_weights=val_sample_weights,\n shuffle=shuffle,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps)\n elif self._distribution_strategy:\n return training_distributed.fit_loop(\n self, x,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_iterator=val_x,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps)\n else:\n return training_arrays.fit_loop(\n self, x, y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n val_inputs=val_x,\n val_targets=val_y,\n val_sample_weights=val_sample_weights,\n shuffle=shuffle,\n initial_epoch=initial_epoch,\n steps_per_epoch=steps_per_epoch,\n validation_steps=validation_steps)\n
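\n # NOTE: an illustrative sketch of the `fit` workflow above (the model,\n # array shapes and hyperparameters are assumptions, not part of this\n # module):\n #\n # model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])\n # model.compile(optimizer='rmsprop', loss='mse')\n # history = model.fit(np.random.random((32, 8)),\n # np.random.random((32, 1)),\n # batch_size=4, epochs=2, validation_split=0.25)\n # print(history.history['loss']) # One loss value per epoch.\n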
\n def evaluate(self,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False):\n \"\"\"Returns the loss value & metrics values for the model in test mode.\n\n Computation is done in batches.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n - A generator or `keras.utils.Sequence` instance.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely).\n If `x` is a dataset, dataset iterator, generator or\n `keras.utils.Sequence` instance, `y` should not be specified (since\n targets will be obtained from the iterator/dataset).\n batch_size: Integer or `None`.\n Number of samples per batch.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of symbolic tensors, dataset, dataset iterators,\n generators, or `keras.utils.Sequence` instances (since they generate\n batches).\n verbose: 0 or 1. Verbosity mode.\n 0 = silent, 1 = progress bar.\n sample_weight: Optional Numpy array of weights for\n the test samples, used for weighting the loss function.\n You can either pass a flat (1D)\n Numpy array with the same length as the input samples\n (1:1 mapping between weights and samples),\n or in the case of temporal data,\n you can pass a 2D array with shape\n `(samples, sequence_length)`,\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n `sample_weight_mode=\"temporal\"` in `compile()`. 
This argument is not\n supported when `x` is a dataset or a dataset iterator; instead, pass\n sample weights as the third element of `x`.\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n before declaring the evaluation round finished.\n Ignored with the default value of `None`.\n max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n input only. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Used for generator or `keras.utils.Sequence` input\n only. Maximum number of processes to spin up when using\n process-based threading. If unspecified, `workers` will default\n to 1. If 0, will execute the generator on the main thread.\n use_multiprocessing: Boolean. Used for generator or\n `keras.utils.Sequence` input only. If `True`, use process-based\n threading. If unspecified, `use_multiprocessing` will default to\n `False`. Note that because this implementation relies on\n multiprocessing, you should not pass non-picklable arguments to\n the generator as they can't be passed easily to children processes.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: in case of invalid arguments.\n \"\"\"\n if data_utils.is_generator_or_sequence(x):\n training_utils.check_generator_arguments(y, sample_weight)\n return self.evaluate_generator(\n x,\n steps=steps,\n verbose=verbose,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing)\n\n # Backwards compatibility.\n if batch_size is None and steps is None:\n batch_size = 32\n\n # Validate and standardize user data.\n if self._distribution_strategy:\n distributed_training_utils.validate_inputs(\n x, y, self._distribution_strategy)\n first_x_value = nest.flatten(x)[0]\n if isinstance(first_x_value, np.ndarray) and not steps:\n steps = distributed_training_utils.get_input_batch_params(\n first_x_value, batch_size, self._distribution_strategy)\n\n x, y, sample_weights = self._standardize_user_data(\n x,\n y,\n sample_weight=sample_weight,\n batch_size=batch_size,\n check_steps=True,\n steps_name='steps',\n steps=steps)\n\n if context.executing_eagerly():\n return training_eager.test_loop(\n self,\n inputs=x,\n targets=y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps)\n elif self._distribution_strategy:\n return training_distributed.test_loop(\n self,\n iterator=x,\n verbose=verbose,\n steps=steps)\n else:\n return training_arrays.test_loop(\n self,\n inputs=x,\n targets=y,\n sample_weights=sample_weights,\n batch_size=batch_size,\n verbose=verbose,\n steps=steps)\n
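\n # NOTE: continuing the sketch above, `evaluate` returns the scalar test\n # loss plus one scalar per compiled metric; `model.metrics_names` gives\n # the matching labels (the shapes below are assumptions):\n #\n # loss = model.evaluate(np.random.random((16, 8)),\n # np.random.random((16, 1)), batch_size=4)\n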
\n def predict(self,\n x,\n batch_size=None,\n verbose=0,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False):\n \"\"\"Generates output predictions for the input samples.\n\n Computation is done in batches.\n\n Arguments:\n x: Input samples. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A `tf.data` dataset or a dataset iterator.\n - A generator or `keras.utils.Sequence` instance.\n batch_size: Integer or `None`.\n Number of samples per batch.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of symbolic tensors, dataset, dataset iterators,\n generators, or `keras.utils.Sequence` instances (since they generate\n batches).\n verbose: Verbosity mode, 0 or 1.\n steps: Total number of steps (batches of samples)\n before declaring the prediction round finished.\n Ignored with the default value of `None`.\n max_queue_size: Integer. Used for generator or `keras.utils.Sequence`\n input only. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Used for generator or `keras.utils.Sequence` input\n only. Maximum number of processes to spin up when using\n process-based threading. If unspecified, `workers` will default\n to 1. If 0, will execute the generator on the main thread.\n use_multiprocessing: Boolean. Used for generator or\n `keras.utils.Sequence` input only. If `True`, use process-based\n threading. If unspecified, `use_multiprocessing` will default to\n `False`. Note that because this implementation relies on\n multiprocessing, you should not pass non-picklable arguments to\n the generator as they can't be passed easily to children processes.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case of mismatch between the provided\n input data and the model's expectations,\n or in case a stateful model receives a number of samples\n that is not a multiple of the batch size.\n \"\"\"\n if data_utils.is_generator_or_sequence(x):\n return self.predict_generator(\n x,\n steps=steps,\n verbose=verbose,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing)\n\n # Backwards compatibility.\n if batch_size is None and steps is None:\n batch_size = 32\n\n if self._distribution_strategy:\n # Turn off prefetching since this is currently not deterministic. Once\n # b/112498930 is fixed we can turn it back on.\n # `_prefetch_on_device` is currently a property of only\n # `MirroredStrategy`.\n if hasattr(self._distribution_strategy, '_prefetch_on_device'):\n self._distribution_strategy._prefetch_on_device = False # pylint: disable=protected-access\n distributed_training_utils.validate_inputs(\n x, None, self._distribution_strategy)\n first_x_value = nest.flatten(x)[0]\n if isinstance(first_x_value, np.ndarray) and not steps:\n steps = distributed_training_utils.get_input_batch_params(\n first_x_value, batch_size, self._distribution_strategy)\n\n # Validate and standardize user data.\n # TODO(anjalisridhar): We don't pass batch_size here for some reason. 
This\n # means that we end up calculating it twice which we should avoid.\n x, _, _ = self._standardize_user_data(\n x, check_steps=True, steps_name='steps', steps=steps)\n\n if context.executing_eagerly():\n return training_eager.predict_loop(\n self, x, batch_size=batch_size, verbose=verbose, steps=steps)\n elif self._distribution_strategy:\n results = training_distributed.predict_loop(\n self, x, verbose=verbose, steps=steps)\n # Turn prefetching back on since we turned it off previously.\n if hasattr(self._distribution_strategy, '_prefetch_on_device'):\n self._distribution_strategy._prefetch_on_device = True # pylint: disable=protected-access\n return results\n else:\n return training_arrays.predict_loop(\n self, x, batch_size=batch_size, verbose=verbose, steps=steps)\n\n def train_on_batch(self, x, y=None, sample_weight=None, class_weight=None):\n \"\"\"Runs a single gradient update on a single batch of data.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). If `x` is a dataset or a\n dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample.\n In the case of temporal data, you can pass a 2D array\n with shape (samples, sequence_length),\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n sample_weight_mode=\"temporal\" in compile(). This argument is not\n supported when `x` is a dataset or a dataset iterator.\n class_weight: Optional dictionary mapping\n class indices (integers) to\n a weight (float) to apply to the model's loss for the samples\n from this class during training.\n This can be useful to tell the model to \"pay more attention\" to\n samples from an under-represented class.\n\n Returns:\n Scalar training loss\n (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). 
The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`train_on_batch` is not supported for models '\n 'compiled with DistributionStrategy.')\n # Validate and standardize user data.\n x, y, sample_weights = self._standardize_user_data(\n x, y, sample_weight=sample_weight, class_weight=class_weight)\n\n if context.executing_eagerly():\n outputs = training_eager.train_on_batch(\n self, x, y, sample_weights=sample_weights)\n else:\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n ins = x + y + sample_weights + [1]\n else:\n ins = x + y + sample_weights\n\n self._make_train_function()\n outputs = self.train_function(ins)\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def test_on_batch(self, x, y=None, sample_weight=None):\n \"\"\"Test the model on a single batch of samples.\n\n Arguments:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A dict mapping input names to the corresponding array/tensors,\n if the model has named inputs.\n - A `tf.data` dataset or a dataset iterator.\n y: Target data. Like the input data `x`,\n it could be either Numpy array(s) or TensorFlow tensor(s).\n It should be consistent with `x` (you cannot have Numpy inputs and\n tensor targets, or inversely). If `x` is a dataset or a\n dataset iterator, `y` should not be specified\n (since targets will be obtained from the iterator).\n sample_weight: Optional array of the same length as x, containing\n weights to apply to the model's loss for each sample.\n In the case of temporal data, you can pass a 2D array\n with shape (samples, sequence_length),\n to apply a different weight to every timestep of every sample.\n In this case you should make sure to specify\n sample_weight_mode=\"temporal\" in compile(). This argument is not\n supported when `x` is a dataset or a dataset iterator.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: In case of invalid user-provided arguments.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`test_on_batch` is not supported for models '\n 'compiled with DistributionStrategy.')\n # Validate and standardize user data.\n x, y, sample_weights = self._standardize_user_data(\n x, y, sample_weight=sample_weight)\n\n if context.executing_eagerly():\n outputs = training_eager.test_on_batch(\n self, x, y, sample_weights=sample_weights)\n else:\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n ins = x + y + sample_weights + [0]\n else:\n ins = x + y + sample_weights\n self._make_test_function()\n outputs = self.test_function(ins)\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def predict_on_batch(self, x):\n \"\"\"Returns predictions for a single batch of samples.\n\n Arguments:\n x: Input data. 
It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A `tf.data` dataset or a dataset iterator.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case of mismatch between given number of inputs and\n expectations of the model.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`predict_on_batch` is not supported for '\n 'models compiled with DistributionStrategy.')\n # Validate and standardize user data.\n inputs, _, _ = self._standardize_user_data(x)\n if context.executing_eagerly():\n if (isinstance(x, iterator_ops.EagerIterator) or\n (isinstance(x, dataset_ops.Dataset) and context.executing_eagerly())):\n inputs = training_utils.cast_if_floating_dtype(inputs)\n else:\n inputs = [\n ops.convert_to_tensor(val, dtype=K.floatx()) for val in inputs\n ]\n return self(inputs) # pylint: disable=not-callable\n\n if not context.executing_eagerly():\n if self.uses_learning_phase and not isinstance(K.learning_phase(), int):\n ins = inputs + [0]\n else:\n ins = inputs\n\n self._make_predict_function()\n outputs = self.predict_function(ins)\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n\n def fit_generator(self,\n generator,\n steps_per_epoch=None,\n epochs=1,\n verbose=1,\n callbacks=None,\n validation_data=None,\n validation_steps=None,\n class_weight=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n shuffle=True,\n initial_epoch=0):\n \"\"\"Fits the model on data yielded batch-by-batch by a Python generator.\n\n The generator is run in parallel to the model, for efficiency.\n For instance, this allows you to do real-time data augmentation\n on images on CPU in parallel to training your model on GPU.\n\n The use of `keras.utils.Sequence` guarantees the ordering\n and the single use of every input per epoch when\n using `use_multiprocessing=True`.\n\n Arguments:\n generator: A generator or an instance of `Sequence`\n (`keras.utils.Sequence`)\n object in order to avoid duplicate data\n when using multiprocessing.\n The output of the generator must be either\n - a tuple `(inputs, targets)`\n - a tuple `(inputs, targets, sample_weights)`.\n This tuple (a single output of the generator) makes a single batch.\n Therefore, all arrays in this tuple must have the same length (equal\n to the size of this batch). Different batches may have different\n sizes. For example, the last batch of the epoch is commonly smaller\n than the others, if the size of the dataset is not divisible by the\n batch size.\n The generator is expected to loop over its data\n indefinitely. An epoch finishes when `steps_per_epoch`\n batches have been seen by the model.\n steps_per_epoch: Total number of steps (batches of samples)\n to yield from `generator` before declaring one epoch\n finished and starting the next epoch. 
It should typically\n be equal to the number of samples of your dataset\n divided by the batch size.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n epochs: Integer, total number of iterations on the data.\n verbose: Verbosity mode, 0, 1, or 2.\n callbacks: List of callbacks to be called during training.\n validation_data: This can be either\n - a generator for the validation data\n - a tuple (inputs, targets)\n - a tuple (inputs, targets, sample_weights).\n validation_steps: Only relevant if `validation_data`\n is a generator. Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(validation_data)` as a number of steps.\n class_weight: Dictionary mapping class indices to a weight\n for the class.\n max_queue_size: Integer. Maximum size for the generator queue.\n If unspecified, `max_queue_size` will default to 10.\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n shuffle: Boolean. Whether to shuffle the order of the batches at\n the beginning of each epoch. Only used with instances\n of `Sequence` (`keras.utils.Sequence`).\n Has no effect when `steps_per_epoch` is not `None`.\n initial_epoch: Epoch at which to start training\n (useful for resuming a previous training run)\n\n Returns:\n A `History` object.\n\n Example:\n\n ```python\n def generate_arrays_from_file(path):\n while 1:\n f = open(path)\n for line in f:\n # create numpy arrays of input data\n # and labels, from each line in the file\n x1, x2, y = process_line(line)\n yield ({'input_1': x1, 'input_2': x2}, {'output': y})\n f.close()\n\n model.fit_generator(generate_arrays_from_file('/my_file.txt'),\n steps_per_epoch=10000, epochs=10)\n ```\n Raises:\n ValueError: In case the generator yields data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`fit_generator` is not supported for '\n 'models compiled with DistributionStrategy.')\n\n if not self.built and not self._is_graph_network:\n raise NotImplementedError(\n '`fit_generator` is not yet enabled for unbuilt Model subclasses')\n\n return training_generator.fit_generator(\n self,\n generator,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n verbose=verbose,\n callbacks=callbacks,\n validation_data=validation_data,\n validation_steps=validation_steps,\n class_weight=class_weight,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n shuffle=shuffle,\n initial_epoch=initial_epoch)\n\n def evaluate_generator(self,\n generator,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"Evaluates the model on a data generator.\n\n The generator should return the same kind of data\n as accepted by `test_on_batch`.\n\n Arguments:\n generator: Generator yielding tuples (inputs, targets)\n or (inputs, targets, sample_weights)\n or an instance of `keras.utils.Sequence`\n object in order to avoid duplicate data\n when using multiprocessing.\n 
steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n max_queue_size: Integer. Maximum size for the generator queue.\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n verbose: Verbosity mode, 0 or 1.\n\n Returns:\n Scalar test loss (if the model has a single output and no metrics)\n or list of scalars (if the model has multiple outputs\n and/or metrics). The attribute `model.metrics_names` will give you\n the display labels for the scalar outputs.\n\n Raises:\n ValueError: in case of invalid arguments, or if the generator yields\n data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`evaluate_generator` is not supported for '\n 'models compiled with DistributionStrategy.')\n\n if not self.built and not self._is_graph_network:\n raise NotImplementedError(\n '`evaluate_generator` is not yet enabled for '\n 'unbuilt Model subclasses')\n\n return training_generator.evaluate_generator(\n self,\n generator,\n steps=steps,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose)\n\n def predict_generator(self,\n generator,\n steps=None,\n max_queue_size=10,\n workers=1,\n use_multiprocessing=False,\n verbose=0):\n \"\"\"Generates predictions for the input samples from a data generator.\n\n The generator should return the same kind of data as accepted by\n `predict_on_batch`.\n\n Arguments:\n generator: Generator yielding batches of input samples\n or an instance of `keras.utils.Sequence` object in order to\n avoid duplicate data when using multiprocessing.\n steps: Total number of steps (batches of samples)\n to yield from `generator` before stopping.\n Optional for `Sequence`: if unspecified, will use\n the `len(generator)` as a number of steps.\n max_queue_size: Maximum size for the generator queue.\n workers: Integer. Maximum number of processes to spin up\n when using process-based threading.\n If unspecified, `workers` will default to 1. 
If 0, will\n execute the generator on the main thread.\n use_multiprocessing: Boolean.\n If `True`, use process-based threading.\n If unspecified, `use_multiprocessing` will default to `False`.\n Note that because this implementation relies on multiprocessing,\n you should not pass non-picklable arguments to the generator\n as they can't be passed easily to children processes.\n verbose: verbosity mode, 0 or 1.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case the generator yields data in an invalid format.\n \"\"\"\n if self._distribution_strategy:\n raise NotImplementedError('`predict_generator` is not supported for '\n 'models compiled with DistributionStrategy.')\n\n if not self.built and not self._is_graph_network:\n raise NotImplementedError(\n '`predict_generator` is not yet enabled for unbuilt Model subclasses')\n\n return training_generator.predict_generator(\n self,\n generator,\n steps=steps,\n max_queue_size=max_queue_size,\n workers=workers,\n use_multiprocessing=use_multiprocessing,\n verbose=verbose)\n\n def _get_callback_model(self):\n \"\"\"Returns the Callback Model for this Model.\"\"\"\n\n if hasattr(self, '_replicated_model') and self._replicated_model:\n # When using training_distributed, we set the callback model\n # to an instance of the `DistributedModel` that we create in\n # the `compile` call. The `DistributedModel` is initialized\n # with the first replicated model. We need to set the callback\n # model to a DistributedModel to allow us to override saving\n # and loading weights when we checkpoint the model during training.\n return self._replicated_model\n if hasattr(self, 'callback_model') and self.callback_model:\n return self.callback_model\n return self\n\n def _make_callback_model(self, grouped_model):\n first_replicated_model = self._distribution_strategy.unwrap(\n grouped_model)[0]\n # We initialize the callback model with the first replicated model.\n self._replicated_model = DistributedCallbackModel(first_replicated_model)\n self._replicated_model.set_original_model(self)\n\n\nclass DistributedCallbackModel(Model):\n \"\"\"Model that is used for callbacks with DistributionStrategy.\"\"\"\n\n def __init__(self, model):\n super(DistributedCallbackModel, self).__init__()\n # TODO(anjalisridhar): Right now the only attributes set are the layer and\n # weights. 
We may need to set additional attributes as needed since we have\n    # not called compile on this model.\n\n  def set_original_model(self, orig_model):\n    self._original_model = orig_model\n\n  def save_weights(self, filepath, overwrite=True, save_format=None):\n    self._replicated_model.save_weights(filepath, overwrite=overwrite,\n                                        save_format=save_format)\n\n  def save(self, filepath, overwrite=True, include_optimizer=True):\n    # save weights from the distributed model to the original model\n    distributed_model_weights = self.get_weights()\n    self._original_model.set_weights(distributed_model_weights)\n    # TODO(anjalisridhar): Do we need to save the original model here?\n    # Saving the first replicated model works as well.\n    self._original_model.save(filepath, overwrite=overwrite, include_optimizer=False)\n\n  def load_weights(self, filepath, by_name=False):\n    self._original_model.load_weights(filepath, by_name=by_name)\n    # Copy the weights from the original model to each of the replicated models.\n    orig_model_weights = self._original_model.get_weights()\n    distributed_training_utils.set_weights(\n        self._original_model._distribution_strategy, self,  # pylint: disable=protected-access\n        orig_model_weights)\n\n  def __getattr__(self, item):\n    # Whitelisted attributes of the model that can be accessed by the user\n    # during a callback.\n    if item not in ['_setattr_tracking']:\n      logging.warning('You are accessing attribute ' + item + ' of the '\n                      'DistributedCallbackModel that may not have been set '\n                      'correctly.')\n"
] |
[
[
"tensorflow.python.ops.control_flow_ops.tuple",
"tensorflow.python.eager.context.context",
"tensorflow.python.framework.ops.init_scope",
"tensorflow.python.training.checkpointable.base.CheckpointableReference",
"tensorflow.python.training.checkpointable.base.CheckpointInitialValue",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.framework.ops._get_graph_from_inputs",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.variable_scope.variable",
"tensorflow.python.framework.ops.IndexedSlices",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.training.slot_creator.create_zeros_slot",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.training.distribution_strategy_context.get_distribution_strategy",
"tensorflow.python.ops.variables.trainable_variables",
"tensorflow.python.training.slot_creator.create_slot",
"tensorflow.python.ops.gradients.gradients",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.training.slot_creator.create_slot_with_initializer",
"tensorflow.python.training.optimizer._deduplicate_indexed_slices",
"tensorflow.python.training.distribute.get_loss_reduction",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.framework.ops.get_collection_ref",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.framework.ops.convert_to_tensor_or_indexed_slices",
"tensorflow.python.training.distribution_strategy_context.get_replica_context",
"tensorflow.python.framework.ops.get_default_graph"
],
[
"tensorflow.python.keras.engine.training_utils.cast_if_floating_dtype",
"tensorflow.python.keras.backend.learning_phase",
"tensorflow.python.keras.backend.get_graph",
"tensorflow.python.keras.engine.training_arrays.predict_loop",
"tensorflow.python.keras.engine.training_utils.is_feature_layer",
"tensorflow.python.framework.tensor_util.is_tensor",
"tensorflow.python.keras.backend.is_placeholder",
"tensorflow.python.keras.engine.distributed_training_utils.validate_callbacks",
"tensorflow.python.keras.engine.distributed_training_utils.set_weights",
"tensorflow.python.keras.engine.training_arrays.test_loop",
"tensorflow.python.keras.engine.training_utils.check_generator_arguments",
"tensorflow.python.platform.tf_logging.log_first_n",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.keras.backend.is_sparse",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.engine.training_utils.standardize_sample_weights",
"tensorflow.python.keras.backend.name_scope",
"tensorflow.python.keras.engine.training_utils.check_steps_argument",
"tensorflow.python.keras.engine.distributed_training_utils.validate_inputs",
"tensorflow.python.keras.engine.training_utils.standardize_class_weights",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.keras.engine.training_distributed.fit_loop",
"tensorflow.python.keras.metrics.squeeze_or_expand_dimensions",
"tensorflow.python.keras.engine.training_utils.check_loss_and_target_compatibility",
"tensorflow.python.keras.engine.training_generator.evaluate_generator",
"tensorflow.python.keras.losses.get",
"tensorflow.python.keras.engine.training_utils.validate_iterator_input",
"tensorflow.python.keras.backend.get_session",
"tensorflow.python.keras.engine.training_eager.predict_loop",
"tensorflow.python.keras.engine.training_distributed.test_loop",
"tensorflow.python.keras.engine.training_utils.standardize_input_data",
"tensorflow.python.keras.engine.training_distributed.predict_loop",
"tensorflow.python.keras.engine.training_utils.ModelInputs",
"tensorflow.python.keras.engine.distributed_training_utils.get_input_batch_params",
"tensorflow.python.keras.engine.training_eager.test_loop",
"tensorflow.python.platform.tf_logging.warning",
"tensorflow.python.keras.engine.training_eager.fit_loop",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.weights_broadcast_ops.broadcast_weights",
"tensorflow.python.keras.optimizers.get",
"tensorflow.python.keras.engine.training_generator.predict_generator",
"tensorflow.python.keras.engine.training_eager.test_on_batch",
"tensorflow.python.keras.engine.training_utils.prepare_sample_weights",
"tensorflow.python.keras.backend.image_data_format",
"tensorflow.python.keras.engine.training_utils.weighted_masked_objective",
"tensorflow.python.keras.engine.training_utils.standardize_weights",
"tensorflow.python.keras.engine.training_utils.check_array_lengths",
"tensorflow.python.keras.engine.distributed_training_utils.get_var_for_numpy",
"tensorflow.python.keras.engine.training_utils.has_symbolic_tensors",
"tensorflow.python.keras.engine.training_arrays.fit_loop",
"tensorflow.python.keras.backend.floatx",
"tensorflow.python.keras.utils.generic_utils.slice_arrays",
"tensorflow.python.keras.engine.distributed_training_utils.configure_and_create_session",
"tensorflow.python.keras.engine.training_eager.train_on_batch",
"tensorflow.python.keras.engine.distributed_training_utils.get_batch_size",
"tensorflow.python.keras.backend.function",
"tensorflow.python.keras.engine.training_utils.collect_per_output_metric_info",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.keras.utils.data_utils.is_generator_or_sequence",
"tensorflow.python.keras.engine.training_generator.fit_generator",
"tensorflow.python.keras.backend.dtype"
]
] |
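The three `*_generator` methods in the `training.py` excerpt above share one contract: the input is either a plain Python generator or a `keras.utils.Sequence`. A minimal sketch of the `Sequence` route, which is the safe choice with `use_multiprocessing=True` because each worker can index batches independently; the `model`, `x_train`, and `y_train` names are assumptions for illustration, not part of the excerpt:

```python
import numpy as np
from tensorflow import keras

class ArraySequence(keras.utils.Sequence):
    """Index-addressable batches; avoids duplicate data across workers."""

    def __init__(self, x, y, batch_size=32):
        self.x, self.y, self.batch_size = x, y, batch_size

    def __len__(self):
        # Batches per epoch; used when steps_per_epoch is left unspecified.
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[sl], self.y[sl]

# Hypothetical usage with an already-compiled model:
# model.fit_generator(ArraySequence(x_train, y_train), epochs=10,
#                     workers=4, use_multiprocessing=True)
# model.evaluate_generator(ArraySequence(x_test, y_test))
```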
DS3Lab/LambdaML
|
[
"0afca7819e08632ba116fec8e102084e4040a47a"
] |
[
"archived/functions/higgs/SVM_ADMM_reduce.py"
] |
[
"import time\r\nimport numpy as np\r\n\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data.sampler import SubsetRandomSampler\r\n\r\nfrom archived.s3.get_object import get_object\r\nfrom archived.s3 import clear_bucket\r\nfrom archived.sync import reduce_epoch, delete_expired_merged_epoch\r\n\r\nfrom archived.old_model.SVM import SVM\r\nfrom data_loader.libsvm_dataset import DenseDatasetWithLines\r\n\r\n# lambda setting\r\n# file_bucket = \"s3-libsvm\"\r\n# tmp_bucket = \"tmp-grads\"\r\n# merged_bucket = \"merged-params\"\r\nlocal_dir = \"/tmp\"\r\n\r\n# algorithm setting\r\nnum_features = 30\r\nnum_classes = 2\r\nlearning_rate = 0.01\r\nbatch_size = 300\r\nnum_epochs = 10\r\nnum_admm_epochs = 30\r\nvalidation_ratio = .2\r\nshuffle_dataset = True\r\nrandom_seed = 42\r\nep_abs=1e-4\r\nep_rel=1e-2\r\n\r\n\r\ndef initialize_z_and_u(shape):\r\n z = np.random.rand(shape[0], shape[1]).astype(np.float)\r\n u = np.random.rand(shape[0], shape[1]).astype(np.float)\r\n return z, u\r\n\r\n\r\ndef update_z_u(w, z, u, rho, n, lam_0):\r\n z_new = w + u\r\n z_tem = abs(z_new) - lam_0 / float(n * rho)\r\n z_new = np.sign(z_new) * z_tem * (z_tem > 0)\r\n\r\n s = z_new - z\r\n r = w - np.ones(w.shape[0] * w.shape[1]).astype(np.float).reshape(w.shape) * z_new\r\n u_new = u + r\r\n return z_new, s, r, s\r\n\r\n\r\ndef update_z(w, u, rho, n, lam_0):\r\n z_new = w + u\r\n z_tem = abs(z_new) - lam_0 / float(n * rho)\r\n z_new = np.sign(z_new) * z_tem * (z_tem > 0)\r\n return z_new\r\n\r\n\r\ndef check_stop(ep_abs, ep_rel, r, s, n, p, w, z, u, rho):\r\n e_pri = (n*p)**(0.5) * ep_abs + ep_rel * (max(np.sum(w**2),np.sum(n*z**2)))**(0.5)\r\n e_dual = (p)**(0.5) * ep_abs + ep_rel * rho * (np.sum(u**2))**(0.5)/(n)**(0.5)\r\n print(\"r^2 = {}, s^2 = {}, e_pri = {}, e_dual = {}\".\r\n format(np.sum(r**2), e_pri, np.sum(s**2), e_dual))\r\n stop = (np.sum(r**2) <= e_pri**2) & (np.sum(s**2) <= e_dual**2)\r\n return(stop)\r\n\r\n\r\ndef handler(event, context):\r\n start_time = time.time()\r\n bucket = event['bucket_name']\r\n worker_index = event['rank']\r\n num_workers = event['num_workers']\r\n key = event['file']\r\n tmp_bucket = event['tmp_bucket']\r\n merged_bucket = event['merged_bucket']\r\n num_epochs = event['num_epochs']\r\n num_admm_epochs = event['num_admm_epochs']\r\n learning_rate = event['learning_rate']\r\n lam = event['lambda']\r\n rho = event['rho']\r\n batch_size = event['batch_size']\r\n\r\n print('bucket = {}'.format(bucket))\r\n print(\"file = {}\".format(key))\r\n print('number of workers = {}'.format(num_workers))\r\n print('worker index = {}'.format(worker_index))\r\n print('tmp bucket = {}'.format(tmp_bucket))\r\n print('merge bucket = {}'.format(merged_bucket))\r\n print('num epochs = {}'.format(num_epochs))\r\n print('num admm epochs = {}'.format(num_admm_epochs))\r\n print('learning rate = {}'.format(learning_rate))\r\n print(\"lambda = {}\".format(lam))\r\n print(\"rho = {}\".format(rho))\r\n print(\"batch_size = {}\".format(batch_size))\r\n\r\n # read file from s3\r\n file = get_object(bucket, key).read().decode('utf-8').split(\"\\n\")\r\n print(\"read data cost {} s\".format(time.time() - start_time))\r\n # file_path = \"../../dataset/agaricus_127d_train.libsvm\"\r\n # file = open(file_path).readlines()\r\n\r\n parse_start = time.time()\r\n dataset = DenseDatasetWithLines(file, num_features)\r\n print(\"parse data cost {} s\".format(time.time() - parse_start))\r\n\r\n preprocess_start = time.time()\r\n # Creating data indices for training and validation splits:\r\n 
dataset_size = len(dataset)\r\n\r\n indices = list(range(dataset_size))\r\n split = int(np.floor(validation_ratio * dataset_size))\r\n if shuffle_dataset:\r\n np.random.seed(random_seed)\r\n np.random.shuffle(indices)\r\n train_indices, val_indices = indices[split:], indices[:split]\r\n\r\n # Creating PT data samplers and loaders:\r\n train_sampler = SubsetRandomSampler(train_indices)\r\n valid_sampler = SubsetRandomSampler(val_indices)\r\n\r\n train_loader = torch.utils.data.DataLoader(dataset,\r\n batch_size=batch_size,\r\n sampler=train_sampler)\r\n validation_loader = torch.utils.data.DataLoader(dataset,\r\n batch_size=batch_size,\r\n sampler=valid_sampler)\r\n\r\n print(\"preprocess data cost {} s, dataset size = {}\"\r\n .format(time.time() - preprocess_start, dataset_size))\r\n\r\n model = SVM(num_features, num_classes).float()\r\n print(\"size of w = {}\".format(model.linear.weight.data.size()))\r\n\r\n z, u = initialize_z_and_u(model.linear.weight.data.size())\r\n print(\"size of z = {}\".format(z.shape))\r\n print(\"size of u = {}\".format(u.shape))\r\n\r\n # Loss and Optimizer\r\n # Softmax is internally computed.\r\n # Set parameters to be updated.\r\n optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)\r\n\r\n # Training the Model\r\n train_start = time.time()\r\n stop = False\r\n for admm_epoch in range(num_admm_epochs):\r\n print(\"ADMM Epoch >>> {}\".format(admm_epoch))\r\n for epoch in range(num_epochs):\r\n epoch_start = time.time()\r\n epoch_loss = 0\r\n for batch_index, (items, labels) in enumerate(train_loader):\r\n # print(\"------worker {} epoch {} batch {}------\".format(worker_index, epoch, batch_index))\r\n batch_start = time.time()\r\n items = Variable(items.view(-1, num_features))\r\n labels = Variable(labels)\r\n\r\n # Forward + Backward + Optimize\r\n optimizer.zero_grad()\r\n outputs = model(items)\r\n\r\n classify_loss = torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0)) # hinge loss\r\n epoch_loss += classify_loss\r\n\r\n u_z = torch.from_numpy(u).float() - torch.from_numpy(z).float()\r\n loss = classify_loss\r\n for name, param in model.named_parameters():\r\n if name.split('.')[-1] == \"weight\":\r\n loss += rho / 2.0 * torch.norm(param + u_z, p=2)\r\n #loss = classify_loss + rho / 2.0 * torch.norm(torch.sum(model.linear.weight, u_z))\r\n optimizer.zero_grad()\r\n loss.backward(retain_graph=True)\r\n optimizer.step()\r\n\r\n train_time = time.time() - epoch_start\r\n\r\n # Test the Model\r\n test_start = time.time()\r\n correct = 0\r\n total = 0\r\n test_loss = 0\r\n for items, labels in validation_loader:\r\n items = Variable(items.view(-1, num_features))\r\n labels = Variable(labels)\r\n outputs = model(items)\r\n test_loss += torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0))\r\n _, predicted = torch.max(outputs.data, 1)\r\n total += labels.size(0)\r\n correct += (predicted == labels).sum()\r\n test_time = time.time() - test_start\r\n\r\n print('Epoch: [%d/%d], Step: [%d/%d], Time: %.4f, Loss: %.4f, epoch cost %.4f, '\r\n 'train cost %.4f s, test cost %.4f s: '\r\n 'accuracy of the model on the %d test samples: %d %%, test loss = %f'\r\n % (epoch + 1, num_epochs, batch_index + 1, len(train_indices) / batch_size,\r\n time.time() - train_start, epoch_loss.data, time.time() - epoch_start,\r\n train_time, test_time, len(val_indices), 100 * correct / total, test_loss / total))\r\n\r\n w = model.linear.weight.data.numpy()\r\n w_shape = w.shape\r\n b = model.linear.bias.data.numpy()\r\n b_shape = b.shape\r\n u_shape = 
u.shape\r\n\r\n w_and_b = np.concatenate((w.flatten(), b.flatten()))\r\n u_w_b = np.concatenate((u.flatten(), w_and_b.flatten()))\r\n cal_time = time.time() - epoch_start\r\n print(\"Epoch {} calculation cost = {} s\".format(epoch, cal_time))\r\n\r\n sync_start = time.time()\r\n postfix = \"{}\".format(admm_epoch)\r\n u_w_b_merge = reduce_epoch(u_w_b, tmp_bucket, merged_bucket, num_workers, worker_index, postfix)\r\n\r\n u_mean = u_w_b_merge[:u_shape[0] * u_shape[1]].reshape(u_shape) / float(num_workers)\r\n w_mean = u_w_b_merge[u_shape[0]*u_shape[1] : u_shape[0]*u_shape[1]+w_shape[0]*w_shape[1]].reshape(w_shape) / float(num_workers)\r\n b_mean = u_w_b_merge[u_shape[0]*u_shape[1]+w_shape[0]*w_shape[1]:].reshape(b_shape[0]) / float(num_workers)\r\n #model.linear.weight.data = torch.from_numpy(w)\r\n model.linear.bias.data = torch.from_numpy(b_mean).float()\r\n sync_time = time.time() - sync_start\r\n print(\"Epoch {} synchronization cost {} s\".format(epoch, sync_time))\r\n\r\n if worker_index == 0:\r\n delete_expired_merged_epoch(merged_bucket, admm_epoch)\r\n\r\n #z, u, r, s = update_z_u(w, z, u, rho, num_workers, lam)\r\n #stop = check_stop(ep_abs, ep_rel, r, s, dataset_size, num_features, w, z, u, rho)\r\n #print(\"stop = {}\".format(stop))\r\n\r\n #z = num_workers * rho / (2 * lam + num_workers * rho) * (w + u_mean)\r\n z = update_z(w_mean, u_mean, rho, num_workers, lam)\r\n #print(z)\r\n u = u + model.linear.weight.data.numpy() - z\r\n #print(u)\r\n\r\n # Test the Model\r\n correct = 0\r\n total = 0\r\n test_loss = 0\r\n for items, labels in validation_loader:\r\n items = Variable(items.view(-1, num_features))\r\n labels = Variable(labels)\r\n outputs = model(items)\r\n test_loss += torch.mean(torch.clamp(1 - outputs.t() * labels.float(), min=0))\r\n _, predicted = torch.max(outputs.data, 1)\r\n total += labels.size(0)\r\n correct += (predicted == labels).sum()\r\n\r\n print('Epoch: %d, time = %.4f, accuracy of the model on the %d test samples: %d %%, loss = %f'\r\n % (epoch, time.time() - train_start, len(val_indices), 100 * correct / total, test_loss / total))\r\n\r\n if worker_index == 0:\r\n clear_bucket(merged_bucket)\r\n clear_bucket(tmp_bucket)\r\n\r\n end_time = time.time()\r\n print(\"Elapsed time = {} s\".format(end_time - start_time))\r\n"
] |
[
[
"numpy.random.rand",
"torch.max",
"numpy.random.seed",
"torch.autograd.Variable",
"numpy.sum",
"numpy.ones",
"torch.norm",
"numpy.random.shuffle",
"torch.from_numpy",
"numpy.sign",
"torch.utils.data.DataLoader",
"torch.utils.data.sampler.SubsetRandomSampler",
"numpy.floor"
]
] |
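The `update_z` step in the `SVM_ADMM_reduce.py` row above is the soft-thresholding (L1 proximal) update of consensus ADMM, applied to the worker-averaged `w + u`. A standalone sketch of that operator with `kappa = lam / (n * rho)`, matching the source; the function name is illustrative:

```python
import numpy as np

def soft_threshold(v, kappa):
    # Elementwise S_kappa(v) = sign(v) * max(|v| - kappa, 0), the proximal
    # operator of kappa * ||.||_1 -- algebraically the same as the
    # z_tem / np.sign computation inside update_z above.
    return np.sign(v) * np.maximum(np.abs(v) - kappa, 0.0)

# Consensus z-update across n workers (illustrative variable names):
# z = soft_threshold(w_mean + u_mean, lam / (n * rho))
```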
MattUAV/pandas_market_calendars
|
[
"397efbf835085152c0eae2de97fe48bac58d8a82"
] |
[
"pandas_market_calendars/exchange_calendar_hel.py"
] |
[
"from datetime import time\r\n\r\nfrom pandas.tseries.holiday import Holiday, GoodFriday, EasterMonday, AbstractHolidayCalendar\r\nfrom pytz import timezone\r\n\r\nfrom .common_holidays import (\r\n new_years_day,\r\n epiphany,\r\n european_labour_day,\r\n ascension_day,\r\n midsummer_eve,\r\n christmas_eve,\r\n christmas,\r\n boxing_day,\r\n new_years_eve,\r\n)\r\nfrom .market_calendar import MarketCalendar #HolidayCalendar\r\n\r\nNewYearsDay = new_years_day()\r\n\r\nEpiphany = epiphany()\r\n\r\nLabourDay = european_labour_day()\r\n\r\nAscensionDay = ascension_day()\r\n\r\nMidsummerEve = midsummer_eve()\r\n\r\nIndependenceDay = Holiday('Finland Independence Day', month=12, day=6)\r\n\r\nChristmasEve = christmas_eve()\r\nChristmas = christmas()\r\nBoxingDay = boxing_day()\r\n\r\nNewYearsEve = new_years_eve()\r\n\r\n\r\nclass HELExchangeCalendar(MarketCalendar):\r\n \"\"\"\r\n Calendar for the Helsinki Stock Exchange in Finland.\r\n Open Time: 10:00 AM, CET (Eastern European Time)\r\n Close Time: 6:30 PM, CET (Eastern European Time)\r\n Regularly-Observed Holidays:\r\n - New Year's Day\r\n - Epiphany\r\n - Good Friday\r\n - Easter Monday\r\n - Labour Day\r\n - Ascension Day\r\n - Midsummer Eve\r\n - Independence Day\r\n - Christmas Eve\r\n - Christmas Day\r\n - Boxing Day\r\n - New Year's Eve\r\n Early Closes:\r\n - None\r\n \"\"\"\r\n\r\n aliases = ['HEL']\r\n\r\n @property\r\n def name(self):\r\n return \"HEL\"\r\n\r\n @property\r\n def tz(self):\r\n return timezone(\"Europe/Helsinki\")\r\n\r\n @property\r\n def open_time_default(self):\r\n return time(10, 1, tzinfo=self.tz)\r\n\r\n @property\r\n def close_time_default(self):\r\n return time(18, 30, tzinfo=self.tz)\r\n\r\n @property\r\n def regular_holidays(self):\r\n return AbstractHolidayCalendar(rules=[\r\n NewYearsDay,\r\n Epiphany,\r\n GoodFriday,\r\n EasterMonday,\r\n LabourDay,\r\n AscensionDay,\r\n MidsummerEve,\r\n IndependenceDay,\r\n ChristmasEve,\r\n Christmas,\r\n BoxingDay,\r\n NewYearsEve,\r\n ])"
] |
[
[
"pandas.tseries.holiday.Holiday",
"pandas.tseries.holiday.AbstractHolidayCalendar"
]
] |
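The HEL calendar above is assembled entirely from `pandas.tseries.holiday` rules, and the pattern generalizes. A cut-down sketch showing how such a rule set resolves to concrete dates; the class name and date range are assumptions for illustration:

```python
from pandas.tseries.holiday import (AbstractHolidayCalendar, EasterMonday,
                                    GoodFriday, Holiday)

class MiniHELCalendar(AbstractHolidayCalendar):
    # A subset of the rules composed in regular_holidays above.
    rules = [
        Holiday('Finland Independence Day', month=12, day=6),
        GoodFriday,
        EasterMonday,
    ]

# Resolve the rules to the observed dates in one year:
print(MiniHELCalendar().holidays(start='2021-01-01', end='2021-12-31'))
```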
kensho-technologies/kwnlp-preprocessor
|
[
"97b13aa109018e38d528e1e9c11f69e0847aa069"
] |
[
"kwnlp_preprocessor/task_21p1_gather_wikidata_chunks.py"
] |
[
"# Copyright 2021-present Kensho Technologies, LLC.\nimport logging\nimport os\nimport re\n\nimport pandas as pd\n\nfrom kwnlp_preprocessor import argconfig\nfrom kwnlp_preprocessor import utils\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef main(wd_yyyymmdd: str, data_path: str = argconfig.DEFAULT_KWNLP_DATA_PATH) -> None:\n\n for sample in [\n \"p31-claim\",\n \"p279-claim\",\n \"qpq-claim\",\n \"item\",\n \"item-alias\",\n \"item-statements\",\n \"property\",\n \"property-alias\",\n \"skipped-entity\",\n ]:\n\n in_dump_path = os.path.join(\n data_path,\n f\"wikidata-derived-{wd_yyyymmdd}\",\n f\"{sample}-chunks\",\n )\n logger.info(f\"in_dump_path: {in_dump_path}\")\n\n out_dump_path = os.path.join(\n data_path,\n f\"wikidata-derived-{wd_yyyymmdd}\",\n f\"{sample}\",\n )\n out_dump_file = os.path.join(\n out_dump_path,\n f\"kwnlp-wikidata-{wd_yyyymmdd}-{sample}.csv\",\n )\n logger.info(f\"out_dump_path: {out_dump_path}\")\n os.makedirs(out_dump_path, exist_ok=True)\n\n pattern = re.compile(r\"kwnlp-wikidata-\\d{8}-chunk-(\\d{4})-\" + sample + \".csv\")\n all_file_names = [\n match.string for match in utils._get_ordered_files_from_path(in_dump_path, pattern)\n ]\n\n df = pd.DataFrame()\n for file_name in all_file_names:\n file_path = os.path.join(in_dump_path, file_name)\n df1 = pd.read_csv(file_path)\n df = pd.concat([df, df1])\n df.to_csv(out_dump_file, index=False)\n\n\nif __name__ == \"__main__\":\n\n description = \"gather wikidata chunks\"\n arg_names = [\"wd_yyyymmdd\", \"data_path\", \"loglevel\"]\n parser = argconfig.get_argparser(description, arg_names)\n\n args = parser.parse_args()\n logging.basicConfig(level=args.loglevel)\n logger.info(f\"args={args}\")\n\n main(args.wd_yyyymmdd, data_path=args.data_path)\n"
] |
[
[
"pandas.DataFrame",
"pandas.read_csv",
"pandas.concat"
]
] |
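The gather loop above grows a DataFrame with one `pd.concat` per chunk, which re-copies all accumulated rows on every iteration. A sketch of the usual linear-time alternative (collect all frames, then concatenate once); the function and argument names are illustrative, not part of the repository:

```python
import os
import pandas as pd

def gather_csv_chunks(in_dir: str, file_names: list, out_file: str) -> None:
    # Read every chunk first, then concatenate exactly once at the end.
    frames = [pd.read_csv(os.path.join(in_dir, name)) for name in file_names]
    pd.concat(frames, ignore_index=True).to_csv(out_file, index=False)
```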
dc2016bte0006/Latex_OCR
|
[
"2e919617da8f2f7f3445ed8d1953a5664c1aaba7"
] |
[
"eval.py"
] |
[
"from dataset.dataset import Im2LatexDataset\r\nimport os\r\nimport sys\r\nimport argparse\r\nimport logging\r\nimport yaml\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torchtext.data import metrics\r\nfrom munch import Munch\r\nfrom tqdm.auto import tqdm\r\nimport wandb\r\nfrom Levenshtein import distance\r\n\r\nfrom models import get_model, Model\r\nfrom utils import *\r\n\r\n\r\ndef detokenize(tokens, tokenizer):\r\n toks = [tokenizer.convert_ids_to_tokens(tok) for tok in tokens]\r\n for b in range(len(toks)):\r\n for i in reversed(range(len(toks[b]))):\r\n if toks[b][i] is None:\r\n toks[b][i] = ''\r\n toks[b][i] = toks[b][i].replace('Ġ', ' ').strip()\r\n if toks[b][i] in (['[BOS]', '[EOS]', '[PAD]']):\r\n del toks[b][i]\r\n return toks\r\n\r\n\r\[email protected]_grad()\r\ndef evaluate(model: Model, dataset: Im2LatexDataset, args: Munch, num_batches: int = None, name: str = 'test'):\r\n \"\"\"evaluates the model. Returns bleu score on the dataset\r\n\r\n Args:\r\n model (torch.nn.Module): the model\r\n dataset (Im2LatexDataset): test dataset\r\n args (Munch): arguments\r\n num_batches (int): How many batches to evaluate on. Defaults to None (all batches).\r\n name (str, optional): name of the test e.g. val or test for wandb. Defaults to 'test'.\r\n\r\n Returns:\r\n bleu_score: BLEU score of validation set.\r\n \"\"\"\r\n assert len(dataset) > 0\r\n device = args.device\r\n log = {}\r\n bleus, edit_dists = [], []\r\n bleu_score, edit_distance = 0, 1\r\n pbar = tqdm(enumerate(iter(dataset)), total=len(dataset))\r\n for i, (seq, im) in pbar:\r\n if seq is None or im is None:\r\n continue\r\n tgt_seq, tgt_mask = seq['input_ids'].to(device), seq['attention_mask'].bool().to(device)\r\n encoded = model.encoder(im.to(device))\r\n #loss = decoder(tgt_seq, mask=tgt_mask, context=encoded)\r\n dec = model.decoder.generate(torch.LongTensor([args.bos_token]*len(encoded))[:, None].to(device), args.max_seq_len,\r\n eos_token=args.pad_token, context=encoded, temperature=args.get('temperature', .2))\r\n pred = detokenize(dec, dataset.tokenizer)\r\n truth = detokenize(seq['input_ids'], dataset.tokenizer)\r\n bleus.append(metrics.bleu_score(pred, [alternatives(x) for x in truth]))\r\n for predi, truthi in zip(token2str(dec, dataset.tokenizer), token2str(seq['input_ids'], dataset.tokenizer)):\r\n ts = post_process(truthi)\r\n if len(ts) > 0:\r\n edit_dists.append(distance(post_process(predi), ts)/len(ts))\r\n pbar.set_description('BLEU: %.3f, ED: %.2e' % (np.mean(bleus), np.mean(edit_dists)))\r\n if num_batches is not None and i >= num_batches:\r\n break\r\n if len(bleus) > 0:\r\n bleu_score = np.mean(bleus)\r\n log[name+'/bleu'] = bleu_score\r\n if len(edit_dists) > 0:\r\n edit_distance = np.mean(edit_dists)\r\n log[name+'/edit_distance'] = edit_distance\r\n if args.wandb:\r\n # samples\r\n pred = token2str(dec, dataset.tokenizer)\r\n truth = token2str(seq['input_ids'], dataset.tokenizer)\r\n table = wandb.Table(columns=[\"Truth\", \"Prediction\"])\r\n for k in range(min([len(pred), args.test_samples])):\r\n table.add_data(post_process(truth[k]), post_process(pred[k]))\r\n log[name+'/examples'] = table\r\n wandb.log(log)\r\n else:\r\n print('\\n%s\\n%s' % (truth, pred))\r\n print('BLEU: %.2f' % bleu_score)\r\n return bleu_score, edit_distance\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Test model')\r\n parser.add_argument('--config', default='settings/config.yaml', help='path to yaml config file', type=argparse.FileType('r'))\r\n parser.add_argument('-c', 
'--checkpoint', default='checkpoints/weights.pth', type=str, help='path to model checkpoint')\r\n    parser.add_argument('-d', '--data', default='dataset/data/val.pkl', type=str, help='Path to Dataset pkl file')\r\n    parser.add_argument('--no-cuda', action='store_true', help='Use CPU')\r\n    parser.add_argument('-b', '--batchsize', type=int, default=10, help='Batch size')\r\n    parser.add_argument('--debug', action='store_true', help='DEBUG')\r\n    parser.add_argument('-t', '--temperature', type=float, default=.333, help='sampling temperature')\r\n    parser.add_argument('-n', '--num-batches', type=int, default=None, help='how many batches to evaluate on. Defaults to None (all)')\r\n\r\n    parsed_args = parser.parse_args()\r\n    with parsed_args.config as f:\r\n        params = yaml.load(f, Loader=yaml.FullLoader)\r\n    args = parse_args(Munch(params))\r\n    args.testbatchsize = parsed_args.batchsize\r\n    args.wandb = False\r\n    args.temperature = parsed_args.temperature\r\n    logging.getLogger().setLevel(logging.DEBUG if parsed_args.debug else logging.WARNING)\r\n    seed_everything(args.seed if 'seed' in args else 42)\r\n    model = get_model(args)\r\n    if parsed_args.checkpoint is not None:\r\n        model.load_state_dict(torch.load(parsed_args.checkpoint, args.device))\r\n    dataset = Im2LatexDataset().load(parsed_args.data)\r\n    valargs = args.copy()\r\n    valargs.update(batchsize=args.testbatchsize, keep_smaller_batches=True, test=True)\r\n    dataset.update(**valargs)\r\n    evaluate(model, dataset, args, num_batches=parsed_args.num_batches)\r\n"
] |
[
[
"torch.no_grad",
"numpy.mean",
"torch.load"
]
] |
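`evaluate` above reports two scores: a corpus BLEU via `torchtext.data.metrics` and a Levenshtein edit distance normalized by the reference length. A sketch of the normalized edit distance in isolation, assuming the same `Levenshtein` package the script imports:

```python
from Levenshtein import distance

def normalized_edit_distance(pred: str, truth: str) -> float:
    # Mirrors edit_dists.append(distance(post_process(predi), ts) / len(ts))
    # in evaluate(): 0.0 is a perfect match, values near 1.0 mean roughly
    # every reference character needs an edit.
    if not truth:
        return 0.0 if not pred else 1.0
    return distance(pred, truth) / len(truth)

# normalized_edit_distance('x^{2}+1', 'x^2+1')  # -> 0.4 (2 edits / 5 chars)
```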
tilman/compositional_elements
|
[
"45271196ed01d0515357c7abdf35d6b87f2036d5"
] |
[
"evaluation/compare_final2_compoelem.py"
] |
[
"# call this script with `python -m evaluation.evaluate_poselines_globalaction`\nimport os\nimport numpy as np\nimport datetime\nfrom tqdm import tqdm\nfrom . import eval_utils\nimport pickle\nimport copyreg\nimport cv2\n\nfrom .compare_deepfeatures import negative_cosine_dist_flatten, eucl_dist_flatten\nfrom .compare_sift import compare_siftBFMatcher1\nfrom .compare_orb import compare_orbBFMatcher1\nfrom .compare_brief import compare_briefBFMatcher1\n\nfrom compoelem.config import config\nfrom compoelem.generate import global_action, pose_abstraction\nfrom compoelem.compare.pose_line import compare_pose_lines_3, compare_pose_lines_3, filter_pose_line_ga_result\nfrom compoelem.compare.normalize import minmax_norm_by_imgrect, minmax_norm_by_bbox, norm_by_global_action\n\n\n\n\n\n# fix cv2 keypoint pickling error\ndef _pickle_keypoint(keypoint): # : cv2.KeyPoint\n return cv2.KeyPoint, (\n keypoint.pt[0],\n keypoint.pt[1],\n keypoint.size,\n keypoint.angle,\n keypoint.response,\n keypoint.octave,\n keypoint.class_id,\n )\n# Apply the bundling to pickle\ncopyreg.pickle(cv2.KeyPoint().__class__, _pickle_keypoint)\n\ndef compare_setupA(data, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight):\n if norm_method != 'norm_by_global_action':\n raise NotImplementedError(\"only norm_by_global_action is implemented\")\n res_metrics = {}\n precision_curves = {}\n all_retrieval_res = []\n for query_data in tqdm(data, total=len(data)):\n compare_results = []\n #query_pose_lines = minmax_norm_by_imgrect(query_data[\"compoelem\"][pose_lines], query_data[\"width\"], query_data[\"height\"])\n query_pose_lines_seq = norm_by_global_action(query_data[\"compoelem\"][\"pose_lines\"], query_data[\"compoelem\"][\"global_action_lines\"], fallback=glac_fallback)\n for target_data in data:\n if query_data[\"className\"] == target_data[\"className\"] and query_data[\"imgName\"] == target_data[\"imgName\"]:\n continue\n if compare_other == 'vgg19_ncos':\n r_addition = negative_cosine_dist_flatten(query_data[\"imageNet_vgg19_bn_features\"], target_data[\"imageNet_vgg19_bn_features\"])\n elif compare_other == 'resnet50_cos':\n r_addition = negative_cosine_dist_flatten(query_data[\"places365_resnet50_feature_noFC\"], target_data[\"places365_resnet50_feature_noFC\"])\n elif compare_other == 'resnet50_eucl':\n r_addition = eucl_dist_flatten(query_data[\"places365_resnet50_feature_noFC\"], target_data[\"places365_resnet50_feature_noFC\"])\n elif compare_other == 'sift_bfm1':\n r_addition = compare_siftBFMatcher1(query_data[\"sift\"], target_data[\"sift\"])\n elif compare_other == 'orb_bfm1':\n r_addition = compare_orbBFMatcher1(query_data[\"orb\"], target_data[\"orb\"])\n elif compare_other == 'brief_bfm1':\n r_addition = compare_briefBFMatcher1(query_data[\"brief\"], target_data[\"brief\"])\n elif compare_other is None:\n r_addition = 0\n else:\n raise NotImplementedError(\"not implemented compare_other\", compare_other)\n\n #combined_ratio, hit_ratio, neg_mean_distance_hits = compare_pose_lines_3(query_pose_lines, minmax_norm_by_imgrect(target_data[\"compoelem\"][pose_lines], target_data[\"width\"], target_data[\"height\"]))\n target_pose_lines_seq = norm_by_global_action(target_data[\"compoelem\"][\"pose_lines\"], target_data[\"compoelem\"][\"global_action_lines\"], fallback=glac_fallback)\n pair_compare_results = []\n for query_pose_lines in query_pose_lines_seq:\n for target_pose_lines in target_pose_lines_seq:\n combined_ratio, hit_ratio, neg_mean_distance_hits = 
compare_pose_lines_3(query_pose_lines, target_pose_lines)\n pair_compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, target_data))\n combined_ratio, hit_ratio, neg_mean_distance_hits, target_data = filter_pose_line_ga_result(pair_compare_results)\n\n a = additional_feature_weight\n wra = r_addition * (1-a)\n r_combi1 = wra * (1 - combined_ratio * a)\n r_combi2 = wra + (1 - combined_ratio * a)\n r_combi3 = wra * (1 - neg_mean_distance_hits * a)\n r_combi4 = wra + (1 - neg_mean_distance_hits * a)\n\n\n compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, r_combi1, r_combi2, r_combi3, r_combi4, r_addition, target_data))\n compare_results = np.array(compare_results)\n sorted_compare_results = sort_method(compare_results)\n query_label = query_data[\"className\"]\n res_labels = list(map(lambda x: x[\"className\"], sorted_compare_results[:,-1]))\n res_keys = list(map(lambda x: x[\"className\"]+'_'+x[\"imgName\"], sorted_compare_results[:,-1]))\n all_retrieval_res.append(np.array([\n query_data[\"className\"]+'_'+query_data[\"imgName\"],\n query_label,\n res_keys,\n res_labels\n ]))\n metrics = eval_utils.score_retrievals(query_label, res_labels)\n label = metrics[\"label\"]\n if label in precision_curves:\n precision_curves[label].append(metrics[\"precision_at_rank\"])\n else:\n precision_curves[label] = [metrics[\"precision_at_rank\"]]\n for key in metrics.keys():\n if key != \"label\":\n if key not in res_metrics:\n res_metrics[key] = {}\n if label not in res_metrics[key]:\n res_metrics[key][label] = []\n res_metrics[key][label].append(metrics[key])\n return (eval_utils.get_eval_dataframe(res_metrics), precision_curves, np.array(all_retrieval_res))\n\ndef compare_setupB(data, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight):\n if compare_other is not None:\n raise NotImplementedError(\"compare other not implemented\")\n res_metrics = {}\n precision_curves = {}\n all_retrieval_res = []\n for query_data in tqdm(data, total=len(data)):\n compare_results = []\n if norm_method == 'none':\n query_pose_lines = query_data[\"compoelem\"][\"pose_lines\"]\n elif norm_method == 'minmax_norm_by_imgrect':\n query_pose_lines = minmax_norm_by_imgrect(query_data[\"compoelem\"][\"pose_lines\"], query_data[\"compoelem\"][\"width\"], query_data[\"compoelem\"][\"height\"])\n elif norm_method == 'minmax_norm_by_bbox':\n query_pose_lines = minmax_norm_by_bbox(query_data[\"compoelem\"][\"pose_lines\"])\n else:\n raise NotImplementedError(\"norm_method: {} not implemented\".format(norm_method))\n for target_data in data:\n if query_data[\"className\"] == target_data[\"className\"] and query_data[\"imgName\"] == target_data[\"imgName\"]:\n continue\n if norm_method == 'none':\n target_pose_lines = target_data[\"compoelem\"][\"pose_lines\"]\n elif norm_method == 'minmax_norm_by_imgrect':\n target_pose_lines = minmax_norm_by_imgrect(target_data[\"compoelem\"][\"pose_lines\"], target_data[\"compoelem\"][\"width\"], target_data[\"compoelem\"][\"height\"])\n elif norm_method == 'minmax_norm_by_bbox':\n target_pose_lines = minmax_norm_by_bbox(target_data[\"compoelem\"][\"pose_lines\"])\n else:\n raise NotImplementedError(\"norm_method: {} not implemented\".format(norm_method))\n combined_ratio, hit_ratio, neg_mean_distance_hits = compare_pose_lines_3(query_pose_lines, target_pose_lines)\n compare_results.append((combined_ratio, hit_ratio, neg_mean_distance_hits, target_data))\n compare_results = np.array(compare_results)\n sorted_compare_results = 
sort_method(compare_results)\n        query_label = query_data[\"className\"]\n        res_labels = list(map(lambda x: x[\"className\"], sorted_compare_results[:,-1]))\n        res_keys = list(map(lambda x: x[\"className\"]+'_'+x[\"imgName\"], sorted_compare_results[:,-1]))\n        all_retrieval_res.append(np.array([\n            query_data[\"className\"]+'_'+query_data[\"imgName\"],\n            query_label,\n            res_keys,\n            res_labels\n        ]))\n        metrics = eval_utils.score_retrievals(query_label, res_labels)\n        label = metrics[\"label\"]\n        if label in precision_curves:\n            precision_curves[label].append(metrics[\"precision_at_rank\"])\n        else:\n            precision_curves[label] = [metrics[\"precision_at_rank\"]]\n        for key in metrics.keys():\n            if key != \"label\":\n                if key not in res_metrics:\n                    res_metrics[key] = {}\n                if label not in res_metrics[key]:\n                    res_metrics[key][label] = []\n                res_metrics[key][label].append(metrics[key])\n    return (eval_utils.get_eval_dataframe(res_metrics), precision_curves, np.array(all_retrieval_res))\n\n# indices for sorting functions\n# 0: combined_ratio\n# 1: hit_ratio\n# 2: neg_mean_distance_hits\n# 3: r_combi1\n# 4: r_combi2\n# 5: r_combi3\n# 6: r_combi4\n# 7: r_addition\n# 8: target_data\n\ndef cr_desc(compare_results):\n    sorted_compare_results = compare_results[np.argsort(compare_results[:,0])][::-1]\n    return sorted_compare_results\n\ndef nmd_desc(compare_results):\n    sorted_compare_results = compare_results[np.argsort(compare_results[:,2])][::-1]\n    return sorted_compare_results\n\ndef hr_nmd_desc(compare_results):\n    # np.lexsort treats its LAST key as the primary one:\n    # hr is the primary sorting key, nmd the secondary tie-breaker\n    sorted_compare_results = compare_results[np.lexsort((compare_results[:,2], compare_results[:,1]))][::-1]\n    return sorted_compare_results\n\n\n# additional methods:\ndef hr_additional_desc(compare_results):\n    # hr is the primary sorting key, r_addition the secondary tie-breaker\n    sorted_compare_results = compare_results[np.lexsort((-compare_results[:,7], compare_results[:,1]))][::-1]\n    return sorted_compare_results\n\ndef hr_combi3_desc(compare_results):\n    # hr is the primary sorting key, r_combi3 the secondary tie-breaker\n    sorted_compare_results = compare_results[np.lexsort((-compare_results[:,5], compare_results[:,1]))][::-1]\n    return sorted_compare_results\n\ndef hr_combi4_desc(compare_results):\n    # hr is the primary sorting key, r_combi4 the secondary tie-breaker\n    sorted_compare_results = compare_results[np.lexsort((-compare_results[:,6], compare_results[:,1]))][::-1]\n    return sorted_compare_results\n\ndef combi1_asc(compare_results):\n    sorted_compare_results = compare_results[np.argsort(compare_results[:,3])]\n    return sorted_compare_results\n\ndef combi2_asc(compare_results):\n    sorted_compare_results = compare_results[np.argsort(compare_results[:,4])]\n    return sorted_compare_results\n\n\nosuname = os.uname().nodename\nprint(\"osuname\", osuname)\nif osuname == 'MBP-von-Tilman' or osuname == 'MacBook-Pro-von-Tilman.local':\n    COMPOELEM_ROOT = \"/Users/tilman/Documents/Programme/Python/new_bachelor_thesis/compoelem\"\nelif osuname == 'lme117':\n    COMPOELEM_ROOT = \"/home/zi14teho/compositional_elements\"\nelse:\n    COMPOELEM_ROOT = os.getenv('COMPOELEM_ROOT')\nDATASTORE_NAME = \"combined_datastore_ceb_dataset\"\nDATASTORE_FILE = COMPOELEM_ROOT+\"/final_evaluation/\"+DATASTORE_NAME+\".pkl\"\nEVAL_RESULTS_FILE_DIR = COMPOELEM_ROOT+\"/final_evaluation/final2pkl/\"\nDATASTORE_NAME = 
\"combined_datastore_ceb_dataset\"\ndatastore = pickle.load(open(DATASTORE_FILE, \"rb\"))\ndatastore_name = DATASTORE_NAME\n\n# def eval_single_combination(\n# norm_method,\n# sort_method_name,\n \n# correction_angle,\n# cone_opening_angle,\n# cone_scale_factor,\n# cone_base_scale_factor,\n# filter_threshold,\n\n# poseline_fallback,\n# bisection_fallback,\n# glac_fallback,\n# ):\n\n# print({\n# \"norm_method\":norm_method,\n# \"sort_method_name\":sort_method_name,\n# \"correction_angle\":correction_angle,\n# \"cone_opening_angle\":cone_opening_angle,\n# \"cone_scale_factor\":cone_scale_factor,\n# \"cone_base_scale_factor\":cone_base_scale_factor,\n# \"filter_threshold\":filter_threshold,\n# \"poseline_fallback\":poseline_fallback,\n# \"bisection_fallback\":bisection_fallback,\n# \"glac_fallback\":glac_fallback,\n# })\ndef eval_single_combination(arg_obj):\n print(arg_obj)\n experiment_name = arg_obj[\"experiment_name\"]\n norm_method = arg_obj[\"norm_method\"]\n sort_method_name = arg_obj[\"sort_method_name\"]\n correction_angle = arg_obj[\"correction_angle\"]\n cone_opening_angle = arg_obj[\"cone_opening_angle\"]\n cone_scale_factor = arg_obj[\"cone_scale_factor\"]\n cone_base_scale_factor = arg_obj[\"cone_base_scale_factor\"]\n filter_threshold = arg_obj[\"filter_threshold\"]\n poseline_fallback = arg_obj[\"poseline_fallback\"]\n bisection_fallback = arg_obj[\"bisection_fallback\"]\n glac_fallback = arg_obj[\"glac_fallback\"]\n additional_feature_weight = arg_obj[\"additional_feature_weight\"] if \"additional_feature_weight\" in arg_obj else 0.5\n compare_other = arg_obj[\"compare_other\"] if \"compare_other\" in arg_obj else None\n\n setup = compare_setupA if norm_method == 'norm_by_global_action' else compare_setupB\n if sort_method_name == 'cr_desc':\n sort_method = cr_desc\n elif sort_method_name == 'nmd_desc':\n sort_method = nmd_desc\n elif sort_method_name == 'hr_nmd_desc':\n sort_method = hr_nmd_desc\n elif sort_method_name == 'hr_additional_desc':\n sort_method = hr_additional_desc\n elif sort_method_name == 'hr_combi3_desc':\n sort_method = hr_combi3_desc\n elif sort_method_name == 'hr_combi4_desc':\n sort_method = hr_combi4_desc\n elif sort_method_name == 'combi1_asc':\n sort_method = combi1_asc\n elif sort_method_name == 'combi2_asc':\n sort_method = combi2_asc\n else:\n raise NotImplementedError(\"sort_method: {} not implemented\".format(sort_method_name))\n\n config[\"bisection\"][\"correction_angle\"] = correction_angle\n config[\"bisection\"][\"cone_opening_angle\"] = cone_opening_angle\n config[\"bisection\"][\"cone_scale_factor\"] = cone_scale_factor\n config[\"bisection\"][\"cone_base_scale_factor\"] = cone_base_scale_factor\n config[\"compare\"][\"filter_threshold\"] = filter_threshold\n\n new_datastore_values = []\n for key in datastore.keys():\n poses = datastore[key][\"compoelem\"][\"poses\"]\n datastore[key][\"compoelem\"][\"global_action_lines\"] = global_action.get_global_action_lines(poses, bisection_fallback)\n datastore[key][\"compoelem\"][\"pose_lines\"] = pose_abstraction.get_pose_lines(poses, poseline_fallback)\n new_datastore_values.append(datastore[key])\n\n start_time = datetime.datetime.now()\n eval_dataframe, precision_curves, all_retrieval_res = setup(new_datastore_values, sort_method, norm_method, glac_fallback, compare_other, additional_feature_weight)\n norm_alias = {\n \"minmax_norm_by_imgrect\":\"Size\",\n \"minmax_norm_by_bbox\":\"Bbox\",\n \"norm_by_global_action\":\"Glac\",\n \"none\":\"None\",\n }\n filename = 
\"final2_time{}_norm{}_{}_ca{}_co{}_cs{}_cbs{}_th{}_fbPl{}_fbBis{}_fbGa{}_other{}_aw{}.pkl\".format(\n start_time.strftime(\"%d%m%y%H%M%S\"),\n\n norm_alias[norm_method],\n sort_method.__name__,\n\n correction_angle,\n cone_opening_angle,\n cone_scale_factor,\n cone_base_scale_factor,\n filter_threshold,\n\n poseline_fallback,\n bisection_fallback,\n glac_fallback,\n\n compare_other,\n additional_feature_weight,\n )\n print(\"filename\", filename, \"p@1\", eval_dataframe[\"p@1\"][\"total (mean)\"])\n res_summary = {\n \"experiment_name\": experiment_name,\n \"experiment_id\": filename,\n \"filename\": filename,\n \"datetime\": start_time,\n \"setup\": setup.__name__,\n \"eval_time_s\": (datetime.datetime.now() - start_time).seconds,\n \"datastore_name\": datastore_name,\n\n \"eval_dataframe\": eval_dataframe,\n \"precision_curves\": precision_curves,\n \"all_retrieval_res\": all_retrieval_res,\n \n \"config\": config,\n\n \"norm_method\": norm_method,\n \"compare_method\": \"compare_pose_lines_3\",\n \"sort_method\": sort_method.__name__,\n\n \"compare_other\": compare_other,\n\n \"correction_angle\": correction_angle,\n \"cone_opening_angle\": cone_opening_angle,\n \"cone_scale_factor\": cone_scale_factor,\n \"filter_threshold\": filter_threshold,\n\n \"poseline_fallback\": poseline_fallback,\n \"bisection_fallback\": bisection_fallback,\n \"glac_fallback\": glac_fallback,\n }\n pickle.dump(res_summary, open(EVAL_RESULTS_FILE_DIR+filename, \"wb\"))\n"
] |
[
[
"numpy.array",
"numpy.lexsort",
"numpy.argsort"
]
] |
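The sorting helpers in the row above lean on a subtle `np.lexsort` convention: the *last* key in the tuple is the primary sort key, and the trailing `[::-1]` turns the ascending result into a descending ranking. A tiny demonstration with made-up scores:

```python
import numpy as np

hit_ratio = np.array([0.5, 0.9, 0.5])         # primary key (last in tuple)
neg_mean_dist = np.array([-0.2, -0.7, -0.1])  # secondary tie-breaker

# Ascending lexsort, then reverse for "best first", as in hr_nmd_desc:
order = np.lexsort((neg_mean_dist, hit_ratio))[::-1]
print(order)  # [1 2 0]: highest hit_ratio first, ties broken by distance
```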
jae1001/FibreCOP
|
[
"328cf5feb1c8447a20d52b23035098558c3a6e8b"
] |
[
"ODFnOPsimulator.py"
] |
[
"'''This program calculates the Chebyshev/Herman orientation parameters\r\nfrom simulated intensity distribution data.\r\n\r\nCode developed by Dr. A. Kaniyoor,\r\nMacromolecular Materials Laboratory,University of Cambridge, Cambridge, UK\r\n2020-2021\r\n\r\nReference Publication: Quantifying Alignment in Carbon Nanotube Yarns and Similar 2D Anisotropic Systems\r\nA. Kaniyoor, T.S. Gspann, J. E. Mizen, J.A. Elliott.\r\nTo be submitted\r\n\r\nThere are two main programs here. Program 1 (True by default) generates orientation distribution functions with varying widths \r\nand calculates orientation parameters from the ODFs. To view the ODFs, please enable command - plot ODF=True (False by default)\r\nProgram 2 (off/False by default) generates ODFs with secondary peaks whose height can be adjusted in the code, and calculates orientation parameters.\r\n\r\n'''\r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.special as sp\r\nfrom matplotlib import rcParams\r\nrcParams['font.family']='Arial'\r\nrcParams['legend.fontsize']=8 # 11 for 0.5 column figure\r\nrcParams['axes.labelsize']=12 #16 for 0.5 column figure,12 for 1 column figure\r\nrcParams['xtick.labelsize']=10 # 14 for 0.5 column figure\r\nrcParams['ytick.labelsize']=10\r\nrcParams['lines.markersize']=3\r\nrcParams['lines.linewidth']=1\r\nrcParams['lines.antialiased']=True\r\nrcParams['mathtext.default']='regular'\r\nrcParams['figure.figsize']=3.5,3.2 #3.5,2.6 is 1 column figure\r\nrcParams['figure.dpi']=150 # change dpi=300 for publication quality images\r\n\r\n\r\n\r\n# defining the functional forms\r\ndef gaussian(x, mu, hwidth):\r\n sigma=hwidth/(np.sqrt(2*np.log(2)))\r\n return (1/(sigma*np.sqrt(2*np.pi)))*np.exp(-(np.abs(x - mu)/(np.sqrt(2)*sigma))**2)\r\n\r\ndef lorentzian(x,mu,hwidth):\r\n return (1/(np.pi*hwidth))*(hwidth**2)/(((x-mu)**2)+hwidth**2)\r\n\r\ndef gnd(x,mu,hwidth,beta):\r\n alpha=hwidth/((np.log(2))**(1/beta))\r\n return (beta/(2*alpha*sp.gamma(1/beta)))*np.exp(-(np.abs(x - mu)/alpha)**beta)\r\n\r\n\r\n\r\n# generating angle data\r\na=5000\r\nx=[]\r\nx = np.linspace(0, 360, a)\r\ncosSqX=np.power(np.cos(x*np.pi/180),2)\r\nsinX=np.sin(x*np.pi/180)\r\ncosFtX=np.power(np.cos(x*np.pi/180),4)\r\n\r\n\r\n\r\n# Choose the program to run: program 1 - primary peaks only ; prgoram 2 - with secondary peaks\r\n\r\nprogram1 = True\r\nprogram2 = False\r\n\r\n\r\n\r\n\r\n\r\n###### PROGRAM 1 for generating ODFs with primary peaks #####\r\n\r\n\r\nif program1:\r\n \r\n # setting peak parameters\r\n c=[0.01,0.6,2.9,5.7,9.5,11.5,14.3,19.1,28.6,38.2,52.1,57.3,71.6,81.8,95.5,114.6,120,135,150,165,180]\r\n #c=[5]\r\n beta=1.5\r\n mu = [0,180,360]\r\n mu1=[-90,90,270,450]\r\n\r\n \r\n plotODF = False # True only if you want to view ODFs, usually when c is a fixed value\r\n\r\n \r\n # generating ODFs oriented along reference direction\r\n gaussianOP=np.empty_like(x)\r\n lorentzianOP=np.empty_like(x)\r\n gndOP=np.empty_like(x)\r\n for hwidth in c:\r\n g=gaussian(x, mu[0], hwidth)+gaussian(x, mu[1],hwidth)+gaussian(x, mu[2],hwidth)\r\n l=lorentzian(x,mu[0],hwidth)+lorentzian(x,mu[1],hwidth)+lorentzian(x,mu[2],hwidth)\r\n gn=gnd(x,mu[0],hwidth,beta)+gnd(x,mu[1],hwidth,beta)+gnd(x,mu[2],hwidth,beta)\r\n gaussianOP = np.vstack((gaussianOP,g))\r\n lorentzianOP= np.vstack((lorentzianOP,l))\r\n gndOP=np.vstack((gndOP,gn))\r\n\r\n #output= np.array([x,l])\r\n #np.savetxt('ModelData.txt',output.T)\r\n\r\n # generating ODFs oriented perpendicular to reference direction \r\n gaussianOP1=np.empty_like(x)\r\n 
lorentzianOP1=np.empty_like(x)\r\n gndOP1=np.empty_like(x)\r\n\r\n for hwidth in c:\r\n g1=gaussian(x, mu1[0],hwidth)+gaussian(x, mu1[1],hwidth)+gaussian(x, mu1[2],hwidth)+gaussian(x, mu1[3],hwidth)\r\n l1=lorentzian(x,mu1[0],hwidth)+lorentzian(x,mu1[1],hwidth)+lorentzian(x,mu1[2],hwidth)+lorentzian(x,mu1[3],hwidth)\r\n gn1=gnd(x, mu1[0], hwidth,beta)+gnd(x, mu1[1], hwidth,beta)++gnd(x, mu1[2], hwidth,beta)++gnd(x, mu1[3], hwidth,beta)\r\n gaussianOP1 = np.vstack((gaussianOP1,g1))\r\n lorentzianOP1= np.vstack((lorentzianOP1,l1))\r\n gndOP1=np.vstack((gndOP1,gn1))\r\n \r\n\r\n # plotting ODF\r\n if plotODF:\r\n plt.figure(figsize=(3.5,3))\r\n plt.plot(x,lorentzianOP[1::,:].T,'b-',label='LD')\r\n plt.plot(x,gndOP[1::,:].T,'g-.',label='GND')\r\n plt.plot(x,gaussianOP[1::,:].T,'r--',label='GD')\r\n plt.legend(bbox_to_anchor=(0,1.1,1,0), loc=\"lower left\",mode=\"expand\", ncol=3)\r\n plt.xlabel('Angle (\\xb0)')\r\n plt.ylabel('Intensity (a.u.)')\r\n plt.xticks([0,90,180,270,360])\r\n plt.locator_params('y',nbins=6)\r\n plt.ticklabel_format(axis='y',style='sci',scilimits=(0,0))\r\n plt.tight_layout()\r\n plt.minorticks_on()\r\n plt.show()\r\n plt.close()\r\n\r\n plt.figure(figsize=(3.5,3))\r\n plt.plot(x,lorentzianOP1[1::,:].T,'b-',label='LD')\r\n plt.plot(x,gndOP1[1::,:].T,'g-.',label='GND')\r\n plt.plot(x,gaussianOP1[1::,:].T,'r--',label='GD')\r\n plt.legend(bbox_to_anchor=(0,1.1,1,0), loc=\"lower left\",mode=\"expand\", ncol=3)\r\n plt.xlabel('Angle (\\xb0)')\r\n plt.ylabel('Intensity (a.u.)')\r\n plt.xticks([0,90,180,270,360])\r\n plt.locator_params('y',nbins=6)\r\n plt.ticklabel_format(axis='y',style='sci',scilimits=(0,0))\r\n plt.tight_layout()\r\n plt.show()\r\n plt.close()\r\n\r\n # calculating orientation parameters\r\n P2g=[]\r\n P2l=[]\r\n T2g=[]\r\n T2l=[]\r\n P2gn=[]\r\n T2gn=[]\r\n T4l=[]\r\n\r\n P2g1=[]\r\n P2l1=[]\r\n T2g1=[]\r\n T2l1=[]\r\n P2gn1=[]\r\n T2gn1=[]\r\n T4l1=[]\r\n\r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g.append(np.round(1.5*cosSq3DG-0.5,3))\r\n cosSq3DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l.append(np.round(1.5*cosSq3DL-0.5,3))\r\n cosSq3DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn.append(np.round(1.5*cosSq3DGN-0.5,3))\r\n cosSq2DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)])\r\n T2g.append(np.round(2*cosSq2DG-1,3))\r\n cosSq2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T2l.append(np.round(2*cosSq2DL-1,3))\r\n cosSq2DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)])\r\n T2gn.append(np.round(2*cosSq2DGN-1,3))\r\n \r\n \r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG1=np.sum(gaussianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g1.append(np.round(1.5*cosSq3DG1-0.5,3))\r\n cosSq3DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l1.append(np.round(1.5*cosSq3DL1-0.5,3))\r\n cosSq3DGN1=np.sum(gndOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn1.append(np.round(1.5*cosSq3DGN1-0.5,3))\r\n 
cosSq2DG1=np.sum(gaussianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP1[i,0:int(a/2)])\r\n T2g1.append(np.round(2*cosSq2DG1-1,3))\r\n cosSq2DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)])\r\n T2l1.append(np.round(2*cosSq2DL1-1,3))\r\n cosSq2DGN1=np.sum(gndOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP1[i,0:int(a/2)])\r\n T2gn1.append(np.round(2*cosSq2DGN1-1,3))\r\n \r\n \r\n # Plotting orientation parameters \r\n fw=2*np.asarray(c) # converting widths to full widths (FWHM)\r\n\r\n plt.plot(fw,P2l,'b-x', label='<P$_2$>$_{LD}$')\r\n plt.plot(fw,P2gn,'g--x', label='<P$_2$>$_{GND}$')\r\n plt.plot(fw,P2g,'r:x', label = '<P$_2$>$_{GD}$')\r\n plt.plot(fw,P2l1,'b-x')\r\n plt.plot(fw,P2gn1,'g--x')\r\n plt.plot(fw,P2g1,'r:x')\r\n\r\n\r\n plt.plot(fw,T2l,'b-o', label='<T$_2$>$_{LD}$')\r\n plt.plot(fw,T2gn,'g--o', label='<T$_2$>$_{GND}$')\r\n plt.plot(fw,T2g,'r:o', label='<T$_2$>$_{GD}$')\r\n plt.plot(fw,T2l1,'b-o')\r\n plt.plot(fw,T2gn1,'g--o')\r\n plt.plot(fw,T2g1,'r:o')\r\n\r\n plt.legend(bbox_to_anchor=(0,1,1,0), loc=\"lower left\",mode=\"expand\", ncol=3)\r\n #plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n #plt.title('OP vs FWHM')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n plt.close()\r\n\r\n print('\\n <T2> vs. FWHM')\r\n print('+LD\\n',T2l,'\\n-LD\\n',T2l1,'\\n+GD\\n',T2g,'\\n-GD\\n',T2g1,'\\n+GND\\n',T2gn,'\\n-GND\\n',T2gn1)\r\n print('\\n <P2> vs. FWHM')\r\n print('+LD\\n',P2l,'\\n-LD\\n',P2l1,'\\n+GD\\n',P2g,'\\n-GD\\n',P2g1,'\\n+GND\\n',P2gn,'\\n-GND\\n',P2gn1)\r\n\r\n\r\n\r\n\r\n '''\r\n # Plotting T4 values\r\n\r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosFt2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosFtX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T4l.append(np.round(8*cosFt2DL-8*cosSq2DL+1,3))\r\n cosFt2DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosFtX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)])\r\n T4l1.append(np.round(8*cosFt2DL1-8*cosSq2DL1+1,3))\r\n \r\n plt.figure()\r\n plt.plot(fw,T4l,'b-o', label='<T$_4$>$_{LD}$')\r\n plt.plot(fw,T4l1,'b-o')\r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n plt.close()\r\n print('\\n',T4l,T4l1)\r\n '''\r\n \r\n\r\n\r\n \r\n # to calculate Orientation parameter for GND function with different shape factors\r\n b=np.linspace(1,2,5)\r\n for beta in b:\r\n gaussianOP=np.empty_like(x)\r\n lorentzianOP=np.empty_like(x)\r\n gndOP=np.empty_like(x)\r\n \r\n for hwidth in c:\r\n g=gaussian(x, mu[0], hwidth)+gaussian(x, mu[1],hwidth)+gaussian(x, mu[2],hwidth)\r\n l=lorentzian(x,mu[0],hwidth)+lorentzian(x,mu[1],hwidth)+lorentzian(x,mu[2],hwidth)\r\n gn=gnd(x,mu[0],hwidth,beta)+gnd(x,mu[1],hwidth,beta)+gnd(x,mu[2],hwidth,beta)\r\n gaussianOP = np.vstack((gaussianOP,g))\r\n lorentzianOP= np.vstack((lorentzianOP,l))\r\n gndOP=np.vstack((gndOP,gn))\r\n \r\n \r\n P2g=[]\r\n P2l=[]\r\n T2g=[]\r\n T2l=[]\r\n P2gn=[]\r\n T2gn=[]\r\n \r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g.append(np.round(1.5*cosSq3DG-0.5,3))\r\n cosSq3DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l.append(np.round(1.5*cosSq3DL-0.5,3))\r\n 
cosSq3DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn.append(np.round(1.5*cosSq3DGN-0.5,3))\r\n cosSq2DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)])\r\n T2g.append(np.round(2*cosSq2DG-1,3))\r\n cosSq2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T2l.append(np.round(2*cosSq2DL-1,3))\r\n cosSq2DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)])\r\n T2gn.append(np.round(2*cosSq2DGN-1,3))\r\n \r\n print('\\n <T2> vs. FWHM for GNDs with beta=', beta)\r\n print(T2gn)\r\n plt.plot(fw,T2gn,'--o',label=(r'$\\beta$='+str(beta)))\r\n \r\n #print('\\n <P2> vs. FWHM for GNDs with beta=', beta)\r\n #print(P2gn)\r\n #plt.plot(fw,P2gn,'--o',label=(r'$\\beta$='+str(beta)))\r\n\r\n \r\n \r\n plotNegSide=False\r\n if plotNegSide:\r\n gaussianOP1=np.empty_like(x)\r\n lorentzianOP1=np.empty_like(x)\r\n gndOP1=np.empty_like(x)\r\n \r\n for hwidth in c:\r\n g1=gaussian(x, mu1[0],hwidth)+gaussian(x, mu1[1],hwidth)+gaussian(x, mu1[2],hwidth)+gaussian(x, mu1[3],hwidth)\r\n l1=lorentzian(x,mu1[0],hwidth)+lorentzian(x,mu1[1],hwidth)+lorentzian(x,mu1[2],hwidth)+lorentzian(x,mu1[3],hwidth)\r\n gn1=gnd(x, mu1[0], hwidth,beta)+gnd(x, mu1[1], hwidth,beta)++gnd(x, mu1[2], hwidth,beta)++gnd(x, mu1[3], hwidth,beta)\r\n gaussianOP1 = np.vstack((gaussianOP1,g1))\r\n lorentzianOP1= np.vstack((lorentzianOP1,l1))\r\n gndOP1=np.vstack((gndOP1,gn1))\r\n \r\n P2g1=[]\r\n P2l1=[]\r\n T2g1=[]\r\n T2l1=[]\r\n P2gn1=[]\r\n T2gn1=[]\r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG1=np.sum(gaussianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g1.append(np.round(1.5*cosSq3DG1-0.5,3))\r\n cosSq3DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l1.append(np.round(1.5*cosSq3DL1-0.5,3))\r\n cosSq3DGN1=np.sum(gndOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP1[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn1.append(np.round(1.5*cosSq3DGN1-0.5,3))\r\n cosSq2DG1=np.sum(gaussianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP1[i,0:int(a/2)])\r\n T2g1.append(np.round(2*cosSq2DG1-1,3))\r\n cosSq2DL1=np.sum(lorentzianOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP1[i,0:int(a/2)])\r\n T2l1.append(np.round(2*cosSq2DL1-1,3))\r\n cosSq2DGN1=np.sum(gndOP1[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP1[i,0:int(a/2)])\r\n T2gn1.append(np.round(2*cosSq2DGN1-1,3))\r\n \r\n \r\n print('\\n <T2> vs. FWHM for GNDs with beta=', beta)\r\n print(T2gn1)\r\n plt.plot(fw,T2gn1,'--o',label=(r'$ \\beta$='+str(beta)))\r\n #print('\\n <P2> vs. FWHM for GNDs with beta=', beta)\r\n #print(P2gn1)\r\n #plt.plot(fw,P2gn1,'--x',label=(r'$ \\beta$='+str(beta)))\r\n \r\n \r\n \r\n \r\n plt.plot(fw,T2l,'b-x',label='LD')\r\n plt.plot(fw,T2g,'k-x',label='GD')\r\n #plt.plot(fw,P2l,'b-x',label='LD')\r\n #plt.plot(fw,P2g,'k-x',label='GD')\r\n plt.legend(bbox_to_anchor=(0,1,1,0), loc=\"lower left\",mode=\"expand\", ncol=4)\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.tight_layout()\r\n plt.minorticks_on()\r\n plt.show()\r\n plt.close()\r\n \r\n print('\\n <T2> vs. FWHM for LD')\r\n print(T2l)\r\n print('\\n <T2> vs. 
FWHM for GD')\r\n print(T2g)\r\n #print('P2 Lorentzian =',P2l)\r\n #print('P2 Gaussian =', P2g)\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n###### PROGRAM 2 for generating ODFs with secondary orientation peaks #####\r\nif program2:\r\n\r\n # Setting peak parameters\r\n\r\n c=[0.1,0.6,2.9,5.7,9.5,11.5,14.3,19.1,28.6,38.2,52.1,57.3,71.6,81.8,95.5,114.6,120,135,150,165,180]\r\n h=[0,0.1,0.25,0.5,0.75,1] # relative height of secondary peak\r\n #c=[5]\r\n #h=[0.1,0.75] # do not forget to change labels in legend of ODF plots if you change these values\r\n beta=1.5\r\n mu2 = [0,90,180,270,360] # setting peak positions\r\n\r\n plotODF=False\r\n\r\n # defining ODFs\r\n gaussianOP=np.empty_like(x)\r\n lorentzianOP=np.empty_like(x)\r\n gndOP=np.empty_like(x)\r\n\r\n for height in h:\r\n for hwidth in c:\r\n g=gaussian(x, mu2[0], hwidth)+height*gaussian(x, mu2[1],hwidth)+gaussian(x, mu2[2],hwidth)+height*gaussian(x, mu2[3],hwidth)+gaussian(x, mu2[4],hwidth)\r\n l=lorentzian(x,mu2[0],hwidth)+height*lorentzian(x,mu2[1],hwidth)+lorentzian(x,mu2[2],hwidth)+height*lorentzian(x,mu2[3],hwidth)+lorentzian(x,mu2[4],hwidth)\r\n gn=gnd(x,mu2[0],hwidth,beta)+height*gnd(x,mu2[1],hwidth,beta)+gnd(x,mu2[2],hwidth,beta)+height*gnd(x,mu2[3],hwidth,beta)+gnd(x,mu2[4],hwidth,beta)\r\n gaussianOP = np.vstack((gaussianOP,g))\r\n lorentzianOP= np.vstack((lorentzianOP,l))\r\n gndOP=np.vstack((gndOP,gn))\r\n \r\n\r\n #output= np.array([x,l])\r\n #np.savetxt('ModelData.txt',output.T)\r\n\r\n \r\n if plotODF:\r\n plt.figure(dpi=150,figsize=(3.5,2.6))\r\n plt.plot(x,lorentzianOP[1::,:].T)\r\n #plt.plot(x,gndOP[1::,:].T,label='GND')\r\n #plt.plot(x,gaussianOP[1::,:].T,label='GD')\r\n plt.xlabel('Angle (\\xb0)')\r\n plt.ylabel('Intensity (a.u.)')\r\n plt.xticks([0,90,180,270,360])\r\n plt.locator_params('y',nbins=6)\r\n plt.ticklabel_format(axis='y',style='sci',scilimits=(0,0))\r\n plt.tight_layout()\r\n plt.minorticks_on()\r\n plt.legend(('A$_2$/A$_1$=0.1','A$_2$/A$_1$=0.75'))\r\n plt.show() \r\n plt.close()\r\n\r\n P2g=[]\r\n P2l=[]\r\n T2g=[]\r\n T2l=[]\r\n P2gn=[]\r\n T2gn=[]\r\n T4l=[]\r\n\r\n \r\n for i in range(1,np.shape(gaussianOP)[0]):\r\n cosSq3DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2g.append(np.round(1.5*cosSq3DG-0.5,3))\r\n cosSq3DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2l.append(np.round(1.5*cosSq3DL-0.5,3))\r\n cosSq3DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)]*sinX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)]*sinX[0:int(a/2)])\r\n P2gn.append(np.round(1.5*cosSq3DGN-0.5,3))\r\n cosSq2DG=np.sum(gaussianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gaussianOP[i,0:int(a/2)])\r\n T2g.append(np.round(2*cosSq2DG-1,3))\r\n cosSq2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T2l.append(np.round(2*cosSq2DL-1,3))\r\n cosSq2DGN=np.sum(gndOP[i,0:int(a/2)]*cosSqX[0:int(a/2)])/np.sum(gndOP[i,0:int(a/2)])\r\n T2gn.append(np.round(2*cosSq2DGN-1,3))\r\n cosFt2DL=np.sum(lorentzianOP[i,0:int(a/2)]*cosFtX[0:int(a/2)])/np.sum(lorentzianOP[i,0:int(a/2)])\r\n T4l.append(np.round(8*cosFt2DL-8*cosSq2DL+1,3))\r\n \r\n \r\n \r\n fw=2*np.asarray(c)\r\n print('FWHM=',fw)\r\n print('\\n <T2> vs. FWHM for A2/A1=',h)\r\n print('LD\\n',T2l,'\\nGD\\n',T2g,'\\nGND\\n',T2gn)\r\n print('\\n <T4> vs. FWHM for A2/A1=',h)\r\n print('LD\\n',T4l)\r\n print('\\n <P2> vs. 
FWHM for A2/A1=',h)\r\n print('LD\\n',P2l,'\\nGD\\n',P2g,'\\nGND\\n',P2gn)\r\n\r\n\r\n \r\n #Plotting values\r\n \r\n index=0\r\n plt.figure(figsize=(3.5,2.6))\r\n plt.title('OP vs FWHM for varying A$_2$/A$_1$ (LD)')\r\n for i in range(0,len(T2l),len(c)):\r\n plt.plot(fw,T2l[i:i+len(c)],'--o', label='A$_2$/A$_1$='+str(h[index]))\r\n #plt.plot(fw,P2l[i:i+len(c)],'--x', label='A$_2$/A$_1$='+str(h[index]))\r\n index+=1\r\n \r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n \r\n index=0\r\n plt.figure(figsize=(3.5,2.6))\r\n plt.title('OP vs FWHM for varying A$_2$/A$_1$ (GND)')\r\n for i in range(0,len(T2gn),len(c)):\r\n plt.plot(fw,T2gn[i:i+len(c)],'--o', label='A$_2$/A$_1$='+str(h[index]))\r\n #plt.plot(fw,P2gn[i:i+21],'--x', label='A$_2$/A$_1$='+str(h[index]))\r\n index+=1\r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n index=0\r\n plt.figure(figsize=(3.5,2.6))\r\n plt.title('OP vs FWHM for varying A$_2$/A$_1$ (GD)')\r\n for i in range(0,len(T2g),len(c)):\r\n plt.plot(fw,T2g[i:i+len(c)],'--o', label='A$_2$/A$_1$='+str(h[index]))\r\n #plt.plot(fw,P2g[i:i+21],'--x', label='A$_2$/A$_1$='+str(h[index]))\r\n index+=1\r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n index=0\r\n plt.figure(figsize=(3.5,2.6))\r\n plt.title('T4 vs FWHM for varying A$_2$/A$_1$ (LD)')\r\n for i in range(0,len(T4l),len(c)):\r\n plt.plot(fw,T4l[i:i+len(c)],'--o', label='A$_2$/A$_1$='+str(h[index]))\r\n index+=1\r\n plt.plot(fw,T2l[0:len(c)],'-.s', label='<T$_2$>, A$_2$/A$_1$='+str(h[0]))\r\n plt.legend()\r\n plt.xlabel('FWHM (\\xb0)')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n \r\n '''\r\n # Plotting orientation parameter as function of relative areas of secondary peaks\r\n # activate when value of FWHM is fixed to a single value\r\n \r\n plt.figure(figsize=(3.5,2.6))\r\n for i in range(0,len(T2l),len(c)):\r\n line1, = plt.plot(h,T2l,'b-o') \r\n line2, = plt.plot(h,T2gn,'g--o')\r\n line3, = plt.plot(h,T2g,'r-.o')\r\n line4, = plt.plot(h,T4l,'k:o')\r\n \r\n \r\n plt.legend((line1,line2,line3,line4),('<T$_2$>$_{LD}$','<T$_2$>$_{GND}$','<T$_2$>$_{GD}$','<T$_4$>$_{LD}$'))\r\n plt.xlabel('Relative area, A$_2$/A$_1$')\r\n plt.ylabel('Orientation Parameter')\r\n plt.minorticks_on()\r\n plt.tight_layout()\r\n plt.show()\r\n '''\r\n\r\n"
] |
[
[
"scipy.special.gamma",
"numpy.cos",
"matplotlib.pyplot.minorticks_on",
"matplotlib.pyplot.xticks",
"numpy.sin",
"numpy.log",
"matplotlib.pyplot.tight_layout",
"numpy.sqrt",
"numpy.vstack",
"numpy.empty_like",
"numpy.round",
"matplotlib.pyplot.title",
"matplotlib.pyplot.close",
"numpy.shape",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ticklabel_format",
"numpy.asarray",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.locator_params",
"matplotlib.pyplot.ylabel",
"numpy.abs",
"numpy.linspace"
]
] |
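Note: the code cell above computes 2D and 3D orientation parameters from model orientation distribution functions (ODFs). Below is a minimal self-contained sketch of the same calculation, assuming the cell's conventions (angles in degrees over [0, 360), averages taken over the first half-period, a sin(x) Jacobian for the 3D average); the gaussian helper and its HWHM width convention here are illustrative stand-ins for the ones defined earlier in that file:

    import numpy as np

    def gaussian(x, mu, hw):
        # illustrative Gaussian peak with half-width at half-maximum hw
        sigma = hw / np.sqrt(2.0 * np.log(2.0))
        return np.exp(-0.5 * ((x - mu) / sigma) ** 2)

    x = np.linspace(0.0, 360.0, 3600, endpoint=False)
    rad = np.deg2rad(x)
    odf = gaussian(x, 0.0, 5.0) + gaussian(x, 180.0, 5.0) + gaussian(x, 360.0, 5.0)

    half = slice(0, len(x) // 2)          # restrict averages to 0-180 deg
    cos2 = np.cos(rad[half]) ** 2
    sin_ = np.sin(rad[half])              # Jacobian weight for the 3D average

    # 2D parameter <T2> = 2<cos^2 x> - 1; 3D parameter <P2> = 1.5<cos^2 x> - 0.5
    T2 = 2.0 * np.sum(odf[half] * cos2) / np.sum(odf[half]) - 1.0
    P2 = 1.5 * np.sum(odf[half] * cos2 * sin_) / np.sum(odf[half] * sin_) - 0.5
    print(np.round(T2, 3), np.round(P2, 3))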
bjlittle/poc-ngvat
|
[
"03cab7c4b184d1fa47d3a1dfee77f48ec609723a"
] |
[
"poc-3/data/test/synthetic/utils.py"
] |
[
"import matplotlib; matplotlib.use(\"Agg\")\r\nimport torch\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nimport glob\r\nimport os\r\nimport shutil\r\nimport time\r\nimport sys\r\nimport collections\r\npjoin = os.path.join\r\n\r\nclass LogPrint():\r\n def __init__(self, file, ExpID, print_to_screen):\r\n self.file = file\r\n self.ExpID = ExpID\r\n self.print_to_screen = print_to_screen\r\n def __call__(self, some_str):\r\n sstr = \"[%s %s %s \" % (self.ExpID[-6:], os.getpid(), time.strftime(\"%Y/%m/%d-%H:%M:%S]\")) + str(some_str)\r\n print(sstr, file=self.file, flush=True)\r\n if self.print_to_screen:\r\n print(sstr)\r\n\r\ndef check_path(x):\r\n if x:\r\n complete_path = glob.glob(x)\r\n assert(len(complete_path) == 1), \"The provided path points to more than 1 entity. Please check.\"\r\n x = complete_path[0]\r\n return x\r\n\r\ndef my_makedirs(d):\r\n if not os.path.exists(d):\r\n os.makedirs(d)\r\n\r\ndef set_up_dir(project_name, resume, debug):\r\n TimeID = time.strftime(\"%Y%m%d-%H%M%S\")\r\n if \"SERVER\" in os.environ.keys():\r\n ExpID = \"SERVER\" + os.environ[\"SERVER\"] + \"-\" + TimeID\r\n else:\r\n ExpID = TimeID\r\n \r\n project_path = \"Debug_Dir\" if debug else pjoin(\"Experiments\", ExpID + \"_\" + project_name)\r\n rec_img_path = pjoin(project_path, \"reconstructed_images\")\r\n weights_path = pjoin(project_path, \"weights\")\r\n my_makedirs(rec_img_path)\r\n my_makedirs(weights_path)\r\n log_path = pjoin(weights_path, \"log_\" + ExpID + \".txt\")\r\n log = open(log_path, \"w+\")\r\n print(\" \".join([\"CUDA_VISIBLE_DEVICES=0 python\", *sys.argv]),\r\n file=log, flush=True) # save the script\r\n return TimeID, ExpID, rec_img_path, weights_path, log\r\n\r\ndef get_CodeID():\r\n script = \"git log --pretty=oneline >> wh_CodeID_file.tmp\"\r\n os.system(script)\r\n x = open(\"wh_CodeID_file.tmp\").readline()\r\n os.remove(\"wh_CodeID_file.tmp\")\r\n return x[:8]\r\n\r\ndef is_img(x):\r\n return any(x.endswith(extension) for extension in [\".png\", \".jpg\", \".jpeg\"])\r\n\r\ndef load_param_from_t7(model, in_layer_index, out_layer):\r\n out_layer.weight = torch.nn.Parameter(\r\n model.get(in_layer_index).weight.float())\r\n out_layer.bias = torch.nn.Parameter(model.get(in_layer_index).bias.float())\r\n\r\nclass LogHub(object):\r\n def __init__(self, momentum=0):\r\n self.losses = {}\r\n self.momentum = momentum\r\n\r\n def update(self, name, value):\r\n if name not in self.losses:\r\n self.losses[name] = value\r\n else:\r\n self.losses[name] = self.losses[name] * \\\r\n self.momentum + value * (1 - self.momentum)\r\n\r\n def format(self):\r\n keys = self.losses.keys()\r\n keys = sorted(keys)\r\n logtmp = \"\"\r\n for k in keys:\r\n logtmp += \"%s: %.3f | \" % (k, self.losses[k])\r\n return logtmp[:-3]\r\n\r\n\r\ndef smart_load(model_path):\r\n sth = torch.load(model_path, map_location=lambda storage, location: storage)\r\n if isinstance(sth, collections.OrderedDict): # state_dict\r\n return sth\r\n elif isinstance(sth, dict): # dict which has a value of state_dict\r\n for k, v in sth.items():\r\n if isinstance(v, collections.OrderedDict):\r\n return v\r\n print(\"smart load failed, please manually check the given model\")\r\n"
] |
[
[
"matplotlib.use",
"torch.load"
]
] |
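Note: the smart_load helper in the cell above accepts either a raw state_dict or a checkpoint dict that wraps one. A minimal sketch of the same pattern (raising on failure instead of printing, a deliberate deviation from the original):

    import collections
    import torch

    def smart_load(model_path):
        # load on CPU regardless of where the checkpoint was saved
        sth = torch.load(model_path, map_location="cpu")
        if isinstance(sth, collections.OrderedDict):       # plain state_dict
            return sth
        if isinstance(sth, dict):                          # dict wrapping a state_dict
            for v in sth.values():
                if isinstance(v, collections.OrderedDict):
                    return v
        raise ValueError("no state_dict found; check the checkpoint manually")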
irom-lab/AMR-Policies
|
[
"43552ca0ddcd584a9faa12b5588874bac41bd205"
] |
[
"gibson2/agents/tf_agents/agents/reinforce/reinforce_agent.py"
] |
[
"# coding=utf-8\n# # Copyright 2018 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A REINFORCE Agent.\nImplements the REINFORCE algorithm from (Williams, 1992):\nhttp://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf\n\nAMR-Changes: Modified to include AMR regularizer\nOriginal Authors:\nS. Guadarrama, A. Korattikara, O. Ramirez, P. Castro, E. Holly, S. Fishman, K. Wang, E. Gonina, N. Wu, E. Kokiopoulou, L. Sbaiz, J. Smith, G. Bart ́ok, J. Berent, C. Harris, V. Vanhoucke, and E. Brevdo. TF-Agents: A library for reinforcement learning in tensorflow, 2018. URL https://github.com/tensorflow/agents\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gin\nimport numpy as np\nimport tensorflow as tf\n\nfrom tf_agents.agents import tf_agent\nfrom tf_agents.policies import actor_policy\nfrom tf_agents.policies import greedy_policy\nfrom tf_agents.trajectories import time_step as ts\nfrom tf_agents.utils import common\nfrom tf_agents.utils import eager_utils\nfrom tf_agents.utils import value_ops\n\n\ndef _standard_normalize(values, axes=(0,)):\n \"\"\"Standard normalizes values `values`.\n Args:\n values: Tensor with values to be standardized.\n axes: Axes used to compute mean and variances.\n Returns:\n Standardized values (values - mean(values[axes])) / std(values[axes]).\n \"\"\"\n values_mean, values_var = tf.nn.moments(x=values, axes=axes, keepdims=True)\n epsilon = np.finfo(values.dtype.as_numpy_dtype).eps\n normalized_values = ((values - values_mean) / (tf.sqrt(values_var) + epsilon))\n return normalized_values\n\n\ndef _entropy_loss(distributions, spec, weights=None):\n \"\"\"Computes entropy loss.\n Args:\n distributions: A possibly batched tuple of distributions.\n spec: A nested tuple representing the action spec.\n weights: Optional scalar or element-wise (per-batch-entry) importance\n weights. Includes a mask for invalid timesteps.\n Returns:\n A Tensor representing the entropy loss.\n \"\"\"\n with tf.name_scope('entropy_regularization'):\n entropy = -tf.cast(common.entropy(distributions, spec), tf.float32)\n if weights is not None:\n entropy *= weights\n return tf.reduce_mean(input_tensor=entropy)\n\n\ndef _get_initial_policy_state(policy, time_steps):\n \"\"\"Gets the initial state of a policy.\"\"\"\n batch_size = (\n tf.compat.dimension_at_index(time_steps.discount.shape, 0) or\n tf.shape(time_steps.discount)[0])\n\n return policy.get_initial_state(batch_size=batch_size)\n\n\[email protected]\nclass ReinforceAgent(tf_agent.TFAgent):\n \"\"\"A REINFORCE Agent.\n Implements:\n REINFORCE algorithm from\n \"Simple statistical gradient-following algorithms for connectionist\n reinforcement learning\"\n Williams, R.J., 1992.\n http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf\n REINFORCE with state-value baseline, where state-values are estimated with\n function approximation, from\n \"Reinforcement learning: An introduction\" (Sec. 
13.4)\n Sutton, R.S. and Barto, A.G., 2018.\n http://incompleteideas.net/book/the-book-2nd.html\n The REINFORCE agent can be optionally provided with:\n - value_network: A `tf_agents.network.Network` which parameterizes state-value\n estimation as a neural network. The network will be called with\n call(observation, step_type) and returns a floating point state-values\n tensor.\n - value_estimation_loss_coef: Weight on the value prediction loss.\n If value_network and value_estimation_loss_coef are provided, advantages are\n computed as\n `advantages = (discounted accumulated rewards) - (estimated state-values)`\n and the overall learning objective becomes:\n `(total loss) =\n (policy gradient loss) +\n value_estimation_loss_coef * (squared error of estimated state-values)`\n \"\"\"\n\n def __init__(self,\n time_step_spec,\n action_spec,\n actor_network,\n optimizer,\n value_network=None,\n value_estimation_loss_coef=0.2,\n advantage_fn=None,\n use_advantage_loss=True,\n gamma=1.0,\n normalize_returns=True,\n gradient_clipping=None,\n debug_summaries=False,\n summarize_grads_and_vars=False,\n entropy_regularization=None,\n train_step_counter=None,\n name=None):\n \"\"\"Creates a REINFORCE Agent.\n Args:\n time_step_spec: A `TimeStep` spec of the expected time_steps.\n action_spec: A nest of BoundedTensorSpec representing the actions.\n actor_network: A tf_agents.network.Network to be used by the agent. The\n network will be called with call(observation, step_type).\n optimizer: Optimizer for the actor network.\n value_network: (Optional) A `tf_agents.network.Network` to be used by the\n agent. The network will be called with call(observation, step_type) and\n returns a floating point value tensor.\n value_estimation_loss_coef: (Optional) Multiplier for value prediction\n loss to balance with policy gradient loss.\n advantage_fn: A function `A(returns, value_preds)` that takes returns and\n value function predictions as input and returns advantages. The default\n is `A(returns, value_preds) = returns - value_preds` if a value network\n is specified and `use_advantage_loss=True`, otherwise `A(returns,\n value_preds) = returns`.\n use_advantage_loss: Whether to use value function predictions for\n computing returns. `use_advantage_loss=False` is equivalent to setting\n `advantage_fn=lambda returns, value_preds: returns`.\n gamma: A discount factor for future rewards.\n normalize_returns: Whether to normalize returns across episodes when\n computing the loss.\n gradient_clipping: Norm length to clip gradients.\n debug_summaries: A bool to gather debug summaries.\n summarize_grads_and_vars: If True, gradient and network variable summaries\n will be written during training.\n entropy_regularization: Coefficient for entropy regularization loss term.\n train_step_counter: An optional counter to increment every time the train\n op is run. Defaults to the global_step.\n name: The name of this agent. All variables in this module will fall under\n that name. 
Defaults to the class name.\n \"\"\"\n tf.Module.__init__(self, name=name)\n\n actor_network.create_variables()\n\n self._actor_network = actor_network\n\n if self._actor_network._rnn_encoder:\n self.rnn_size = self._actor_network._rnn_encoder._dynamic_unroll.trainable_weights[0].shape[1]\n else:\n self.rnn_size = 0\n\n if value_network:\n value_network.create_variables()\n self._value_network = value_network\n\n collect_policy = actor_policy.ActorPolicy(\n time_step_spec=time_step_spec,\n action_spec=action_spec,\n actor_network=self._actor_network,\n clip=True)\n\n policy = greedy_policy.GreedyPolicy(collect_policy)\n\n self._optimizer = optimizer\n self._gamma = gamma\n self._normalize_returns = normalize_returns\n self._gradient_clipping = gradient_clipping\n self._entropy_regularization = entropy_regularization\n self._value_estimation_loss_coef = value_estimation_loss_coef\n self._baseline = self._value_network is not None\n self._advantage_fn = advantage_fn\n if self._advantage_fn is None:\n if use_advantage_loss and self._baseline:\n self._advantage_fn = lambda returns, value_preds: returns - value_preds\n else:\n self._advantage_fn = lambda returns, _: returns\n\n super(ReinforceAgent, self).__init__(\n time_step_spec,\n action_spec,\n policy,\n collect_policy,\n train_sequence_length=None,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step_counter)\n\n def _initialize(self):\n pass\n\n def _train(self, experience, weights=None, lam=0):\n # Add a mask to ensure we reset the return calculation at episode\n # boundaries. This is needed in cases where episodes are truncated before\n # reaching a terminal state. Note experience is a batch of trajectories\n # where reward=next_step.reward so the mask may look shifted at first.\n\n non_last_mask = tf.cast(\n tf.math.not_equal(experience.next_step_type, ts.StepType.LAST),\n tf.float32)\n discounts = non_last_mask * experience.discount * self._gamma\n returns = value_ops.discounted_return(\n experience.reward, discounts, time_major=False)\n\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='rewards', data=experience.reward, step=self.train_step_counter)\n tf.compat.v2.summary.histogram(\n name='discounts',\n data=experience.discount,\n step=self.train_step_counter)\n tf.compat.v2.summary.histogram(\n name='returns', data=returns, step=self.train_step_counter)\n\n with tf.GradientTape() as tape:\n loss_info = self.total_loss(\n experience, tf.stop_gradient(returns), weights=weights, lam=lam)\n tf.debugging.check_numerics(loss_info.loss, 'Loss is inf or nan')\n variables_to_train = self._actor_network.trainable_weights\n if self._baseline:\n variables_to_train += self._value_network.trainable_weights\n grads = tape.gradient(loss_info.loss, variables_to_train)\n\n grads_and_vars = list(zip(grads, variables_to_train))\n if self._gradient_clipping:\n grads_and_vars = eager_utils.clip_gradient_norms(grads_and_vars,\n self._gradient_clipping)\n\n if self._summarize_grads_and_vars:\n eager_utils.add_variables_summaries(grads_and_vars,\n self.train_step_counter)\n eager_utils.add_gradients_summaries(grads_and_vars,\n self.train_step_counter)\n\n self._optimizer.apply_gradients(\n grads_and_vars, global_step=self.train_step_counter)\n\n return tf.nest.map_structure(tf.identity, loss_info)\n\n def total_loss(self, experience, returns, weights, lam):\n # Ensure we see at least one full episode.\n time_steps = ts.TimeStep(experience.step_type,\n 
tf.zeros_like(experience.reward),\n tf.zeros_like(experience.discount),\n experience.observation)\n is_last = experience.is_last()\n num_episodes = tf.reduce_sum(tf.cast(is_last, tf.float32))\n tf.debugging.assert_greater(\n num_episodes,\n 0.0,\n message='No complete episode found. REINFORCE requires full episodes '\n 'to compute losses.')\n\n # Mask out partial episodes at the end of each batch of time_steps.\n # NOTE: We use is_last rather than is_boundary because the last transition\n # is the transition with the last valid reward. In other words, the\n # reward on the boundary transitions do not have valid rewards. Since\n # REINFORCE is calculating a loss w.r.t. the returns (and not bootstrapping)\n # keeping the boundary transitions is irrelevant.\n valid_mask = tf.cast(experience.is_last(), dtype=tf.float32)\n valid_mask = tf.math.cumsum(valid_mask, axis=1, reverse=True)\n valid_mask = tf.cast(valid_mask > 0, dtype=tf.float32)\n if weights is not None:\n weights *= valid_mask\n else:\n weights = valid_mask\n\n advantages = returns\n value_preds = None\n\n if self._baseline:\n value_preds, _ = self._value_network(time_steps.observation,\n time_steps.step_type)\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='value_preds', data=value_preds, step=self.train_step_counter)\n\n advantages = self._advantage_fn(returns, value_preds)\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='advantages', data=advantages, step=self.train_step_counter)\n\n # TODO(b/126592060): replace with tensor normalizer.\n if self._normalize_returns:\n advantages = _standard_normalize(advantages, axes=(0, 1))\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='normalized_%s' %\n ('advantages' if self._baseline else 'returns'),\n data=advantages,\n step=self.train_step_counter)\n\n tf.nest.assert_same_structure(time_steps, self.time_step_spec)\n policy_state = _get_initial_policy_state(self.collect_policy, time_steps)\n actions_distribution = self.collect_policy.distribution(\n time_steps, policy_state=policy_state).action\n\n\n policy_gradient_loss = self.policy_gradient_loss(\n actions_distribution,\n experience.action,\n experience.is_boundary(),\n advantages,\n num_episodes,\n weights,\n )\n entropy_regularization_loss = self.entropy_regularization_loss(\n actions_distribution, weights)\n\n group_lasso_loss = self.group_lasso_loss()\n\n total_loss = policy_gradient_loss + entropy_regularization_loss + lam * group_lasso_loss\n\n value_estimation_loss = None\n if self._baseline:\n value_estimation_loss = self.value_estimation_loss(\n value_preds, returns, num_episodes, weights)\n total_loss += value_estimation_loss\n\n with tf.name_scope('Losses/'):\n tf.compat.v2.summary.scalar(\n name='policy_gradient_loss',\n data=policy_gradient_loss,\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n name='entropy_regularization_loss',\n data=entropy_regularization_loss,\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n name='group_lasso_loss',\n data=lam * group_lasso_loss,\n step=self.train_step_counter)\n if self._baseline:\n tf.compat.v2.summary.scalar(\n name='value_estimation_loss',\n data=value_estimation_loss,\n step=self.train_step_counter)\n tf.compat.v2.summary.scalar(\n name='total_loss', data=total_loss, step=self.train_step_counter)\n\n return tf_agent.LossInfo(total_loss, ())\n\n def policy_gradient_loss(self,\n actions_distribution,\n actions,\n is_boundary,\n returns,\n num_episodes,\n weights=None):\n \"\"\"Computes the 
policy gradient loss.\n Args:\n actions_distribution: A possibly batched tuple of action distributions.\n actions: Tensor with a batch of actions.\n is_boundary: Tensor of booleans that indicate if the corresponding action\n was in a boundary trajectory and should be ignored.\n returns: Tensor with a return from each timestep, aligned on index. Works\n better when returns are normalized.\n num_episodes: Number of episodes contained in the training data.\n weights: Optional scalar or element-wise (per-batch-entry) importance\n weights. May include a mask for invalid timesteps.\n Returns:\n policy_gradient_loss: A tensor that will contain policy gradient loss for\n the on-policy experience.\n \"\"\"\n # TODO(b/126594799): Add class IndependentNested(tfd.Distribution) to handle\n # nests of independent distributions like this.\n action_log_prob = common.log_probability(actions_distribution, actions,\n self.action_spec)\n\n # Filter out transitions between end state of previous episode and start\n # state of next episode.\n valid_mask = tf.cast(~is_boundary, tf.float32)\n action_log_prob *= valid_mask\n\n action_log_prob_times_return = action_log_prob * returns\n\n if weights is not None:\n action_log_prob_times_return *= weights\n\n if self._debug_summaries:\n tf.compat.v2.summary.histogram(\n name='action_log_prob',\n data=action_log_prob,\n step=self.train_step_counter)\n tf.compat.v2.summary.histogram(\n name='action_log_prob_times_return',\n data=action_log_prob_times_return,\n step=self.train_step_counter)\n\n # Policy gradient loss is defined as the sum, over timesteps, of action\n # log-probability times the cumulative return from that timestep onward.\n # For more information, see (Williams, 1992).\n policy_gradient_loss = -tf.reduce_sum(\n input_tensor=action_log_prob_times_return)\n\n # We take the mean over episodes by dividing by num_episodes.\n policy_gradient_loss = policy_gradient_loss / num_episodes\n\n return policy_gradient_loss\n\n def entropy_regularization_loss(self, actions_distribution, weights=None):\n \"\"\"Computes the optional entropy regularization loss.\n Extending REINFORCE by entropy regularization was originally proposed in\n \"Function optimization using connectionist reinforcement learning\n algorithms.\" (Williams and Peng, 1991).\n Args:\n actions_distribution: A possibly batched tuple of action distributions.\n weights: Optional scalar or element-wise (per-batch-entry) importance\n weights. May include a mask for invalid timesteps.\n Returns:\n entropy_regularization_loss: A tensor with the entropy regularization\n loss.\n \"\"\"\n if self._entropy_regularization:\n loss = _entropy_loss(actions_distribution, self.action_spec, weights)\n loss *= self._entropy_regularization\n else:\n loss = tf.constant(0.0, dtype=tf.float32)\n\n return loss\n\n def group_lasso_loss(self):\n mem_weights = self._actor_network._rnn_encoder._dynamic_unroll.trainable_weights[0]\n print('SANITY CHECK: 256 + rnn size')\n print(mem_weights.shape)\n group_lasso_loss = tf.reduce_sum(tf.sqrt(tf.reduce_sum(mem_weights ** 2, axis=0)))\n\n return group_lasso_loss\n\n def value_estimation_loss(self,\n value_preds,\n returns,\n num_episodes,\n weights=None):\n \"\"\"Computes the value estimation loss.\n Args:\n value_preds: Per-timestep estimated values.\n returns: Per-timestep returns for value function to predict.\n num_episodes: Number of episodes contained in the training data.\n weights: Optional scalar or element-wise (per-batch-entry) importance\n weights. 
May include a mask for invalid timesteps.\n Returns:\n value_estimation_loss: A scalar value_estimation_loss loss.\n \"\"\"\n value_estimation_error = tf.math.squared_difference(returns, value_preds)\n if weights is not None:\n value_estimation_error *= weights\n\n value_estimation_loss = (\n tf.reduce_sum(input_tensor=value_estimation_error) *\n self._value_estimation_loss_coef)\n\n # We take the mean over episodes by dividing by num_episodes.\n value_estimation_loss = value_estimation_loss / num_episodes\n\n return value_estimation_loss\n"
] |
[
[
"tensorflow.debugging.check_numerics",
"tensorflow.nn.moments",
"numpy.finfo",
"tensorflow.sqrt",
"tensorflow.math.not_equal",
"tensorflow.zeros_like",
"tensorflow.Module.__init__",
"tensorflow.cast",
"tensorflow.compat.v2.summary.scalar",
"tensorflow.shape",
"tensorflow.math.cumsum",
"tensorflow.GradientTape",
"tensorflow.constant",
"tensorflow.compat.v2.summary.histogram",
"tensorflow.nest.assert_same_structure",
"tensorflow.nest.map_structure",
"tensorflow.name_scope",
"tensorflow.reduce_sum",
"tensorflow.math.squared_difference",
"tensorflow.compat.dimension_at_index",
"tensorflow.debugging.assert_greater",
"tensorflow.reduce_mean",
"tensorflow.stop_gradient"
]
] |
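Note: the group_lasso_loss in the cell above is an L2,1 (group-lasso) penalty over the columns of the RNN weight matrix, which pushes whole memory channels toward zero. A minimal NumPy sketch of the same quantity (the matrix shape and lam value are illustrative only):

    import numpy as np

    rng = np.random.default_rng(0)
    W = rng.normal(size=(256 + 64, 4 * 64))    # illustrative: (inputs + state) x gates
    # sum of column-wise L2 norms: sum_j ||W[:, j]||_2
    group_lasso = np.sum(np.sqrt(np.sum(W ** 2, axis=0)))
    lam = 0.01                                 # regularization weight, as in _train(..., lam)
    penalty = lam * group_lasso                # added to policy-gradient + entropy losses
    print(group_lasso, penalty)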
CQCL/qvtsim
|
[
"875a480e4daf9331cc5ab43c49018a6f6e327183"
] |
[
"numerical_class.py"
] |
[
"#!/usr/bin/env python\n\n#####################################################################################\n#\n# Copyright 2022 Quantinuum\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# \n#####################################################################################\n\n\"\"\"Numerical estimation class.\"\"\"\n\nimport numpy as np\nimport pickle\nfrom datetime import datetime\n\nfrom qiskit import Aer, execute, QuantumCircuit\nimport qiskit.ignis.verification.quantum_volume as qv\n\nfrom estimation_class import QVEstimate\nfrom transpiler_passes import preset_passes\nfrom error_models import arbitrary_noise\nfrom utils import gate_counts, convert, qv_circuits, binstr\n\n\nclass NumericalEstimate(QVEstimate):\n \"\"\"Numerical estimation method for QVT heavy output probabilities.\"\"\"\n\n def __init__(self,\n qubit_list: list,\n optimization_lvl: list,\n error_name: str,\n error_list: list,\n ntrials: int):\n\n super().__init__(qubit_list, optimization_lvl, error_name, error_list)\n \n self.ntrials = ntrials\n self.gate_counts = {}\n \n def run(self):\n \"\"\"Runs numerical simulation over error_dict and qubit list. \"\"\"\n\n self.act_success = {}\n for n in self.qubit_list:\n start = datetime.today()\n noise_model = {\n e: arbitrary_noise(edict, n, True) \n for e, edict in self.error_dict.items()\n }\n qv_circs, heavy_outputs, self.ideal_success[n] = generate_ideal(\n n, \n self.ntrials\n )\n if self.optimization_lvl != 'high':\n # Everything but 'high' is independent of the error rate so done \n # outside next loop\n pm = preset_passes(self.optimization_lvl)\n qv_circs_new = [pm.run(qc) for qc in qv_circs]\n self.gate_counts[n] = gate_counts(qv_circs_new)\n \n for e in self.error_dict:\n if self.optimization_lvl == 'high':\n # 'high' optimization is set based on error rate so done \n # inside loop\n transpiler_options = {\n 'tol': estimate_errors(self.error_dict[e]), \n 'mirror': True\n }\n pm = preset_passes('high', transpiler_options)\n qv_circs_new = [pm.run(qc) for qc in qv_circs]\n self.gate_counts[n] = gate_counts(qv_circs_new)\n\n self.act_success[n, e] = act_outcomes(\n qv_circs_new,\n noise_model[e],\n heavy_outputs,\n self.optimization_lvl,\n ) \n \n time = datetime.today() - start\n print(f'finished n={n}, time={time}')\n\n\ndef generate_ideal(nqubits: int,\n reps: int,\n savefile: bool = True):\n \"\"\"\n Generate ideal circuits, heavy_outcomes, and success for all qubit numbers.\n \n Args:\n nqubits: number of qubits\n reps: number of random circuits\n savefile: if True then save ideal info to file\n \n \"\"\"\n backend_ideal = Aer.get_backend('statevector_simulator')\n \n # circuit generation\n qv_circs, qv_circs_nomeas = qv_circuits(nqubits, reps)\n \n # circuit simulation\n ideal_results = execute(\n qv_circs_nomeas,\n backend=backend_ideal\n ).result()\n \n # identify heavy outcomes\n plist = [\n np.array([\n np.abs(s)**2 \n for s in ideal_results.get_statevector(i)\n ])\n for i in range(reps)\n ]\n heavy_outcomes = [np.argsort(p)[len(p)//2:] for p in 
plist]\n ideal_results = [np.sum(p[h]) for p, h in zip(plist, heavy_outcomes)]\n \n if savefile:\n with open(f'qv_ideal_n{nqubits}.pkl', 'wb') as f:\n pickle.dump([qc.qasm() for qc in qv_circs_nomeas], f)\n pickle.dump(ideal_results, f)\n pickle.dump(heavy_outcomes, f)\n \n return qv_circs, heavy_outcomes, ideal_results\n \n \ndef act_outcomes(qv_circs: list,\n noise_model,\n heavy_outputs: list,\n optimization_level: str):\n \"\"\"\n Returns actual state under noise_model.\n \n Notes:\n - only works when optimization is done before execute\n \n Args:\n qv_circs: list of qv circuits as qasm strings\n noise_model: qiskit NoiseModel object\n heavy_outcomes: list of heavy outcomes for each circuits\n optimization_level: level of optimization of circuits\n backend_options: options used in execute for backend\n \n Returns:\n (list) list of probability of each outcome for each circuit\n \n \"\"\" \n heavy_probs = []\n for i, qc in enumerate(qv_circs):\n if optimization_level == 'high': \n meas_order = new_result_order(qc.num_qubits, qc)\n \n qc.remove_final_measurements()\n [qc.id(q) for q in range(qc.num_qubits)]\n qc.save_probabilities(label='end')\n\n backend = Aer.get_backend('qasm_simulator')\n ideal_results = execute(\n qc, \n noise_model=noise_model,\n backend=backend, \n optimization_level=0\n ).result()\n tmp_probs = ideal_results.results[0].data.end\n \n if optimization_level == 'high':\n heavy_probs.append(\n sum(\n tmp_probs[h]\n for h in np.argsort(meas_order)[heavy_outputs[i]]\n )\n )\n \n else:\n heavy_probs.append(\n sum(\n tmp_probs[h]\n for h in heavy_outputs[i]\n )\n )\n\n return heavy_probs\n\n\ndef read_meas_order(nqubits, \n qc: QuantumCircuit):\n \"\"\"Qubit order from measurement order of qasm str.\"\"\"\n \n qubits = [0] * nqubits\n for n in range(1, nqubits + 1):\n qubits[qc[-n][2][0].index] = nqubits - 1 - qc[-n][1][0].index\n\n return qubits[::-1]\n \n \ndef new_result_order(nqubits, \n qc: QuantumCircuit):\n \"\"\"Map for measurement index to new index.\"\"\"\n \n morder = read_meas_order(nqubits, qc)\n\n str_list = [binstr(i, nqubits) for i in range(2**nqubits)]\n meas_map = [\n int(''.join(np.array([b for b in bstr])[morder]), 2) \n for bstr in str_list\n ]\n \n return meas_map\n\n\ndef estimate_errors(error_dict: dict):\n \"\"\"Estimate TQ errors based on error_dict.\"\"\"\n\n tq_dep = 1\n sq_dep = 1\n if 'tq_dep' in error_dict:\n tq_dep *= convert(1 - error_dict['tq_dep'], 4, 'avg', 'dep')\n \n if 'tq_coh' in error_dict:\n tq_dep *= convert(1 - error_dict['tq_coh'], 4, 'avg', 'dep')\n \n if 'sq_dep' in error_dict:\n sq_dep *= convert(1 - error_dict['sq_dep'], 2, 'avg', 'dep')\n \n if 'sq_coh' in error_dict:\n sq_dep *= convert(1 - error_dict['sq_coh'], 2, 'avg', 'dep')\n\n if 'sq_dph' in error_dict:\n sq_dep *= convert(1 - error_dict['sq_dph'], 2, 'avg', 'dep')\n \n if 'tq_dph' in error_dict:\n tq_dep *= convert(1 - error_dict['tq_dph'], 2, 'avg', 'dep')\n \n sq_dep = convert(convert(sq_dep, 2, 'dep', 'proc') ** 2, 4, 'proc', 'dep')\n \n slice_fid = convert(sq_dep * tq_dep, 4, 'dep', 'avg')\n \n return slice_fid"
] |
[
[
"numpy.sum",
"numpy.abs",
"numpy.array",
"numpy.argsort"
]
] |
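Note: the heavy-output bookkeeping in the cell above reduces to sorting the ideal outcome probabilities, labelling the upper half "heavy", and summing the noisy probabilities on those outcomes. A minimal sketch with stand-in distributions in place of simulator output:

    import numpy as np

    rng = np.random.default_rng(1)
    n = 4
    p_ideal = rng.dirichlet(np.ones(2 ** n))         # stand-in ideal distribution
    p_noisy = 0.9 * p_ideal + 0.1 / p_ideal.size     # stand-in depolarized distribution

    heavy = np.argsort(p_ideal)[len(p_ideal) // 2:]  # upper half = heavy outcomes
    h_ideal = p_ideal[heavy].sum()                   # ideal heavy-output probability
    h_noisy = p_noisy[heavy].sum()                   # actual heavy-output probability
    print(h_ideal, h_noisy)                          # the QV test requires > 2/3 on average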
skasiraj/Parameter-Estimation-BO
|
[
"90e701db7faec8e500a74a6d58bbbd121958f326"
] |
[
"examples/rosenbrock/rosenbrock_estimate_params_scipy.py"
] |
[
"\"\"\"\nEstimate the Rosenbrock function parameters a and b\nfor the function f(x,y) = (a - x)^2 + b(y - x^2)^2\nusing generated data similar to a typical parameter\nestimation problem\n\"\"\"\n\nimport time\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nfrom scipy.optimize import Bounds\nfrom estimator.modelwrappers import ModelWrapper\nfrom estimator.utils import WeightedRMSE\nfrom rosenbrock_data_generate import generate_data\n\n# Change x,y,a, b to solve a\n# new generate data for a\n# parameter estimation problem\ngenerate_new_data = False\nif (generate_new_data):\n a = 10.0\n b = 200.0\n x = np.linspace(-2, 2, 10)\n y = np.linspace(-1, 3, 10)\n generate_data(a=a, b=b)\n\n\ndef rosenbrock(X, params):\n \"\"\"\n The Rosenbrock function.\n The function computed is::\n f(x,y) = (a - x)^2 + b(y - x^2)^2\n \"\"\"\n _x, _y = X\n return (params[0] - _x) ** 2 + params[1] * (_y - _x ** 2) ** 2\n\n\n# Try SciPy Optimizers for the same task\ndef loss_func_scipy(x, wrapper):\n \"\"\"\n Customized loss function specific to this problem\n \"\"\"\n loss = 0\n # customization specific to the problem\n _X, _Y = np.meshgrid(wrapper.x_inputs[0],\n wrapper.x_inputs[1])\n\n _Z = wrapper.model((_X, _Y), x)\n y_predict = _Z.reshape(1, -1)\n # end customization specific to the problem\n for i in range(wrapper.n_responses):\n # Factor in the weights\n loss += WeightedRMSE(wrapper.y_groundtruth[i], y_predict[i], wrapper.y_weights)\n wrapper.call_count += 1\n return loss\n\n\n# Read data (X,Y,Z) from the data.csv file which is used for fitting the\n# parameters a and b.\n# You edit this section for your specific problem\ndf = pd.read_csv('data.csv')\npivot_df = df.pivot(index='X', columns='Y',\n values='Z')\ny = pivot_df.columns.values\nx = pivot_df.index.values\ndata = df.to_numpy()\nx_input = [x, y] # experimental inputs read from the csv file.\ny_response = data[:, -1:].T\n\n# Set up the problem\n\n# Change the ranges of a and b if you generate new data if using a different a or b\n# these are the bounds within which # parameters are searched\nparameter_range = [[0.0, 5.0], # for default a\n [50.0, 150.0]] # for default b\npara_names = ['a', 'b']\n\nstart_time = time.time()\nestimator_name = 'rosenbrock-test-scipy'\n\nwrapper = ModelWrapper(model_function=rosenbrock, # model function used for evaluating responses = f(inputs,params)\n para_names=para_names,\n name=estimator_name,\n )\nwrapper.input_data(x_inputs=x_input,\n n_trials=100,\n y_groundtruth=y_response)\nbounds = Bounds([0.0, 5.0], [50.0, 150.0])\nres = minimize(loss_func_scipy,\n x0=[-50.0, 0.0],\n args=(wrapper,),\n method=\"Nelder-Mead\",\n options={'xtol': 1e-8, 'disp': True},\n bounds=bounds,\n )\nend_time = time.time()\na = open(\"debug-scipy.log\",'w')\na.write(\"Objective function called {} times \\n\".format(wrapper.call_count))\na.write(\"Parameters are {} \\n\".format(res['x']))\na.write(\"Total time taken in sec {} \\n \\n \\n\".format(end_time-start_time))\na.write(\"Optimizer results {}\".format(res))\nend_time = time.time()\nprint(res)\n"
] |
[
[
"scipy.optimize.Bounds",
"scipy.optimize.minimize",
"numpy.linspace",
"pandas.read_csv",
"numpy.meshgrid"
]
] |
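Note: in the cell above, scipy.optimize.Bounds takes separate lower and upper arrays with one entry per parameter, so bounds matching parameter_range = [[0, 5], [50, 150]] would be Bounds([0.0, 50.0], [5.0, 150.0]), and x0 should lie inside them. A minimal sketch of the same fit, with plain RMSE standing in for the wrapper's weighted loss:

    import numpy as np
    from scipy.optimize import Bounds, minimize

    def rosenbrock(X, params):
        x, y = X
        return (params[0] - x) ** 2 + params[1] * (y - x ** 2) ** 2

    X, Y = np.meshgrid(np.linspace(-2, 2, 10), np.linspace(-1, 3, 10))
    z_data = rosenbrock((X, Y), [1.0, 100.0])       # synthetic ground truth

    def loss(params):
        return np.sqrt(np.mean((rosenbrock((X, Y), params) - z_data) ** 2))

    res = minimize(loss, x0=[2.0, 75.0],            # start inside the bounds
                   method="Nelder-Mead",
                   bounds=Bounds([0.0, 50.0], [5.0, 150.0]))
    print(res.x)                                    # should approach [1.0, 100.0]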